/******************************************************
The database server main program

NOTE: SQL Server 7 uses something which the documentation
calls user mode scheduled threads (UMS threads). One such
thread is usually allocated per processor. The Win32
documentation does not mention UMS threads, which suggests
that the concept is internal to SQL Server 7. It may mean that
SQL Server 7 does all the scheduling of threads itself, even
in i/o waits. We should maybe modify Innobase to use the same
technique, because thread switches within NT may be too slow.

SQL Server 7 also mentions fibers, which are cooperatively
scheduled threads. They can boost performance by 5%,
according to Delaney and Soukup's book.

Windows 2000 will have something called thread pooling
(see the msdn website), which we could possibly use.

Another possibility could be to use some very fast user space
thread library. This might confuse NT though.

Created 10/8/1995 Heikki Tuuri
*******************************************************/
#include "sync0sync.h"
#include "odbc0odbc.h"
#include "pars0pars.h"
#include "lock0lock.h"
#include "trx0purge.h"
#include "ibuf0ibuf.h"
/* The following counter is incremented whenever there is some user activity
in the server */
ulint   srv_activity_count      = 0;
/* Server parameters which are read from the initfile */

/* The following three are dir paths which are catenated before file
names, where the file name itself may also contain a path */

char*   srv_data_home           = NULL;
char*   srv_logs_home           = NULL;
char*   srv_arch_dir            = NULL;

ulint   srv_n_data_files        = 0;
char**  srv_data_file_names     = NULL;
ulint*  srv_data_file_sizes     = NULL;         /* size in database pages */

char**  srv_log_group_home_dirs = NULL;

ulint   srv_n_log_groups        = ULINT_MAX;
ulint   srv_n_log_files         = ULINT_MAX;
ulint   srv_log_file_size       = ULINT_MAX;    /* size in database pages */
ibool   srv_log_archive_on      = TRUE;
ulint   srv_log_buffer_size     = ULINT_MAX;    /* size in database pages */
ibool   srv_flush_log_at_trx_commit = TRUE;

ibool   srv_use_native_aio      = FALSE;

ulint   srv_pool_size           = ULINT_MAX;    /* size in database pages;
                                                MySQL originally sets this ... */
ulint   srv_mem_pool_size       = ULINT_MAX;    /* size in bytes */
ulint   srv_lock_table_size     = ULINT_MAX;

ulint   srv_n_file_io_threads   = ULINT_MAX;

ibool   srv_archive_recovery    = 0;
dulint  srv_archive_recovery_limit_lsn;

ulint   srv_lock_wait_timeout   = 1024 * 1024 * 1024;
/*-------------------------------------------*/
ulint   srv_n_spin_wait_rounds  = 20;
ulint   srv_spin_wait_delay     = 5;
ibool   srv_priority_boost      = TRUE;
char    srv_endpoint_name[COM_MAX_ADDR_LEN];
ulint   srv_n_com_threads       = ULINT_MAX;
ulint   srv_n_worker_threads    = ULINT_MAX;

ibool   srv_print_thread_releases       = FALSE;
ibool   srv_print_lock_waits            = FALSE;
ibool   srv_print_buf_io                = FALSE;
ibool   srv_print_log_io                = FALSE;
ibool   srv_print_latch_waits           = FALSE;

/* The parameters below are obsolete: */

ibool   srv_print_parsed_sql            = FALSE;

ulint   srv_sim_disk_wait_pct           = ULINT_MAX;
ulint   srv_sim_disk_wait_len           = ULINT_MAX;
ibool   srv_sim_disk_wait_by_yield      = FALSE;
ibool   srv_sim_disk_wait_by_wait       = FALSE;

ibool   srv_measure_contention          = FALSE;
ibool   srv_measure_by_spin             = FALSE;

ibool   srv_test_extra_mutexes          = FALSE;
ibool   srv_test_nocache                = FALSE;
ibool   srv_test_cache_evict            = FALSE;

ibool   srv_test_sync                   = FALSE;
ulint   srv_test_n_threads              = ULINT_MAX;
ulint   srv_test_n_loops                = ULINT_MAX;
ulint   srv_test_n_free_rnds            = ULINT_MAX;
ulint   srv_test_n_reserved_rnds        = ULINT_MAX;
ulint   srv_test_array_size             = ULINT_MAX;
ulint   srv_test_n_mutexes              = ULINT_MAX;
/*
        IMPLEMENTATION OF THE SERVER MAIN PROGRAM
        =========================================

There is the following analogy between this database
server and an operating system kernel:

DB concept                      equivalent OS concept
----------                      ---------------------
transaction             --      process;

query thread            --      thread;

the rollback state      --      kill signal delivered to a process;

query thread execution:
(a) without kernel mutex
reserved                --      process executing in user mode;
(b) with kernel mutex reserved
                        --      process executing in kernel mode;

The server is controlled by a master thread which runs at
a priority higher than normal, that is, higher than user threads.
It sleeps most of the time, and wakes up, say, every 300 milliseconds,
to check whether there is anything happening in the server which
requires intervention of the master thread. Such situations may be,
for example, when flushing of dirty blocks is needed in the buffer
pool or old versions of database rows have to be cleaned away.

The threads which we call user threads serve the queries of
the clients and input from the console of the server.
They run at normal priority. The server may have several
communications endpoints. A dedicated set of user threads waits
at each of these endpoints ready to receive a client request.
Each request is taken by a single user thread, which then starts
processing and, when the result is ready, sends it to the client
and returns to wait at the same endpoint the thread started from.

So, we do not have dedicated communication threads listening at
the endpoints and dealing out the jobs to dedicated worker threads.
Our architecture saves one thread switch per request, compared
to the solution with dedicated communication threads,
which amounts to 15 microseconds on a 100 MHz Pentium
running NT. If the client
is communicating over a network, this saving is negligible, but
if the client resides in the same machine, maybe in an SMP machine
on a different processor from the server thread, the saving
can be important as the threads can communicate over shared
memory with an overhead of a few microseconds.

We may later implement a dedicated communication thread solution
for those endpoints which communicate over a network.

Our solution with user threads has two problems: for each endpoint
there has to be a number of listening threads. If there are many
communication endpoints, it may be difficult to set the right number
of concurrent threads in the system, as many of the threads
may always be waiting at less busy endpoints. Another problem
is the queuing of the messages, as the server internally does not
offer any queue for jobs.

Another group of user threads is intended for splitting the
queries and processing them in parallel. Let us call these
parallel communication threads. These threads are waiting for
parallelized tasks, suspended on event semaphores.

A single user thread waits for input from the console,
like a command to shut down the database.

Utility threads are a different group of threads which take
care of the buffer pool flushing and other, mainly background
operations, in the server.
Some of these utility threads always run at a lower than normal
priority, so that they are always in background. Some of them
may dynamically boost their priority by the pri_adjust function,
even to higher than normal priority, if their task becomes urgent.
The running of utilities is controlled by high- and low-water marks
of urgency. The urgency may be measured by the number of dirty blocks
in the buffer pool, in the case of the flush thread, for example.
When the high-water mark is exceeded, a utility starts running, until
the urgency drops under the low-water mark. Then the utility thread
suspends itself to wait for an event. The master thread is
responsible for signaling this event when the utility thread is
again needed.

For each individual type of utility, some threads always remain
at lower than normal priority. This is because pri_adjust is implemented
so that the threads at normal or higher priority control their
share of running time by calling sleep. Thus, if the load of the
system suddenly drops, these threads cannot necessarily utilize
the system fully. The background priority threads make up for this,
starting to run when the load drops.

When there is no activity in the system, the master thread also
suspends itself to wait for an event, making
the server totally silent. The responsibility to signal this
event is on the user thread which again receives a message
from a client.

There is still one complication in our server design. If a
background utility thread obtains a resource (e.g., mutex) needed by a user
thread, and there is also some other user activity in the system,
the user thread may have to wait indefinitely long for the
resource, as the OS does not schedule a background thread if
there is some other runnable user thread. This problem is called
priority inversion in real-time programming.

One solution to the priority inversion problem would be to
keep a record of which thread owns which resource and
in the above case boost the priority of the background thread
so that it will be scheduled and it can release the resource.
This solution is called priority inheritance in real-time programming.
A drawback of this solution is that the overhead of acquiring a mutex
increases slightly, maybe 0.2 microseconds on a 100 MHz Pentium, because
the thread has to call os_thread_get_curr_id.
This may be compared to a 0.5 microsecond overhead for a mutex lock-unlock
pair. Note that the thread
cannot store the information in the resource, say mutex, itself,
because competing threads could wipe out the information if it is
stored before acquiring the mutex, and if it is stored afterwards,
the information is outdated for the time of one machine instruction,
at least. (To be precise, the information could be stored in the
lock_word of the mutex if the machine supports atomic swap.)

The above solution with priority inheritance may become relevant in the
future, but at the moment we plan to implement a more coarse solution,
which could be called global priority inheritance. If a thread
has to wait for a long time, say 300 milliseconds, for a resource,
we just guess that it may be waiting for a resource owned by a background
thread, and boost the priority of all runnable background threads
to the normal level. The background threads then themselves adjust
their fixed priority back to background after releasing all resources
they had (or, at some fixed points in their program code).

What is the performance of the global priority inheritance solution?
We may weigh the length of the wait time, 300 milliseconds, during
which the system processes some other thread,
against the cost of boosting the priority of each runnable background
thread, rescheduling it, and lowering the priority again.
On a 100 MHz Pentium + NT this overhead may be of the order of 100
microseconds per thread. So, if the number of runnable background
threads is not very big, say < 100, the cost is tolerable.
Utility threads will probably access resources used by
user threads not very often, so collisions of user threads
with preempted utility threads should not happen very often.

The thread table contains
information on the current status of each thread existing in the system,
and also the event semaphores used in suspending the master thread
and utility and parallel communication threads when they have nothing to do.
The thread table can be seen as an analogue to the process table
in a traditional Unix implementation.

The thread table is also used in the global priority inheritance
scheme. This brings in one additional complication: threads accessing
the thread table must have at least normal fixed priority,
because the priority inheritance solution does not work if a background
thread is preempted while possessing the mutex protecting the thread table.
So, if a thread accesses the thread table, its priority has to be
boosted at least to normal. This priority requirement can be seen as similar
to the privileged mode used when processing kernel calls in traditional
operating systems. */
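/* The block below is only an illustrative sketch (kept outside compilation
with #if 0) of the coarse 'global priority inheritance' policy described
above: when a waiter has waited longer than a threshold, every thread
registered in the thread table which is not serving queries is temporarily
boosted to normal priority. The helper function name is hypothetical, and
os_thread_set_priority() with OS_THREAD_PRIORITY_NORMAL is assumed to exist
in the os0thread module roughly in this form; the real policy would also
have to pick only the threads currently at background priority. */
#if 0
static void
srv_boost_background_threads_sketch(ib_time_t wait_started)
{
        ulint   i;

        if (ut_difftime(ut_time(), wait_started) < 0.3) {

                return; /* waited less than ~300 ms: do nothing yet */
        }

        for (i = 0; i < OS_THREAD_MAX_N; i++) {
                srv_slot_t*     slot = srv_table_get_nth_slot(i);

                if (slot->in_use && (slot->type != SRV_COM)) {
                        /* Guess that this thread may own the resource:
                        raise it to normal priority; the thread lowers its
                        own priority again after releasing all resources
                        it holds */

                        os_thread_set_priority(slot->handle,
                                        OS_THREAD_PRIORITY_NORMAL);
                }
        }
}
#endif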
/* Thread slot in the thread table */
struct srv_slot_struct{
        os_thread_id_t  id;             /* thread id */
        os_thread_t     handle;         /* thread handle */
        ulint           type;           /* thread type: user, utility etc. */
        ibool           in_use;         /* TRUE if this slot is in use */
        ibool           suspended;      /* TRUE if the thread is waiting
                                        for the event of this slot */
        ib_time_t       suspend_time;   /* time when the thread was
                                        suspended */
        os_event_t      event;          /* event used in suspending the
                                        thread when it has nothing to do */
        que_thr_t*      thr;            /* suspended query thread (only
                                        used for MySQL threads) */
};

/* Table for MySQL threads where they will be suspended to wait for locks */
srv_slot_t*     srv_mysql_table = NULL;
os_event_t      srv_lock_timeout_thread_event;

srv_sys_t*      srv_sys = NULL;

byte            srv_pad1[64];   /* padding to prevent other memory update
                                hotspots from residing on the same memory
                                cache line */
mutex_t*        kernel_mutex_temp;/* mutex protecting the server, trx structs,
                                query threads, and lock table */
byte            srv_pad2[64];   /* padding to prevent other memory update
                                hotspots from residing on the same memory
                                cache line */

/* The following three values measure the urgency of the jobs of
buffer, version, and insert threads. They may vary from 0 - 1000.
The server mutex protects all these variables. The low-water values
tell that the server can acquiesce the utility when the value
drops below this low-water mark. */

ulint   srv_meter[SRV_MASTER + 1];
ulint   srv_meter_low_water[SRV_MASTER + 1];
ulint   srv_meter_high_water[SRV_MASTER + 1];
ulint   srv_meter_high_water2[SRV_MASTER + 1];
ulint   srv_meter_foreground[SRV_MASTER + 1];

/* The following values give info about the activity going on in
the database. They are protected by the server mutex. The arrays
are indexed by the type of the thread. */

ulint   srv_n_threads_active[SRV_MASTER + 1];
ulint   srv_n_threads[SRV_MASTER + 1];
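/* The boot-time defaults assigned in the server initialization function
below are low-water 50, high-water 100, high-water2 200 and foreground 250.
With those values a utility gets up to half of its threads released once
its meter exceeds 100 and no queries are active
(srv_release_max_if_no_queries), one thread released in quiet moments once
the meter exceeds 200 (srv_release_one_if_no_queries), and it is allowed to
run alongside active queries only once the meter exceeds 250
(srv_max_n_utilities); when the meter falls back below the low-water mark
the utility suspends itself again. */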
/*************************************************************************
Accessor function to get pointer to n'th slot in the server thread
table. */

srv_slot_t*
srv_table_get_nth_slot(
/*===================*/
                        /* out: pointer to the slot */
        ulint   index)  /* in: index of the slot */
{
        ut_a(index < OS_THREAD_MAX_N);

        return(srv_sys->threads + index);
}

/*************************************************************************
Gets the number of threads in the system. */

ulint
srv_get_n_threads(void)
/*===================*/
{
        ulint   i;
        ulint   n_threads       = 0;

        mutex_enter(&kernel_mutex);

        for (i = SRV_COM; i < SRV_MASTER + 1; i++) {

                n_threads += srv_n_threads[i];
        }

        mutex_exit(&kernel_mutex);

        return(n_threads);
}
/*************************************************************************
Reserves a slot in the thread table for the current thread. Also creates the
thread local storage struct for the current thread. NOTE! The server mutex
has to be reserved by the caller! */

ulint
srv_table_reserve_slot(
/*===================*/
                        /* out: reserved slot index */
        ulint   type)   /* in: type of the thread: one of SRV_COM, ... */
{
        srv_slot_t*     slot;
        ulint           i;

        ut_a(type <= SRV_MASTER);

        i = 0;
        slot = srv_table_get_nth_slot(i);

        while (slot->in_use) {
                i++;
                slot = srv_table_get_nth_slot(i);
        }

        ut_a(slot->in_use == FALSE);

        slot->in_use = TRUE;
        slot->suspended = FALSE;
        slot->id = os_thread_get_curr_id();
        slot->handle = os_thread_get_curr();
        slot->type = type;

        thr_local_create();

        thr_local_set_slot_no(os_thread_get_curr_id(), i);

        return(i);
}
/*************************************************************************
Suspends the calling thread to wait for the event in its thread slot.
NOTE! The server mutex has to be reserved by the caller! */

os_event_t
srv_suspend_thread(void)
/*====================*/
                        /* out: event for the calling thread to wait */
{
        srv_slot_t*     slot;
        os_event_t      event;
        ulint           slot_no;
        ulint           type;

        ut_ad(mutex_own(&kernel_mutex));

        slot_no = thr_local_get_slot_no(os_thread_get_curr_id());

        if (srv_print_thread_releases) {
                printf("Suspending thread %lu to slot %lu meter %lu\n",
                        os_thread_get_curr_id(), slot_no,
                        srv_meter[SRV_RECOVERY]);
        }

        slot = srv_table_get_nth_slot(slot_no);

        type = slot->type;

        ut_ad(type >= SRV_WORKER);
        ut_ad(type <= SRV_MASTER);

        event = slot->event;

        slot->suspended = TRUE;

        ut_ad(srv_n_threads_active[type] > 0);

        srv_n_threads_active[type]--;

        os_event_reset(event);

        return(event);
}
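/* The caller-side sequence for suspension is the one used by the utility
and worker threads further below in this file:

        mutex_enter(&kernel_mutex);
        event = srv_suspend_thread();
        mutex_exit(&kernel_mutex);

        os_event_wait(event);

i.e., the slot event is fetched and reset while holding the kernel mutex,
and the actual wait happens outside the mutex so that srv_release_threads()
can run and set the event. */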
/*************************************************************************
Releases threads of the type given from suspension in the thread table.
NOTE! The server mutex has to be reserved by the caller! */

ulint
srv_release_threads(
/*================*/
                        /* out: number of threads released: this may be
                        < n if not enough threads were suspended at the
                        moment */
        ulint   type,   /* in: thread type */
        ulint   n)      /* in: number of threads to release */
{
        srv_slot_t*     slot;
        ulint           i;
        ulint           count   = 0;

        ut_ad(type >= SRV_WORKER);
        ut_ad(type <= SRV_MASTER);
        ut_ad(mutex_own(&kernel_mutex));

        for (i = 0; i < OS_THREAD_MAX_N; i++) {

                slot = srv_table_get_nth_slot(i);

                if ((slot->type == type) && slot->suspended) {

                        slot->suspended = FALSE;

                        srv_n_threads_active[type]++;

                        os_event_set(slot->event);

                        if (srv_print_thread_releases) {
                                printf(
                "Releasing thread %lu type %lu from slot %lu meter %lu\n",
                                slot->id, type, i, srv_meter[SRV_RECOVERY]);
                        }

                        count++;

                        if (count == n) {

                                break;
                        }
                }
        }

        return(count);
}
/*************************************************************************
Returns the calling thread type. */

ulint
srv_get_thread_type(void)
/*=====================*/
                        /* out: SRV_COM, ... */
{
        srv_slot_t*     slot;
        ulint           slot_no;
        ulint           type;

        mutex_enter(&kernel_mutex);

        slot_no = thr_local_get_slot_no(os_thread_get_curr_id());

        slot = srv_table_get_nth_slot(slot_no);

        type = slot->type;

        ut_ad(type >= SRV_WORKER);
        ut_ad(type <= SRV_MASTER);

        mutex_exit(&kernel_mutex);

        return(type);
}
/***********************************************************************
Increments by 1 the count of active threads of the type given
and releases master thread if necessary. */

void
srv_inc_thread_count(
/*=================*/
        ulint   type)   /* in: type of the thread */
{
        mutex_enter(&kernel_mutex);

        srv_activity_count++;

        srv_n_threads_active[type]++;

        if (srv_n_threads_active[SRV_MASTER] == 0) {

                srv_release_threads(SRV_MASTER, 1);
        }

        mutex_exit(&kernel_mutex);
}
/***********************************************************************
Decrements by 1 the count of active threads of the type given. */

void
srv_dec_thread_count(
/*=================*/
        ulint   type)   /* in: type of the thread */
{
        mutex_enter(&kernel_mutex);

        /* FIXME: the following assertion sometimes fails: */

        if (srv_n_threads_active[type] == 0) {
                printf("Error: thread type %lu\n", type);
        }

        srv_n_threads_active[type]--;

        mutex_exit(&kernel_mutex);
}
/***********************************************************************
Calculates the number of allowed utility threads for a thread to decide if
it has to suspend itself in the thread table. */

ulint
srv_max_n_utilities(
/*================*/
                        /* out: maximum number of allowed utilities
                        of the type given */
        ulint   type)   /* in: utility type */
{
        ulint   ret;

        if (srv_n_threads_active[SRV_COM] == 0) {
                if (srv_meter[type] > srv_meter_low_water[type]) {

                        return(srv_n_threads[type] / 2);
                }

                return(0);
        }

        if (srv_meter[type] < srv_meter_foreground[type]) {

                return(0);
        }

        ret = 1 + ((srv_n_threads[type]
                * (ulint)(srv_meter[type] - srv_meter_foreground[type]))
                / (ulint)(1000 - srv_meter_foreground[type]));

        if (ret > srv_n_threads[type]) {

                return(srv_n_threads[type]);
        }

        return(ret);
}
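/* Example with the boot-time foreground value 250: if srv_n_threads[type]
is 8 and srv_meter[type] is 625, the formula above gives
1 + (8 * (625 - 250)) / (1000 - 250) = 1 + 3000 / 750 = 5 allowed utility
threads; at meter value 1000 the result would exceed 8 and is capped to
srv_n_threads[type]. (The thread count 8 is only an illustration.) */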
/***********************************************************************
Increments the utility meter by the value given and releases utility
threads if necessary. */

void
srv_increment_meter(
/*================*/
        ulint   type,   /* in: utility type */
        ulint   n)      /* in: value to add to meter */
{
        ulint   m;

        mutex_enter(&kernel_mutex);

        srv_meter[type] += n;

        m = srv_max_n_utilities(type);

        if (m > srv_n_threads_active[type]) {

                srv_release_threads(type, m - srv_n_threads_active[type]);
        }

        mutex_exit(&kernel_mutex);
}
/***********************************************************************
Releases max number of utility threads if no queries are active and
the high-water mark for the utility is exceeded. */

void
srv_release_max_if_no_queries(void)
/*===============================*/
{
        ulint   m;
        ulint   type;

        mutex_enter(&kernel_mutex);

        if (srv_n_threads_active[SRV_COM] > 0) {
                mutex_exit(&kernel_mutex);

                return;
        }

        for (type = SRV_COM; type < SRV_MASTER; type++) {

                m = srv_n_threads[type] / 2;

                if ((srv_meter[type] > srv_meter_high_water[type])
                    && (srv_n_threads_active[type] < m)) {

                        srv_release_threads(type,
                                        m - srv_n_threads_active[type]);

                        printf("Releasing max background\n");
                }
        }

        mutex_exit(&kernel_mutex);
}
/***********************************************************************
Releases one utility thread if no queries are active and
the high-water mark 2 for the utility is exceeded. */

void
srv_release_one_if_no_queries(void)
/*===============================*/
{
        ulint   m;
        ulint   type;

        mutex_enter(&kernel_mutex);

        if (srv_n_threads_active[SRV_COM] > 0) {
                mutex_exit(&kernel_mutex);

                return;
        }

        for (type = SRV_COM; type < SRV_MASTER; type++) {

                m = 1;

                if ((srv_meter[type] > srv_meter_high_water2[type])
                    && (srv_n_threads_active[type] < m)) {

                        srv_release_threads(type,
                                        m - srv_n_threads_active[type]);

                        printf("Releasing one background\n");
                }
        }

        mutex_exit(&kernel_mutex);
}
/***********************************************************************
Decrements the utility meter by the value given and suspends the calling
thread, which must be a utility thread of the type given, if necessary. */

        ulint   type,   /* in: utility type */
        ulint   n)      /* in: value to subtract from meter */

        mutex_enter(&kernel_mutex);

        if (srv_meter[type] < n) {
                srv_meter[type] = 0;
        } else {
                srv_meter[type] -= n;
        }

        opt = srv_max_n_utilities(type);

        if (opt < srv_n_threads_active[type]) {

                event = srv_suspend_thread();
                mutex_exit(&kernel_mutex);

                os_event_wait(event);
        } else {
                mutex_exit(&kernel_mutex);
        }
/*************************************************************************
Implements the server console. */

                        /* out: return code, not used */
        void*   arg)    /* in: argument, not used */

        mutex_enter(&kernel_mutex);
        srv_table_reserve_slot(SRV_CONSOLE);
        mutex_exit(&kernel_mutex);

        os_event_wait(srv_sys->operational);

        scanf("%s", command);

        srv_inc_thread_count(SRV_CONSOLE);

        if (command[0] == 'c') {
                printf("Making checkpoint\n");

                log_make_checkpoint_at(ut_dulint_max, TRUE);

                printf("Checkpoint completed\n");

        } else if (command[0] == 'd') {
                srv_sim_disk_wait_pct = atoi(command + 1);

                printf(
                "Starting disk access simulation with pct %lu\n",
                        srv_sim_disk_wait_pct);
        } else {
                printf("\nNot supported!\n");
        }

        srv_dec_thread_count(SRV_CONSOLE);
/*************************************************************************
Creates the first communication endpoint for the server. This
first call also initializes the com0com.* module. */

void
srv_communication_init(
/*===================*/
        char*   endpoint)       /* in: server address */
{
        ulint   ret;
        ulint   len;

        srv_sys->endpoint = com_endpoint_create(COM_SHM);

        ut_a(srv_sys->endpoint);

        len = ODBC_DATAGRAM_SIZE;

        ret = com_endpoint_set_option(srv_sys->endpoint,
                                        COM_OPT_MAX_DGRAM_SIZE,
                                        (byte*)&len, sizeof(ulint));

        ret = com_bind(srv_sys->endpoint, endpoint, ut_strlen(endpoint));
}
/*************************************************************************
Implements the recovery utility. */

ulint
srv_recovery_thread(
/*================*/
                        /* out: return code, not used */
        void*   arg)    /* in: not used */
{
        ulint           slot_no;
        os_event_t      event;

        slot_no = srv_table_reserve_slot(SRV_RECOVERY);

        os_event_wait(srv_sys->operational);

        /* Finish a possible recovery */

        srv_inc_thread_count(SRV_RECOVERY);

        /* recv_recovery_from_checkpoint_finish(); */

        srv_dec_thread_count(SRV_RECOVERY);

        mutex_enter(&kernel_mutex);
        event = srv_suspend_thread();
        mutex_exit(&kernel_mutex);

        /* Wait for somebody to release this thread; (currently, this
        should never be released) */

        os_event_wait(event);

        return(0);
}
/*************************************************************************
Implements the purge utility. */

ulint
srv_purge_thread(
/*=============*/
                        /* out: return code, not used */
        void*   arg)    /* in: not used */
{
        os_event_wait(srv_sys->operational);

        return(0);
}
/*************************************************************************
Creates the utility threads. */

void
srv_create_utility_threads(void)
/*============================*/
{
        os_thread_t     thread;
        os_thread_id_t  thr_id;
        ulint           i;

        mutex_enter(&kernel_mutex);

        srv_n_threads[SRV_RECOVERY] = 1;
        srv_n_threads_active[SRV_RECOVERY] = 1;

        mutex_exit(&kernel_mutex);

        for (i = 0; i < 1; i++) {
                thread = os_thread_create(srv_recovery_thread, NULL, &thr_id);
        }

        /* thread = os_thread_create(srv_purge_thread, NULL, &thr_id); */
}
/*************************************************************************
Implements the communication threads. */

ulint
srv_com_thread(
/*===========*/
                        /* out: return code; not used */
        void*   arg)    /* in: not used */
{
        byte*   msg_buf;
        byte*   addr_buf;
        ulint   msg_len;
        ulint   addr_len;
        ulint   ret;

        srv_table_reserve_slot(SRV_COM);

        os_event_wait(srv_sys->operational);

        msg_buf = mem_alloc(com_endpoint_get_max_size(srv_sys->endpoint));
        addr_buf = mem_alloc(COM_MAX_ADDR_LEN);

        for (;;) {
                ret = com_recvfrom(srv_sys->endpoint, msg_buf,
                        com_endpoint_get_max_size(srv_sys->endpoint),
                        &msg_len, (char*)addr_buf, COM_MAX_ADDR_LEN,
                        &addr_len);

                srv_inc_thread_count(SRV_COM);

                sess_process_cli_msg(msg_buf, msg_len, addr_buf, addr_len);

                /* srv_increment_meter(SRV_RECOVERY, 1); */

                srv_dec_thread_count(SRV_COM);

                /* Release one utility thread for each utility if
                high water mark 2 is exceeded and there are no
                active queries. This is done to utilize possible
                quiet time in the server. */

                srv_release_one_if_no_queries();
        }

        return(0);
}
/*************************************************************************
Creates the communication threads. */

void
srv_create_com_threads(void)
/*========================*/
{
        os_thread_t     thread;
        os_thread_id_t  thr_id;
        ulint           i;

        srv_n_threads[SRV_COM] = srv_n_com_threads;

        for (i = 0; i < srv_n_com_threads; i++) {
                thread = os_thread_create(srv_com_thread, NULL, &thr_id);
        }
}
/*************************************************************************
Implements the worker threads. */

ulint
srv_worker_thread(
/*==============*/
                        /* out: return code, not used */
        void*   arg)    /* in: not used */
{
        os_event_t      event;

        srv_table_reserve_slot(SRV_WORKER);

        os_event_wait(srv_sys->operational);

        for (;;) {
                mutex_enter(&kernel_mutex);
                event = srv_suspend_thread();
                mutex_exit(&kernel_mutex);

                /* Wait for somebody to release this thread */
                os_event_wait(event);

                srv_inc_thread_count(SRV_WORKER);

                /* Check in the server task queue if there is work for this
                thread, and do the work */

                srv_que_task_queue_check();

                srv_dec_thread_count(SRV_WORKER);

                /* Release one utility thread for each utility if
                high water mark 2 is exceeded and there are no
                active queries. This is done to utilize possible
                quiet time in the server. */

                srv_release_one_if_no_queries();
        }

        return(0);
}
/*************************************************************************
Creates the worker threads. */

void
srv_create_worker_threads(void)
/*===========================*/
{
        os_thread_t     thread;
        os_thread_id_t  thr_id;
        ulint           i;

        srv_n_threads[SRV_WORKER] = srv_n_worker_threads;
        srv_n_threads_active[SRV_WORKER] = srv_n_worker_threads;

        for (i = 0; i < srv_n_worker_threads; i++) {
                thread = os_thread_create(srv_worker_thread, NULL, &thr_id);
        }
}
/*************************************************************************
Reads a keyword and a value from a file. */

ulint
srv_read_init_val(
/*==============*/
                                /* out: DB_SUCCESS or error code */
        FILE*   initfile,       /* in: file pointer */
        char*   keyword,        /* in: keyword before value(s), or NULL if
                                none */
        char*   str_buf,        /* in/out: buffer for a string value to read,
                                buffer size must be 10000 bytes, if NULL
                                then not read */
        ulint*  num_val,        /* out: numerical value to read, if NULL
                                then not read */
        ibool   print_not_err)  /* in: if TRUE, then we will not print
                                error messages to console */
{
        ulint   ret;
        char    scan_buf[10000];

        if (keyword == NULL) {

        ret = fscanf(initfile, "%9999s", scan_buf);

        if (ret == 0 || ret == EOF || 0 != ut_strcmp(scan_buf, keyword)) {
                if (print_not_err) {

                printf("Error in Innobase booting: keyword %s not found\n",
                printf("from the initfile!\n");

        if (num_val == NULL && str_buf == NULL) {

        ret = fscanf(initfile, "%9999s", scan_buf);

        if (ret == EOF || ret == 0) {
                if (print_not_err) {

                printf(
        "Error in Innobase booting: could not read first value after %s\n",
                printf("from the initfile!\n");

        ut_memcpy(str_buf, scan_buf, 10000);

        printf("init keyword %s value %s read\n", keyword, str_buf);

        ret = fscanf(initfile, "%9999s", scan_buf);

        if (ret == EOF || ret == 0) {
                if (print_not_err) {

                printf(
        "Error in Innobase booting: could not read second value after %s\n",
                printf("from the initfile!\n");

        if (ut_strlen(scan_buf) > 9) {
                if (print_not_err) {

                printf(
        "Error in Innobase booting: numerical value too big after %s\n",
                printf("in the initfile!\n");

        *num_val = (ulint)atoi(scan_buf);

        if (*num_val >= 1000000000) {
                if (print_not_err) {

                printf(
        "Error in Innobase booting: numerical value too big after %s\n",
                printf("in the initfile!\n");

        printf("init keyword %s value %lu read\n", keyword, *num_val);
/*************************************************************************
Reads keywords and values from an initfile. */

                                /* out: DB_SUCCESS or error code */
        FILE*   initfile)       /* in: file pointer */

        char    str_buf[10000];

        err = srv_read_init_val(initfile, "INNOBASE_DATA_HOME_DIR",
                                                str_buf, NULL, FALSE);
        if (err != DB_SUCCESS) return(err);

        srv_data_home = ut_malloc(ut_strlen(str_buf) + 1);
        ut_memcpy(srv_data_home, str_buf, ut_strlen(str_buf) + 1);

        err = srv_read_init_val(initfile, "TABLESPACE_NUMBER_OF_DATA_FILES",
                                                NULL, &n, FALSE);
        if (err != DB_SUCCESS) return(err);

        srv_n_data_files = n;

        srv_data_file_names = ut_malloc(n * sizeof(char*));
        srv_data_file_sizes = ut_malloc(n * sizeof(ulint));

        for (i = 0; i < n; i++) {
                err = srv_read_init_val(initfile,
                                        "DATA_FILE_PATH_AND_SIZE_MB",
                                        str_buf, &ulint_val, FALSE);
                if (err != DB_SUCCESS) return(err);

                srv_data_file_names[i] = ut_malloc(ut_strlen(str_buf) + 1);
                ut_memcpy(srv_data_file_names[i], str_buf,
                                                ut_strlen(str_buf) + 1);
                srv_data_file_sizes[i] = ulint_val
                                        * ((1024 * 1024) / UNIV_PAGE_SIZE);
        }

        err = srv_read_init_val(initfile,
                                "NUMBER_OF_MIRRORED_LOG_GROUPS", NULL,
                                &srv_n_log_groups, FALSE);
        if (err != DB_SUCCESS) return(err);

        err = srv_read_init_val(initfile,
                                "NUMBER_OF_LOG_FILES_IN_GROUP", NULL,
                                &srv_n_log_files, FALSE);
        if (err != DB_SUCCESS) return(err);

        err = srv_read_init_val(initfile, "LOG_FILE_SIZE_KB", NULL,
                                &srv_log_file_size, FALSE);
        if (err != DB_SUCCESS) return(err);

        srv_log_file_size = srv_log_file_size / (UNIV_PAGE_SIZE / 1024);

        srv_log_group_home_dirs = ut_malloc(srv_n_log_files * sizeof(char*));

        for (i = 0; i < srv_n_log_groups; i++) {

                err = srv_read_init_val(initfile,
                                        "INNOBASE_LOG_GROUP_HOME_DIR",
                                        str_buf, NULL, FALSE);
                if (err != DB_SUCCESS) return(err);

                srv_log_group_home_dirs[i] = ut_malloc(ut_strlen(str_buf) + 1);
                ut_memcpy(srv_log_group_home_dirs[i], str_buf,
                                                ut_strlen(str_buf) + 1);
        }

        err = srv_read_init_val(initfile, "INNOBASE_LOG_ARCH_DIR",
                                                str_buf, NULL, FALSE);
        if (err != DB_SUCCESS) return(err);

        srv_arch_dir = ut_malloc(ut_strlen(str_buf) + 1);
        ut_memcpy(srv_arch_dir, str_buf, ut_strlen(str_buf) + 1);

        err = srv_read_init_val(initfile, "LOG_ARCHIVE_ON(1/0)", NULL,
                                        &srv_log_archive_on, FALSE);
        if (err != DB_SUCCESS) return(err);

        err = srv_read_init_val(initfile, "LOG_BUFFER_SIZE_KB", NULL,
                                        &srv_log_buffer_size, FALSE);
        if (err != DB_SUCCESS) return(err);

        srv_log_buffer_size = srv_log_buffer_size / (UNIV_PAGE_SIZE / 1024);

        err = srv_read_init_val(initfile, "FLUSH_LOG_AT_TRX_COMMIT(1/0)", NULL,
                                        &srv_flush_log_at_trx_commit, FALSE);
        if (err != DB_SUCCESS) return(err);

        err = srv_read_init_val(initfile, "BUFFER_POOL_SIZE_MB", NULL,
                                        &srv_pool_size, FALSE);
        if (err != DB_SUCCESS) return(err);

        srv_pool_size = srv_pool_size * ((1024 * 1024) / UNIV_PAGE_SIZE);

        err = srv_read_init_val(initfile, "ADDITIONAL_MEM_POOL_SIZE_MB", NULL,
                                        &srv_mem_pool_size, FALSE);
        if (err != DB_SUCCESS) return(err);

        srv_mem_pool_size = srv_mem_pool_size * 1024 * 1024;

        srv_lock_table_size = 20 * srv_pool_size;

        err = srv_read_init_val(initfile, "NUMBER_OF_FILE_IO_THREADS", NULL,
                                        &srv_n_file_io_threads, FALSE);
        if (err != DB_SUCCESS) return(err);

        err = srv_read_init_val(initfile, "SRV_RECOVER_FROM_BACKUP",
                                        NULL, NULL, TRUE);
        if (err == DB_SUCCESS) {
                srv_archive_recovery = TRUE;
                srv_archive_recovery_limit_lsn = ut_dulint_max;

                err = srv_read_init_val(initfile, NULL, NULL, &val1, TRUE);
                err = srv_read_init_val(initfile, NULL, NULL, &val2, TRUE);

                if (err == DB_SUCCESS) {
                        srv_archive_recovery_limit_lsn =
                                        ut_dulint_create(val1, val2);
                }
        }

        /* err = srv_read_init_val(initfile,
                        "SYNC_NUMBER_OF_SPIN_WAIT_ROUNDS", NULL,
                        &srv_n_spin_wait_rounds);

        err = srv_read_init_val(initfile, "SYNC_SPIN_WAIT_DELAY", NULL,
                        &srv_spin_wait_delay); */
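/* A hypothetical example of the initfile section parsed above; the paths
and numbers are only illustrative, while the keywords and their order come
from the calls above:

INNOBASE_DATA_HOME_DIR          /usr/local/innobase/data/
TABLESPACE_NUMBER_OF_DATA_FILES 2
DATA_FILE_PATH_AND_SIZE_MB      ibdata1 128
DATA_FILE_PATH_AND_SIZE_MB      ibdata2 128
NUMBER_OF_MIRRORED_LOG_GROUPS   1
NUMBER_OF_LOG_FILES_IN_GROUP    3
LOG_FILE_SIZE_KB                10240
INNOBASE_LOG_GROUP_HOME_DIR     /usr/local/innobase/log/
INNOBASE_LOG_ARCH_DIR           /usr/local/innobase/log/
LOG_ARCHIVE_ON(1/0)             1
LOG_BUFFER_SIZE_KB              1024
FLUSH_LOG_AT_TRX_COMMIT(1/0)    1
BUFFER_POOL_SIZE_MB             64
ADDITIONAL_MEM_POOL_SIZE_MB     4
NUMBER_OF_FILE_IO_THREADS       4

Each keyword is read with fscanf("%s") and must be followed by its value(s)
separated by whitespace; DATA_FILE_PATH_AND_SIZE_MB takes a string and a
number. */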
/*************************************************************************
Reads keywords and values from an initfile. In case of an error, exits
from the process. */

        FILE*   initfile)       /* in: file pointer */

        char    str_buf[10000];

        srv_read_init_val(initfile, FALSE, "SRV_ENDPOINT_NAME", str_buf,
                                                NULL);
        ut_a(ut_strlen(str_buf) < COM_MAX_ADDR_LEN);

        ut_memcpy(srv_endpoint_name, str_buf, COM_MAX_ADDR_LEN);

        srv_read_init_val(initfile, TRUE, "SRV_N_COM_THREADS", str_buf,
                                                &srv_n_com_threads);

        srv_read_init_val(initfile, TRUE, "SRV_N_WORKER_THREADS", str_buf,
                                                &srv_n_worker_threads);

        srv_read_init_val(initfile, TRUE, "SYNC_N_SPIN_WAIT_ROUNDS", str_buf,
                                                &srv_n_spin_wait_rounds);

        srv_read_init_val(initfile, TRUE, "SYNC_SPIN_WAIT_DELAY", str_buf,
                                                &srv_spin_wait_delay);

        srv_read_init_val(initfile, TRUE, "THREAD_PRIORITY_BOOST", str_buf,
                                                &srv_priority_boost);

        srv_read_init_val(initfile, TRUE, "N_SPACES", str_buf, &srv_n_spaces);
        srv_read_init_val(initfile, TRUE, "N_FILES", str_buf, &srv_n_files);
        srv_read_init_val(initfile, TRUE, "FILE_SIZE", str_buf,

        srv_read_init_val(initfile, TRUE, "N_LOG_GROUPS", str_buf,
                                                &srv_n_log_groups);
        srv_read_init_val(initfile, TRUE, "N_LOG_FILES", str_buf,
                                                &srv_n_log_files);
        srv_read_init_val(initfile, TRUE, "LOG_FILE_SIZE", str_buf,
                                                &srv_log_file_size);
        srv_read_init_val(initfile, TRUE, "LOG_ARCHIVE_ON", str_buf,
                                                &srv_log_archive_on);
        srv_read_init_val(initfile, TRUE, "LOG_BUFFER_SIZE", str_buf,
                                                &srv_log_buffer_size);
        srv_read_init_val(initfile, TRUE, "FLUSH_LOG_AT_TRX_COMMIT", str_buf,
                                                &srv_flush_log_at_trx_commit);

        srv_read_init_val(initfile, TRUE, "POOL_SIZE", str_buf,
                                                &srv_pool_size);
        srv_read_init_val(initfile, TRUE, "MEM_POOL_SIZE", str_buf,
                                                &srv_mem_pool_size);
        srv_read_init_val(initfile, TRUE, "LOCK_TABLE_SIZE", str_buf,
                                                &srv_lock_table_size);

        srv_read_init_val(initfile, TRUE, "SIM_DISK_WAIT_PCT", str_buf,
                                                &srv_sim_disk_wait_pct);

        srv_read_init_val(initfile, TRUE, "SIM_DISK_WAIT_LEN", str_buf,
                                                &srv_sim_disk_wait_len);

        srv_read_init_val(initfile, TRUE, "SIM_DISK_WAIT_BY_YIELD", str_buf,
                                                &srv_sim_disk_wait_by_yield);

        srv_read_init_val(initfile, TRUE, "SIM_DISK_WAIT_BY_WAIT", str_buf,
                                                &srv_sim_disk_wait_by_wait);

        srv_read_init_val(initfile, TRUE, "MEASURE_CONTENTION", str_buf,
                                                &srv_measure_contention);

        srv_read_init_val(initfile, TRUE, "MEASURE_BY_SPIN", str_buf,
                                                &srv_measure_by_spin);

        srv_read_init_val(initfile, TRUE, "PRINT_THREAD_RELEASES", str_buf,
                                                &srv_print_thread_releases);

        srv_read_init_val(initfile, TRUE, "PRINT_LOCK_WAITS", str_buf,
                                                &srv_print_lock_waits);
        if (srv_print_lock_waits) {
                lock_print_waits = TRUE;
        }

        srv_read_init_val(initfile, TRUE, "PRINT_BUF_IO", str_buf,
                                                &srv_print_buf_io);
        if (srv_print_buf_io) {
                buf_debug_prints = TRUE;
        }

        srv_read_init_val(initfile, TRUE, "PRINT_LOG_IO", str_buf,
                                                &srv_print_log_io);
        if (srv_print_log_io) {
                log_debug_writes = TRUE;
        }

        srv_read_init_val(initfile, TRUE, "PRINT_PARSED_SQL", str_buf,
                                                &srv_print_parsed_sql);
        if (srv_print_parsed_sql) {
                pars_print_lexed = TRUE;
        }

        srv_read_init_val(initfile, TRUE, "PRINT_LATCH_WAITS", str_buf,
                                                &srv_print_latch_waits);

        srv_read_init_val(initfile, TRUE, "TEST_EXTRA_MUTEXES", str_buf,
                                                &srv_test_extra_mutexes);
        srv_read_init_val(initfile, TRUE, "TEST_NOCACHE", str_buf,
                                                &srv_test_nocache);
        srv_read_init_val(initfile, TRUE, "TEST_CACHE_EVICT", str_buf,
                                                &srv_test_cache_evict);

        srv_read_init_val(initfile, TRUE, "TEST_SYNC", str_buf,
                                                &srv_test_sync);
        srv_read_init_val(initfile, TRUE, "TEST_N_THREADS", str_buf,
                                                &srv_test_n_threads);
        srv_read_init_val(initfile, TRUE, "TEST_N_LOOPS", str_buf,
                                                &srv_test_n_loops);
        srv_read_init_val(initfile, TRUE, "TEST_N_FREE_RNDS", str_buf,
                                                &srv_test_n_free_rnds);
        srv_read_init_val(initfile, TRUE, "TEST_N_RESERVED_RNDS", str_buf,
                                                &srv_test_n_reserved_rnds);
        srv_read_init_val(initfile, TRUE, "TEST_N_MUTEXES", str_buf,
                                                &srv_test_n_mutexes);
        srv_read_init_val(initfile, TRUE, "TEST_ARRAY_SIZE", str_buf,
                                                &srv_test_array_size);
/*************************************************************************
Initializes the server. */

        srv_sys = mem_alloc(sizeof(srv_sys_t));

        kernel_mutex_temp = mem_alloc(sizeof(mutex_t));
        mutex_create(&kernel_mutex);
        mutex_set_level(&kernel_mutex, SYNC_KERNEL);

        srv_sys->threads = mem_alloc(OS_THREAD_MAX_N * sizeof(srv_slot_t));

        for (i = 0; i < OS_THREAD_MAX_N; i++) {
                slot = srv_table_get_nth_slot(i);
                slot->in_use = FALSE;
                slot->event = os_event_create(NULL);
        }

        srv_mysql_table = mem_alloc(OS_THREAD_MAX_N * sizeof(srv_slot_t));

        for (i = 0; i < OS_THREAD_MAX_N; i++) {
                slot = srv_mysql_table + i;
                slot->in_use = FALSE;
                slot->event = os_event_create(NULL);
        }

        srv_lock_timeout_thread_event = os_event_create(NULL);

        for (i = 0; i < SRV_MASTER + 1; i++) {
                srv_n_threads_active[i] = 0;
                srv_n_threads[i] = 0;

                srv_meter_low_water[i] = 50;
                srv_meter_high_water[i] = 100;
                srv_meter_high_water2[i] = 200;
                srv_meter_foreground[i] = 250;
        }

        srv_sys->operational = os_event_create(NULL);

        ut_a(srv_sys->operational);

        UT_LIST_INIT(srv_sys->tasks);
/*************************************************************************
Initializes the synchronization primitives, memory system, and the thread
local storage. */

void
srv_general_init(void)
/*==================*/
{
        sync_init();

        mem_init(srv_mem_pool_size);

        thr_local_init();
}
/*************************************************************************
Normalizes init parameter values to use units we use inside Innobase. */

ulint
srv_normalize_init_values(void)
/*===========================*/
                        /* out: DB_SUCCESS or error code */
{
        ulint   n;
        ulint   i;

        n = srv_n_data_files;

        for (i = 0; i < n; i++) {
                srv_data_file_sizes[i] = srv_data_file_sizes[i]
                                        * ((1024 * 1024) / UNIV_PAGE_SIZE);
        }

        srv_log_file_size = srv_log_file_size / UNIV_PAGE_SIZE;

        srv_log_buffer_size = srv_log_buffer_size / UNIV_PAGE_SIZE;

        srv_pool_size = srv_pool_size / UNIV_PAGE_SIZE;

        srv_lock_table_size = 20 * srv_pool_size;

        return(DB_SUCCESS);
}
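/* For example, if UNIV_PAGE_SIZE is 16384 bytes, a data file size given
as 128 (megabytes) becomes 128 * ((1024 * 1024) / 16384) = 8192 database
pages, while srv_log_file_size, srv_log_buffer_size and srv_pool_size
arrive as byte counts and are simply divided by the page size. (The page
size is only an example value here.) */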
/*************************************************************************
Boots the Innobase server. */

                        /* out: DB_SUCCESS or error code */

        /* Transform the init parameter values given by MySQL to
        use units we use inside Innobase: */

        err = srv_normalize_init_values();

        if (err != DB_SUCCESS) {

                return(err);
        }

        /* Initialize synchronization primitives, memory management, and
        thread local storage */

        srv_general_init();

        /* Initialize this module */

        /* Reserve the first slot for the current thread, i.e., the master
        thread */

        srv_table_reserve_slot(SRV_MASTER);
/*************************************************************************
Reserves a slot in the thread table for the current MySQL OS thread.
NOTE! The server mutex has to be reserved by the caller! */

srv_slot_t*
srv_table_reserve_slot_for_mysql(void)
/*==================================*/
                        /* out: reserved slot */
{
        srv_slot_t*     slot;
        ulint           i;

        i = 0;
        slot = srv_mysql_table + i;

        while (slot->in_use) {
                i++;

                ut_a(i < OS_THREAD_MAX_N);

                slot = srv_mysql_table + i;
        }

        ut_a(slot->in_use == FALSE);

        slot->in_use = TRUE;
        slot->id = os_thread_get_curr_id();
        slot->handle = os_thread_get_curr();

        return(slot);
}
/*******************************************************************
Puts a MySQL OS thread to wait for a lock to be released. */

ibool
srv_suspend_mysql_thread(
/*=====================*/
                                /* out: TRUE if the lock wait timeout was
                                exceeded */
        que_thr_t*      thr)    /* in: query thread associated with
                                the MySQL OS thread */
{
        srv_slot_t*     slot;
        os_event_t      event;
        double          wait_time;

        ut_ad(!mutex_own(&kernel_mutex));

        os_event_set(srv_lock_timeout_thread_event);

        mutex_enter(&kernel_mutex);

        if (thr->state == QUE_THR_RUNNING) {

                /* The lock has already been released: no need to suspend */

                mutex_exit(&kernel_mutex);

                return(FALSE);
        }

        slot = srv_table_reserve_slot_for_mysql();

        event = slot->event;

        slot->thr = thr;

        os_event_reset(event);

        slot->suspend_time = ut_time();

        /* Wake the lock timeout monitor thread, if it is suspended */

        os_event_set(srv_lock_timeout_thread_event);

        mutex_exit(&kernel_mutex);

        /* Wait for the release */

        os_event_wait(event);

        mutex_enter(&kernel_mutex);

        /* Release the slot for others to use */

        slot->in_use = FALSE;

        wait_time = ut_difftime(ut_time(), slot->suspend_time);

        mutex_exit(&kernel_mutex);

        if (srv_lock_wait_timeout < 100000000 &&
                        wait_time > (double)srv_lock_wait_timeout) {

                return(TRUE);
        }

        return(FALSE);
}
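/* Note that the boot-time value of srv_lock_wait_timeout,
1024 * 1024 * 1024, is above the 100000000 cut-off tested above, so the
timeout branch is effectively disabled until the parameter is set to a
smaller value. */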
/************************************************************************
Releases a MySQL OS thread waiting for a lock to be released, if the
thread is already suspended. */

void
srv_release_mysql_thread_if_suspended(
/*==================================*/
        que_thr_t*      thr)    /* in: query thread associated with the
                                MySQL OS thread */
{
        srv_slot_t*     slot;
        ulint           i;

        ut_ad(mutex_own(&kernel_mutex));

        for (i = 0; i < OS_THREAD_MAX_N; i++) {

                slot = srv_mysql_table + i;

                if (slot->in_use && slot->thr == thr) {

                        os_event_set(slot->event);

                        return;
                }
        }
}
/*************************************************************************
A thread which wakes up threads whose lock wait may have lasted too long. */

srv_lock_timeout_monitor_thread(
/*============================*/
                        /* out: a dummy parameter */
        void*   arg)    /* in: a dummy parameter required by
                        os_thread_create */

        /* When someone is waiting for a lock, we wake up every second
        and check if a timeout has passed for a lock wait */

        os_thread_sleep(1000000);

        mutex_enter(&kernel_mutex);

        /* Check in all slots whether a thread is waiting there, and if it
        has exceeded the time limit */

        for (i = 0; i < OS_THREAD_MAX_N; i++) {

                slot = srv_mysql_table + i;

                wait_time = ut_difftime(ut_time(), slot->suspend_time);

                if (srv_lock_wait_timeout < 100000000 &&
                        (wait_time > (double) srv_lock_wait_timeout
                        || wait_time < 0)) {

                        /* Timeout exceeded or a wrap over in system
                        time counter: cancel the lock request queued
                        by the transaction; NOTE that currently only
                        a record lock request can be waiting in
                        a queue */

                                thr_get_trx(slot->thr)->wait_lock);
                }
        }

        os_event_reset(srv_lock_timeout_thread_event);

        mutex_exit(&kernel_mutex);

        /* No one was waiting for a lock: suspend this thread */

        os_event_wait(srv_lock_timeout_thread_event);
/***********************************************************************
Tells the Innobase server that there has been activity in the database
and wakes up the master thread if it is suspended (not sleeping). Used
in the MySQL interface. Note that there is a small chance that the master
thread stays suspended (we do not protect our operation with the kernel
mutex, for performance reasons). */

void
srv_active_wake_master_thread(void)
/*===============================*/
{
        srv_activity_count++;

        if (srv_n_threads_active[SRV_MASTER] == 0) {

                mutex_enter(&kernel_mutex);

                srv_release_threads(SRV_MASTER, 1);

                mutex_exit(&kernel_mutex);
        }
}
/*************************************************************************
The master thread controlling the server. */

                        /* out: a dummy parameter */
        void*   arg)    /* in: a dummy parameter required by
                        os_thread_create */

        os_event_t      event;
        ulint           old_activity_count;
        ulint           n_pages_purged;
        ulint           n_bytes_merged;
        ulint           n_pages_flushed;
        ulint           n_bytes_archived;
        ulint           i;

        srv_table_reserve_slot(SRV_MASTER);

        mutex_enter(&kernel_mutex);

        srv_n_threads_active[SRV_MASTER]++;

        mutex_exit(&kernel_mutex);

        os_event_set(srv_sys->operational);
loop:
        mutex_enter(&kernel_mutex);

        old_activity_count = srv_activity_count;

        mutex_exit(&kernel_mutex);

        /* We run purge every 10 seconds, even if the server were active: */

        for (i = 0; i < 10; i++) {
                os_thread_sleep(1000000);

                if (srv_activity_count == old_activity_count) {

                        if (srv_print_thread_releases) {
                                printf("Master thread wakes up!\n");
                        }

                        goto background_loop;
                }
        }

        if (srv_print_thread_releases) {
                printf("Master thread wakes up!\n");
        }

        n_pages_purged = 1;

        while (n_pages_purged) {
                n_pages_purged = trx_purge();
                /* TODO: replace this by a check if we are running
                out of file space! */
        }

background_loop:
        /* In this loop we run background operations while the server
        is quiet */

        mutex_enter(&kernel_mutex);
        if (srv_activity_count != old_activity_count) {
                mutex_exit(&kernel_mutex);

                goto loop;
        }
        old_activity_count = srv_activity_count;
        mutex_exit(&kernel_mutex);

        /* The server has been quiet for a while: start running background
        operations */

        n_pages_purged = trx_purge();

        mutex_enter(&kernel_mutex);
        if (srv_activity_count != old_activity_count) {
                mutex_exit(&kernel_mutex);

                goto loop;
        }
        mutex_exit(&kernel_mutex);

        n_bytes_merged = ibuf_contract(TRUE);

        mutex_enter(&kernel_mutex);
        if (srv_activity_count != old_activity_count) {
                mutex_exit(&kernel_mutex);

                goto loop;
        }
        mutex_exit(&kernel_mutex);

        n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, 20, ut_dulint_max);

        mutex_enter(&kernel_mutex);
        if (srv_activity_count != old_activity_count) {
                mutex_exit(&kernel_mutex);

                goto loop;
        }
        mutex_exit(&kernel_mutex);

        buf_flush_wait_batch_end(BUF_FLUSH_LIST);

        log_checkpoint(TRUE, FALSE);

        mutex_enter(&kernel_mutex);
        if (srv_activity_count != old_activity_count) {
                mutex_exit(&kernel_mutex);

                goto loop;
        }
        mutex_exit(&kernel_mutex);

        log_archive_do(FALSE, &n_bytes_archived);

        if (n_pages_purged + n_bytes_merged + n_pages_flushed
                                        + n_bytes_archived != 0) {
                goto background_loop;
        }

        /* mem_print_new_info(); */

#ifdef UNIV_SEARCH_PERF_STAT
        /* btr_search_print_info(); */
#endif

        /* There is no work for background operations either: suspend
        master thread to wait for more server activity */

        mutex_enter(&kernel_mutex);

        event = srv_suspend_thread();

        mutex_exit(&kernel_mutex);

        os_event_wait(event);