/* Copyright (C) 2003 MySQL AB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
18
#define DBTUP_PAGE_MAP_CPP
20
#include <RefConvert.hpp>
21
#include <ndb_limits.h>
// PageMap is a service used by Dbtup to map logical page id's to physical
// page id's. The mapping needs the fragment and the logical page id to
// provide the physical id.
//
// This is a part of Dbtup which is the exclusive user of a certain set of
// variables on the fragment record and it is the exclusive user of the
// struct for page ranges.
//
// The following methods operate on the data handled by the page map class.
//
// insertPageRange(Uint32 startPageId,    # In
//                 Uint32 noPages)        # In
// Inserts a range of pages into the mapping structure.
//
// void releaseFragPages()
// Releases all pages and their mappings belonging to a fragment.
//
// Uint32 allocFragPages(Uint32 tafpNoAllocRequested)
// Allocate a set of pages to the fragment from the page manager.
//
// Uint32 getEmptyPage()
// Get an empty page from the pool of empty pages on the fragment.
// It returns the physical page id of the empty page.
// Returns RNIL if no empty page is available.
//
// Uint32 getRealpid(Uint32 logicalPageId)
// Return the physical page id provided the logical page id.
//
// void initializePageRange()
// Initialise free list of page ranges and initialise the page range records.
//
// void initFragRange()
// Initialise the fragment variables when allocating a fragment to a table.
//
// void initPageRangeSize(Uint32 size)
// Initialise the number of page ranges.
//
// Uint32 getNoOfPages()
// Get the number of pages on the fragment currently.
//
// Uint32 leafPageRangeFull(PageRangePtr currPageRangePtr)
//
// void errorHandler()
// Method to crash NDB kernel in case of weird data set-up.
//
// void allocMoreFragPages()
// When no more empty pages are attached to the fragment and we need more
// we allocate more pages from the page manager using this method.
//
// On the fragment record:
// currentPageRange  # The current page range where to insert the next range
// rootPageRange     # The root of the page ranges owned
// nextStartRange    # The next page id to assign when expanding the
//                   # page range
// noOfPages         # The number of pages in the fragment
// emptyPrimPage     # The first page of the empty pages in the fragment
//
// The full page range struct
89
Uint32 Dbtup::getEmptyPage(Fragrecord* regFragPtr)
91
Uint32 pageId = regFragPtr->emptyPrimPage.firstItem;
94
allocMoreFragPages(regFragPtr);
95
pageId = regFragPtr->emptyPrimPage.firstItem;
102
LocalDLList<Page> alloc_pages(c_page_pool, regFragPtr->emptyPrimPage);
103
alloc_pages.getPtr(pagePtr, pageId);
104
alloc_pages.remove(pagePtr);
106
}//Dbtup::getEmptyPage()
108
Uint32 Dbtup::getRealpid(Fragrecord* regFragPtr, Uint32 logicalPageId)
110
PageRangePtr grpPageRangePtr;
112
Uint32 loopCount = 0;
113
Uint32 pageRangeLimit = cnoOfPageRangeRec;
114
ndbassert(logicalPageId < getNoOfPages(regFragPtr));
115
grpPageRangePtr.i = regFragPtr->rootPageRange;
117
ndbassert(loopCount++ < 100);
118
ndbrequire(grpPageRangePtr.i < pageRangeLimit);
119
ptrAss(grpPageRangePtr, pageRange);
120
loopLimit = grpPageRangePtr.p->currentIndexPos;
121
ndbrequire(loopLimit <= 3);
122
for (Uint32 i = 0; i <= loopLimit; i++) {
124
if (grpPageRangePtr.p->startRange[i] <= logicalPageId) {
125
if (grpPageRangePtr.p->endRange[i] >= logicalPageId) {
126
if (grpPageRangePtr.p->type[i] == ZLEAF) {
128
Uint32 realPageId = (logicalPageId - grpPageRangePtr.p->startRange[i]) +
129
grpPageRangePtr.p->basePageId[i];
132
ndbrequire(grpPageRangePtr.p->type[i] == ZNON_LEAF);
133
grpPageRangePtr.i = grpPageRangePtr.p->basePageId[i];
140
}//Dbtup::getRealpid()
142
Uint32 Dbtup::getNoOfPages(Fragrecord* const regFragPtr)
144
return regFragPtr->noOfPages;
145
}//Dbtup::getNoOfPages()
147
void Dbtup::initPageRangeSize(Uint32 size)
149
cnoOfPageRangeRec = size;
150
}//Dbtup::initPageRangeSize()
152
/* ---------------------------------------------------------------- */
153
/* ----------------------- INSERT_PAGE_RANGE_TAB ------------------ */
154
/* ---------------------------------------------------------------- */
155
/* INSERT A PAGE RANGE INTO THE FRAGMENT */
157
/* NOTE: THE METHOD IS ATOMIC. EITHER THE ACTION IS */
158
/* PERFORMED FULLY OR NO ACTION IS PERFORMED AT ALL. */
159
/* TO SUPPORT THIS THE CODE HAS A CLEANUP PART AFTER */
161
/* ---------------------------------------------------------------- */
162
bool Dbtup::insertPageRangeTab(Fragrecord* const regFragPtr,
166
PageRangePtr currPageRangePtr;
167
if (cfirstfreerange == RNIL) {
171
currPageRangePtr.i = regFragPtr->currentPageRange;
172
if (currPageRangePtr.i == RNIL) {
174
/* ---------------------------------------------------------------- */
175
/* THE FIRST PAGE RANGE IS HANDLED WITH SPECIAL CODE */
176
/* ---------------------------------------------------------------- */
177
seizePagerange(currPageRangePtr);
178
regFragPtr->rootPageRange = currPageRangePtr.i;
179
currPageRangePtr.p->currentIndexPos = 0;
180
currPageRangePtr.p->parentPtr = RNIL;
183
ptrCheckGuard(currPageRangePtr, cnoOfPageRangeRec, pageRange);
184
if (currPageRangePtr.p->currentIndexPos < 3) {
186
/* ---------------------------------------------------------------- */
187
/* THE SIMPLE CASE WHEN IT IS ONLY NECESSARY TO FILL IN THE */
188
/* NEXT EMPTY POSITION IN THE PAGE RANGE RECORD IS TREATED */
189
/* BY COMMON CODE AT THE END OF THE SUBROUTINE. */
190
/* ---------------------------------------------------------------- */
191
currPageRangePtr.p->currentIndexPos++;
194
ndbrequire(currPageRangePtr.p->currentIndexPos == 3);
195
currPageRangePtr.i = leafPageRangeFull(regFragPtr, currPageRangePtr);
196
if (currPageRangePtr.i == RNIL) {
199
ptrCheckGuard(currPageRangePtr, cnoOfPageRangeRec, pageRange);
202
currPageRangePtr.p->startRange[currPageRangePtr.p->currentIndexPos] = regFragPtr->nextStartRange;
203
/* ---------------------------------------------------------------- */
204
/* NOW SET THE LEAF LEVEL PAGE RANGE RECORD PROPERLY */
205
/* PAGE_RANGE_PTR REFERS TO LEAF RECORD WHEN ARRIVING HERE */
206
/* ---------------------------------------------------------------- */
207
currPageRangePtr.p->endRange[currPageRangePtr.p->currentIndexPos] =
208
(regFragPtr->nextStartRange + noPages) - 1;
209
currPageRangePtr.p->basePageId[currPageRangePtr.p->currentIndexPos] = startPageId;
210
currPageRangePtr.p->type[currPageRangePtr.p->currentIndexPos] = ZLEAF;
211
/* ---------------------------------------------------------------- */
212
/* WE NEED TO UPDATE THE CURRENT PAGE RANGE IN CASE IT HAS */
213
/* CHANGED. WE ALSO NEED TO UPDATE THE NEXT START RANGE */
214
/* ---------------------------------------------------------------- */
215
regFragPtr->currentPageRange = currPageRangePtr.i;
216
regFragPtr->nextStartRange += noPages;
217
/* ---------------------------------------------------------------- */
218
/* WE NEED TO UPDATE THE END RANGE IN ALL PAGE RANGE RECORDS */
219
/* UP TO THE ROOT. */
220
/* ---------------------------------------------------------------- */
221
PageRangePtr loopPageRangePtr;
222
loopPageRangePtr = currPageRangePtr;
225
loopPageRangePtr.i = loopPageRangePtr.p->parentPtr;
226
if (loopPageRangePtr.i != RNIL) {
228
ptrCheckGuard(loopPageRangePtr, cnoOfPageRangeRec, pageRange);
229
ndbrequire(loopPageRangePtr.p->currentIndexPos < 4);
230
loopPageRangePtr.p->endRange[loopPageRangePtr.p->currentIndexPos] += noPages;
236
regFragPtr->noOfPages += noPages;
238
}//Dbtup::insertPageRangeTab()
241
void Dbtup::releaseFragPages(Fragrecord* regFragPtr)
243
if (regFragPtr->rootPageRange == RNIL) {
247
PageRangePtr regPRPtr;
248
regPRPtr.i = regFragPtr->rootPageRange;
249
ptrCheckGuard(regPRPtr, cnoOfPageRangeRec, pageRange);
252
const Uint32 indexPos = regPRPtr.p->currentIndexPos;
253
ndbrequire(indexPos < 4);
255
const Uint32 basePageId = regPRPtr.p->basePageId[indexPos];
256
regPRPtr.p->basePageId[indexPos] = RNIL;
257
if (basePageId == RNIL) {
260
* Finished with indexPos continue with next
264
regPRPtr.p->currentIndexPos--;
268
/* ---------------------------------------------------------------- */
269
/* THE PAGE RANGE REC IS EMPTY. RELEASE IT. */
270
/*----------------------------------------------------------------- */
271
Uint32 parentPtr = regPRPtr.p->parentPtr;
272
releasePagerange(regPRPtr);
274
if (parentPtr != RNIL) {
276
regPRPtr.i = parentPtr;
277
ptrCheckGuard(regPRPtr, cnoOfPageRangeRec, pageRange);
282
ndbrequire(regPRPtr.i == regFragPtr->rootPageRange);
283
initFragRange(regFragPtr);
284
for (Uint32 i = 0; i<MAX_FREE_LIST; i++)
286
LocalDLList<Page> tmp(c_page_pool, regFragPtr->free_var_page_array[i]);
291
LocalDLList<Page> tmp(c_page_pool, regFragPtr->emptyPrimPage);
296
LocalDLFifoList<Page> tmp(c_page_pool, regFragPtr->thFreeFirst);
301
LocalSLList<Page> tmp(c_page_pool, regFragPtr->m_empty_pages);
307
if (regPRPtr.p->type[indexPos] == ZNON_LEAF) {
309
/* ---------------------------------------------------------------- */
310
// A non-leaf node, we must release everything below it before we
311
// release this node.
312
/* ---------------------------------------------------------------- */
313
regPRPtr.i = basePageId;
314
ptrCheckGuard(regPRPtr, cnoOfPageRangeRec, pageRange);
317
ndbrequire(regPRPtr.p->type[indexPos] == ZLEAF);
318
/* ---------------------------------------------------------------- */
319
/* PAGE_RANGE_PTR /= RNIL AND THE CURRENT POS IS NOT A CHLED. */
320
/*----------------------------------------------------------------- */
321
const Uint32 start = regPRPtr.p->startRange[indexPos];
322
const Uint32 stop = regPRPtr.p->endRange[indexPos];
323
ndbrequire(stop >= start);
324
const Uint32 retNo = (stop - start + 1);
325
returnCommonArea(basePageId, retNo);
329
}//Dbtup::releaseFragPages()
331
void Dbtup::initializePageRange()
333
PageRangePtr regPTRPtr;
334
for (regPTRPtr.i = 0;
335
regPTRPtr.i < cnoOfPageRangeRec; regPTRPtr.i++) {
336
ptrAss(regPTRPtr, pageRange);
337
regPTRPtr.p->nextFree = regPTRPtr.i + 1;
339
regPTRPtr.i = cnoOfPageRangeRec - 1;
340
ptrAss(regPTRPtr, pageRange);
341
regPTRPtr.p->nextFree = RNIL;
343
c_noOfFreePageRanges = cnoOfPageRangeRec;
344
}//Dbtup::initializePageRange()
346
void Dbtup::initFragRange(Fragrecord* const regFragPtr)
348
regFragPtr->rootPageRange = RNIL;
349
regFragPtr->currentPageRange = RNIL;
350
regFragPtr->noOfPages = 0;
351
regFragPtr->noOfVarPages = 0;
352
regFragPtr->noOfPagesToGrow = 2;
353
regFragPtr->nextStartRange = 0;
356
Uint32 Dbtup::allocFragPages(Fragrecord* regFragPtr, Uint32 tafpNoAllocRequested)
{
  /* ---------------------------------------------------------------- */
  /* Allocate up to tafpNoAllocRequested pages from the common page   */
  /* manager, register them in the page map and put them on the       */
  /* fragment's empty-page list. Returns the number of pages actually */
  /* allocated, which may be less on resource shortage.               */
  /* NOTE(review): the outer loop and several declarations were       */
  /* reconstructed from a garbled paste; confirm against VCS history. */
  /* ---------------------------------------------------------------- */
  Uint32 tafpPagesAllocated = 0;
  while (true) {
    Uint32 noOfPagesAllocated = 0;
    Uint32 noPagesToAllocate = tafpNoAllocRequested - tafpPagesAllocated;
    Uint32 retPageRef = RNIL;
    allocConsPages(noPagesToAllocate, noOfPagesAllocated, retPageRef);
    if (noOfPagesAllocated == 0) {
      /* Out of pages: report what was allocated so far. */
      return tafpPagesAllocated;
    }//if
    /* ---------------------------------------------------------------- */
    /*       IT IS NOW TIME TO PUT THE ALLOCATED AREA INTO THE PAGE     */
    /*       RANGE TABLE.                                               */
    /* ---------------------------------------------------------------- */
    Uint32 startRange = regFragPtr->nextStartRange;
    if (!insertPageRangeTab(regFragPtr, retPageRef, noOfPagesAllocated)) {
      /* Mapping failed: hand the pages back so no state is changed. */
      returnCommonArea(retPageRef, noOfPagesAllocated);
      return tafpPagesAllocated;
    }//if
    tafpPagesAllocated += noOfPagesAllocated;
    Uint32 loopLimit = retPageRef + noOfPagesAllocated;
    PagePtr loopPagePtr;
    /* ---------------------------------------------------------------- */
    /*       SINCE A NUMBER OF PAGES WERE ALLOCATED FROM COMMON AREA    */
    /*       WITH SUCCESS IT IS NOW TIME TO CHANGE THE STATE OF         */
    /*       THOSE PAGES TO EMPTY_MM AND LINK THEM INTO THE EMPTY       */
    /*       PAGE LIST OF THE FRAGMENT.                                 */
    /* ---------------------------------------------------------------- */
    Uint32 prev = RNIL;
    for (loopPagePtr.i = retPageRef; loopPagePtr.i < loopLimit; loopPagePtr.i++) {
      c_page_pool.getPtr(loopPagePtr);
      loopPagePtr.p->page_state = ZEMPTY_MM;
      loopPagePtr.p->frag_page_id = startRange +
        (loopPagePtr.i - retPageRef);
      loopPagePtr.p->physical_page_id = loopPagePtr.i;
      loopPagePtr.p->nextList = loopPagePtr.i + 1;
      loopPagePtr.p->prevList = prev;
      prev = loopPagePtr.i;
    }//for
    /* Terminate the chain at the last page of the run. */
    loopPagePtr.i--;
    ndbassert(loopPagePtr.p == c_page_pool.getPtr(loopPagePtr.i));
    loopPagePtr.p->nextList = RNIL;
    LocalDLList<Page> alloc(c_page_pool, regFragPtr->emptyPrimPage);
    if (noOfPagesAllocated > 1)
    {
      /* Splice the pre-linked run into the list in one operation. */
      alloc.add(retPageRef, loopPagePtr);
    }
    else
    {
      alloc.add(loopPagePtr);
    }
    /* ---------------------------------------------------------------- */
    /*       WAS ENOUGH PAGES ALLOCATED OR ARE MORE NEEDED.             */
    /* ---------------------------------------------------------------- */
    if (tafpPagesAllocated < tafpNoAllocRequested) {
      continue;
    } else {
      ndbrequire(tafpPagesAllocated == tafpNoAllocRequested);
      return tafpNoAllocRequested;
    }//if
  }//while
}//Dbtup::allocFragPages()
426
void Dbtup::allocMoreFragPages(Fragrecord* const regFragPtr)
428
Uint32 noAllocPages = regFragPtr->noOfPagesToGrow >> 3; // 12.5%
429
noAllocPages += regFragPtr->noOfPagesToGrow >> 4; // 6.25%
431
/* -----------------------------------------------------------------*/
432
// We will grow by 18.75% plus two more additional pages to grow
433
// a little bit quicker in the beginning.
434
/* -----------------------------------------------------------------*/
436
if (noAllocPages > m_max_allocate_pages)
438
noAllocPages = m_max_allocate_pages;
440
Uint32 allocated = allocFragPages(regFragPtr, noAllocPages);
441
regFragPtr->noOfPagesToGrow += allocated;
442
}//Dbtup::allocMoreFragPages()
444
Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr currPageRangePtr)
446
/* ---------------------------------------------------------------- */
447
/* THE COMPLEX CASE WHEN THE LEAF NODE IS FULL. GO UP THE TREE*/
448
/* TO FIND THE FIRST RECORD WITH A FREE ENTRY. ALLOCATE NEW */
449
/* PAGE RANGE RECORDS THEN ALL THE WAY DOWN TO THE LEAF LEVEL */
450
/* AGAIN. THE TREE SHOULD ALWAYS REMAIN BALANCED. */
451
/* ---------------------------------------------------------------- */
452
PageRangePtr parentPageRangePtr;
453
PageRangePtr foundPageRangePtr;
454
parentPageRangePtr = currPageRangePtr;
455
Uint32 tiprNoLevels = 1;
458
parentPageRangePtr.i = parentPageRangePtr.p->parentPtr;
459
if (parentPageRangePtr.i == RNIL) {
461
/* ---------------------------------------------------------------- */
462
/* WE HAVE REACHED THE ROOT. A NEW ROOT MUST BE ALLOCATED. */
463
/* ---------------------------------------------------------------- */
464
if (c_noOfFreePageRanges < tiprNoLevels) {
468
PageRangePtr oldRootPRPtr;
469
PageRangePtr newRootPRPtr;
470
oldRootPRPtr.i = regFragPtr->rootPageRange;
471
ptrCheckGuard(oldRootPRPtr, cnoOfPageRangeRec, pageRange);
472
seizePagerange(newRootPRPtr);
473
regFragPtr->rootPageRange = newRootPRPtr.i;
474
oldRootPRPtr.p->parentPtr = newRootPRPtr.i;
476
newRootPRPtr.p->basePageId[0] = oldRootPRPtr.i;
477
newRootPRPtr.p->parentPtr = RNIL;
478
newRootPRPtr.p->startRange[0] = 0;
479
newRootPRPtr.p->endRange[0] = regFragPtr->nextStartRange - 1;
480
newRootPRPtr.p->type[0] = ZNON_LEAF;
481
newRootPRPtr.p->startRange[1] = regFragPtr->nextStartRange;
482
newRootPRPtr.p->endRange[1] = regFragPtr->nextStartRange - 1;
483
newRootPRPtr.p->type[1] = ZNON_LEAF;
484
newRootPRPtr.p->currentIndexPos = 1;
485
foundPageRangePtr = newRootPRPtr;
489
ptrCheckGuard(parentPageRangePtr, cnoOfPageRangeRec, pageRange);
490
if (parentPageRangePtr.p->currentIndexPos < 3) {
493
if (c_noOfFreePageRanges < tiprNoLevels)
499
/* ---------------------------------------------------------------- */
500
/* WE HAVE FOUND AN EMPTY ENTRY IN A PAGE RANGE RECORD. */
501
/* ALLOCATE A NEW PAGE RANGE RECORD, FILL IN THE START RANGE, */
502
/* ALLOCATE A NEW PAGE RANGE RECORD AND UPDATE THE POINTERS */
503
/* ---------------------------------------------------------------- */
504
parentPageRangePtr.p->currentIndexPos++;
505
parentPageRangePtr.p->startRange[parentPageRangePtr.p->currentIndexPos] = regFragPtr->nextStartRange;
506
parentPageRangePtr.p->endRange[parentPageRangePtr.p->currentIndexPos] = regFragPtr->nextStartRange - 1;
507
parentPageRangePtr.p->type[parentPageRangePtr.p->currentIndexPos] = ZNON_LEAF;
508
foundPageRangePtr = parentPageRangePtr;
512
ndbrequire(parentPageRangePtr.p->currentIndexPos == 3);
513
/* ---------------------------------------------------------------- */
514
/* THE PAGE RANGE RECORD WAS FULL. FIND THE PARENT RECORD */
515
/* AND INCREASE THE NUMBER OF LEVELS WE HAVE TRAVERSED */
516
/* GOING UP THE TREE. */
517
/* ---------------------------------------------------------------- */
522
/* ---------------------------------------------------------------- */
523
/* REMEMBER THE ERROR LEVEL IN CASE OF ALLOCATION ERRORS */
524
/* ---------------------------------------------------------------- */
525
PageRangePtr newPageRangePtr;
526
PageRangePtr prevPageRangePtr;
527
prevPageRangePtr = foundPageRangePtr;
528
if (c_noOfFreePageRanges < tiprNoLevels) {
532
/* ---------------------------------------------------------------- */
533
/* NOW WE HAVE PERFORMED THE SEARCH UPWARDS AND FILLED IN THE */
534
/* PROPER FIELDS IN THE PAGE RANGE RECORD WHERE SOME SPACE */
535
/* WAS FOUND. THE NEXT STEP IS TO ALLOCATE PAGE RANGES SO */
536
/* THAT WE KEEP THE B-TREE BALANCED. THE NEW PAGE RANGE */
537
/* ARE ALSO PROPERLY UPDATED ON THE PATH TO THE LEAF LEVEL. */
538
/* ---------------------------------------------------------------- */
541
seizePagerange(newPageRangePtr);
543
ndbrequire(prevPageRangePtr.p->currentIndexPos < 4);
544
prevPageRangePtr.p->basePageId[prevPageRangePtr.p->currentIndexPos] = newPageRangePtr.i;
545
newPageRangePtr.p->parentPtr = prevPageRangePtr.i;
546
newPageRangePtr.p->currentIndexPos = 0;
547
if (tiprNoLevels > 0) {
549
newPageRangePtr.p->startRange[0] = regFragPtr->nextStartRange;
550
newPageRangePtr.p->endRange[0] = regFragPtr->nextStartRange - 1;
551
newPageRangePtr.p->type[0] = ZNON_LEAF;
552
prevPageRangePtr = newPageRangePtr;
558
return newPageRangePtr.i;
559
}//Dbtup::leafPageRangeFull()
561
void Dbtup::releasePagerange(PageRangePtr regPRPtr)
563
regPRPtr.p->nextFree = cfirstfreerange;
564
cfirstfreerange = regPRPtr.i;
565
c_noOfFreePageRanges++;
566
}//Dbtup::releasePagerange()
568
void Dbtup::seizePagerange(PageRangePtr& regPageRangePtr)
570
regPageRangePtr.i = cfirstfreerange;
571
ptrCheckGuard(regPageRangePtr, cnoOfPageRangeRec, pageRange);
572
cfirstfreerange = regPageRangePtr.p->nextFree;
573
regPageRangePtr.p->nextFree = RNIL;
574
regPageRangePtr.p->currentIndexPos = 0;
575
regPageRangePtr.p->parentPtr = RNIL;
576
for (Uint32 i = 0; i < 4; i++) {
577
regPageRangePtr.p->startRange[i] = 1;
578
regPageRangePtr.p->endRange[i] = 0;
579
regPageRangePtr.p->type[i] = ZNON_LEAF;
580
regPageRangePtr.p->basePageId[i] = (Uint32)-1;
582
c_noOfFreePageRanges--;
583
}//Dbtup::seizePagerange()
585
void Dbtup::errorHandler(Uint32 errorCode)
601
}//Dbtup::errorHandler()