// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/db_impl.h"

#include <algorithm>
#include <set>
#include <string>
#include <stdint.h>
#include <stdio.h>
#include <vector>
#include "db/builder.h"
#include "db/db_iter.h"
#include "db/dbformat.h"
#include "db/filename.h"
#include "db/log_reader.h"
#include "db/log_writer.h"
#include "db/memtable.h"
#include "db/table_cache.h"
#include "db/version_set.h"
#include "db/write_batch_internal.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/status.h"
#include "leveldb/table.h"
#include "leveldb/table_builder.h"
#include "port/port.h"
#include "table/block.h"
#include "table/merger.h"
#include "table/two_level_iterator.h"
#include "util/coding.h"
#include "util/logging.h"
#include "util/mutexlock.h"

namespace leveldb {

struct DBImpl::CompactionState {
  Compaction* const compaction;

  // Sequence numbers < smallest_snapshot are not significant since we
  // will never have to service a snapshot below smallest_snapshot.
  // Therefore if we have seen a sequence number S <= smallest_snapshot,
  // we can drop all entries for the same key with sequence numbers < S.
  SequenceNumber smallest_snapshot;

  // Files produced by compaction
  struct Output {
    uint64_t number;
    uint64_t file_size;
    InternalKey smallest, largest;
  };
  std::vector<Output> outputs;

  // State kept for output being generated
  WritableFile* outfile;
  TableBuilder* builder;

  uint64_t total_bytes;

  Output* current_output() { return &outputs[outputs.size()-1]; }

  explicit CompactionState(Compaction* c)
      : compaction(c),
        outfile(NULL),
        builder(NULL),
        total_bytes(0) {
  }
};

// Fix user-supplied options to be reasonable
template <class T, class V>
static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
  if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
  if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
}

Options SanitizeOptions(const std::string& dbname,
                        const InternalKeyComparator* icmp,
                        const Options& src) {
  Options result = src;
  result.comparator = icmp;
  ClipToRange(&result.max_open_files, 20, 50000);
  ClipToRange(&result.write_buffer_size, 64<<10, 1<<30);
  ClipToRange(&result.block_size, 1<<10, 4<<20);
  if (result.info_log == NULL) {
    // Open a log file in the same directory as the db
    src.env->CreateDir(dbname);  // In case it does not exist
    src.env->RenameFile(InfoLogFileName(dbname), OldInfoLogFileName(dbname));
    Status s = src.env->NewLogger(InfoLogFileName(dbname), &result.info_log);
    if (!s.ok()) {
      // No place suitable for logging
      result.info_log = NULL;
    }
  }
  if (result.block_cache == NULL) {
    result.block_cache = NewLRUCache(8 << 20);
  }
  return result;
}

DBImpl::DBImpl(const Options& options, const std::string& dbname)
    : env_(options.env),
      internal_comparator_(options.comparator),
      options_(SanitizeOptions(dbname, &internal_comparator_, options)),
      owns_info_log_(options_.info_log != options.info_log),
      owns_cache_(options_.block_cache != options.block_cache),
      dbname_(dbname),
      db_lock_(NULL),
      shutting_down_(NULL),
      bg_cv_(&mutex_),
      mem_(new MemTable(internal_comparator_)),
      imm_(NULL),
      logfile_(NULL),
      logfile_number_(0),
      log_(NULL),
      logger_(NULL),
      logger_cv_(&mutex_),
      bg_compaction_scheduled_(false),
      manual_compaction_(NULL) {
  mem_->Ref();
  has_imm_.Release_Store(NULL);

  // Reserve ten files or so for other uses and give the rest to TableCache.
  const int table_cache_size = options.max_open_files - 10;
  table_cache_ = new TableCache(dbname_, &options_, table_cache_size);

  versions_ = new VersionSet(dbname_, &options_, table_cache_,
                             &internal_comparator_);
}

DBImpl::~DBImpl() {
  // Wait for background work to finish
  mutex_.Lock();
  shutting_down_.Release_Store(this);  // Any non-NULL value is ok
  while (bg_compaction_scheduled_) {
    bg_cv_.Wait();
  }
  mutex_.Unlock();

  if (db_lock_ != NULL) {
    env_->UnlockFile(db_lock_);
  }

  delete versions_;
  if (mem_ != NULL) mem_->Unref();
  if (imm_ != NULL) imm_->Unref();
  delete log_;
  delete logfile_;
  delete table_cache_;

  if (owns_info_log_) {
    delete options_.info_log;
  }
  if (owns_cache_) {
    delete options_.block_cache;
  }
}

Status DBImpl::NewDB() {
  VersionEdit new_db;
  new_db.SetComparatorName(user_comparator()->Name());
  new_db.SetLogNumber(0);
  new_db.SetNextFile(2);
  new_db.SetLastSequence(0);

  const std::string manifest = DescriptorFileName(dbname_, 1);
  WritableFile* file;
  Status s = env_->NewWritableFile(manifest, &file);
  if (!s.ok()) {
    return s;
  }
  {
    log::Writer log(file);
    std::string record;
    new_db.EncodeTo(&record);
    s = log.AddRecord(record);
    if (s.ok()) {
      s = file->Close();
    }
  }
  delete file;
  if (s.ok()) {
    // Make "CURRENT" file that points to the new manifest file.
    s = SetCurrentFile(env_, dbname_, 1);
  } else {
    env_->DeleteFile(manifest);
  }
  return s;
}

void DBImpl::MaybeIgnoreError(Status* s) const {
  if (s->ok() || options_.paranoid_checks) {
    // No change needed
  } else {
    Log(options_.info_log, "Ignoring error %s", s->ToString().c_str());
    *s = Status::OK();
  }
}

void DBImpl::DeleteObsoleteFiles() {
  // Make a set of all of the live files
  std::set<uint64_t> live = pending_outputs_;
  versions_->AddLiveFiles(&live);

  std::vector<std::string> filenames;
  env_->GetChildren(dbname_, &filenames);  // Ignoring errors on purpose
  uint64_t number;
  FileType type;
  for (size_t i = 0; i < filenames.size(); i++) {
    if (ParseFileName(filenames[i], &number, &type)) {
      bool keep = true;
      switch (type) {
        case kLogFile:
          keep = ((number >= versions_->LogNumber()) ||
                  (number == versions_->PrevLogNumber()));
          break;
        case kDescriptorFile:
          // Keep my manifest file, and any newer incarnations'
          // (in case there is a race that allows other incarnations)
          keep = (number >= versions_->ManifestFileNumber());
          break;
        case kTableFile:
          keep = (live.find(number) != live.end());
          break;
        case kTempFile:
          // Any temp files that are currently being written to must
          // be recorded in pending_outputs_, which is inserted into "live"
          keep = (live.find(number) != live.end());
          break;
        case kCurrentFile:
        case kDBLockFile:
        case kInfoLogFile:
          keep = true;
          break;
      }

      if (!keep) {
        if (type == kTableFile) {
          table_cache_->Evict(number);
        }
        Log(options_.info_log, "Delete type=%d #%lld\n",
            int(type),
            static_cast<unsigned long long>(number));
        env_->DeleteFile(dbname_ + "/" + filenames[i]);
      }
    }
  }
}

Status DBImpl::Recover(VersionEdit* edit) {
  mutex_.AssertHeld();

  // Ignore error from CreateDir since the creation of the DB is
  // committed only when the descriptor is created, and this directory
  // may already exist from a previous failed creation attempt.
  env_->CreateDir(dbname_);
  assert(db_lock_ == NULL);
  Status s = env_->LockFile(LockFileName(dbname_), &db_lock_);
  if (!s.ok()) {
    return s;
  }

  if (!env_->FileExists(CurrentFileName(dbname_))) {
    if (options_.create_if_missing) {
      s = NewDB();
      if (!s.ok()) {
        return s;
      }
    } else {
      return Status::InvalidArgument(
          dbname_, "does not exist (create_if_missing is false)");
    }
  } else {
    if (options_.error_if_exists) {
      return Status::InvalidArgument(
          dbname_, "exists (error_if_exists is true)");
    }
  }

  s = versions_->Recover();
  if (s.ok()) {
    SequenceNumber max_sequence(0);

    // Recover from all newer log files than the ones named in the
    // descriptor (new log files may have been added by the previous
    // incarnation without registering them in the descriptor).
    //
    // Note that PrevLogNumber() is no longer used, but we pay
    // attention to it in case we are recovering a database
    // produced by an older version of leveldb.
    const uint64_t min_log = versions_->LogNumber();
    const uint64_t prev_log = versions_->PrevLogNumber();
    std::vector<std::string> filenames;
    s = env_->GetChildren(dbname_, &filenames);
    if (!s.ok()) {
      return s;
    }
    uint64_t number;
    FileType type;
    std::vector<uint64_t> logs;
    for (size_t i = 0; i < filenames.size(); i++) {
      if (ParseFileName(filenames[i], &number, &type)
          && type == kLogFile
          && ((number >= min_log) || (number == prev_log))) {
        logs.push_back(number);
      }
    }

    // Recover in the order in which the logs were generated
    std::sort(logs.begin(), logs.end());
    for (size_t i = 0; i < logs.size(); i++) {
      s = RecoverLogFile(logs[i], edit, &max_sequence);

      // The previous incarnation may not have written any MANIFEST
      // records after allocating this log number. So we manually
      // update the file number allocation counter in VersionSet.
      versions_->MarkFileNumberUsed(logs[i]);
    }

    if (versions_->LastSequence() < max_sequence) {
      versions_->SetLastSequence(max_sequence);
    }
  }

  return s;
}

Status DBImpl::RecoverLogFile(uint64_t log_number,
                              VersionEdit* edit,
                              SequenceNumber* max_sequence) {
  struct LogReporter : public log::Reader::Reporter {
    Env* env;
    Logger* info_log;
    const char* fname;
    Status* status;  // NULL if options_.paranoid_checks==false
    virtual void Corruption(size_t bytes, const Status& s) {
      Log(info_log, "%s%s: dropping %d bytes; %s",
          (this->status == NULL ? "(ignoring error) " : ""),
          fname, static_cast<int>(bytes), s.ToString().c_str());
      if (this->status != NULL && this->status->ok()) *this->status = s;
    }
  };

  mutex_.AssertHeld();

  // Open the log file
  std::string fname = LogFileName(dbname_, log_number);
  SequentialFile* file;
  Status status = env_->NewSequentialFile(fname, &file);
  if (!status.ok()) {
    MaybeIgnoreError(&status);
    return status;
  }

  // Create the log reader.
  LogReporter reporter;
  reporter.env = env_;
  reporter.info_log = options_.info_log;
  reporter.fname = fname.c_str();
  reporter.status = (options_.paranoid_checks ? &status : NULL);
  // We intentionally make log::Reader do checksumming even if
  // paranoid_checks==false so that corruptions cause entire commits
  // to be skipped instead of propagating bad information (like overly
  // large sequence numbers).
  log::Reader reader(file, &reporter, true/*checksum*/,
                     0/*initial_offset*/);
  Log(options_.info_log, "Recovering log #%llu",
      (unsigned long long) log_number);

  // Read all the records and add to a memtable
  std::string scratch;
  Slice record;
  WriteBatch batch;
  MemTable* mem = NULL;
  while (reader.ReadRecord(&record, &scratch) &&
         status.ok()) {
    if (record.size() < 12) {
      reporter.Corruption(
          record.size(), Status::Corruption("log record too small"));
      continue;
    }
    WriteBatchInternal::SetContents(&batch, record);

    if (mem == NULL) {
      mem = new MemTable(internal_comparator_);
      mem->Ref();
    }
    status = WriteBatchInternal::InsertInto(&batch, mem);
    MaybeIgnoreError(&status);
    if (!status.ok()) {
      break;
    }
    const SequenceNumber last_seq =
        WriteBatchInternal::Sequence(&batch) +
        WriteBatchInternal::Count(&batch) - 1;
    if (last_seq > *max_sequence) {
      *max_sequence = last_seq;
    }

    if (mem->ApproximateMemoryUsage() > options_.write_buffer_size) {
      status = WriteLevel0Table(mem, edit, NULL);
      if (!status.ok()) {
        // Reflect errors immediately so that conditions like full
        // file-systems cause the DB::Open() to fail.
        break;
      }
      mem->Unref();
      mem = NULL;
    }
  }

  if (status.ok() && mem != NULL) {
    status = WriteLevel0Table(mem, edit, NULL);
    // Reflect errors immediately so that conditions like full
    // file-systems cause the DB::Open() to fail.
  }

  if (mem != NULL) mem->Unref();
  delete file;
  return status;
}

Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit,
                                Version* base) {
  mutex_.AssertHeld();
  const uint64_t start_micros = env_->NowMicros();
  FileMetaData meta;
  meta.number = versions_->NewFileNumber();
  pending_outputs_.insert(meta.number);
  Iterator* iter = mem->NewIterator();
  Log(options_.info_log, "Level-0 table #%llu: started",
      (unsigned long long) meta.number);

  Status s;
  {
    mutex_.Unlock();
    s = BuildTable(dbname_, env_, options_, table_cache_, iter, &meta);
    mutex_.Lock();
  }

  Log(options_.info_log, "Level-0 table #%llu: %lld bytes %s",
      (unsigned long long) meta.number,
      (unsigned long long) meta.file_size,
      s.ToString().c_str());
  delete iter;
  pending_outputs_.erase(meta.number);

  // Note that if file_size is zero, the file has been deleted and
  // should not be added to the manifest.
  int level = 0;
  if (s.ok() && meta.file_size > 0) {
    const Slice min_user_key = meta.smallest.user_key();
    const Slice max_user_key = meta.largest.user_key();
    if (base != NULL) {
      level = base->PickLevelForMemTableOutput(min_user_key, max_user_key);
    }
    edit->AddFile(level, meta.number, meta.file_size,
                  meta.smallest, meta.largest);
  }

  CompactionStats stats;
  stats.micros = env_->NowMicros() - start_micros;
  stats.bytes_written = meta.file_size;
  stats_[level].Add(stats);
  return s;
}

Status DBImpl::CompactMemTable() {
  mutex_.AssertHeld();
  assert(imm_ != NULL);

  // Save the contents of the memtable as a new Table
  VersionEdit edit;
  Version* base = versions_->current();
  base->Ref();
  Status s = WriteLevel0Table(imm_, &edit, base);
  base->Unref();

  if (s.ok() && shutting_down_.Acquire_Load()) {
    s = Status::IOError("Deleting DB during memtable compaction");
  }

  // Replace immutable memtable with the generated Table
  if (s.ok()) {
    edit.SetPrevLogNumber(0);
    edit.SetLogNumber(logfile_number_);  // Earlier logs no longer needed
    s = versions_->LogAndApply(&edit, &mutex_);
  }

  if (s.ok()) {
    // Commit to the new state
    imm_->Unref();
    imm_ = NULL;
    has_imm_.Release_Store(NULL);
    DeleteObsoleteFiles();
  }

  return s;
}

void DBImpl::CompactRange(const Slice* begin, const Slice* end) {
  int max_level_with_files = 1;
  {
    MutexLock l(&mutex_);
    Version* base = versions_->current();
    for (int level = 1; level < config::kNumLevels; level++) {
      if (base->OverlapInLevel(level, begin, end)) {
        max_level_with_files = level;
      }
    }
  }
  TEST_CompactMemTable();  // TODO(sanjay): Skip if memtable does not overlap
  for (int level = 0; level < max_level_with_files; level++) {
    TEST_CompactRange(level, begin, end);
  }
}

void DBImpl::TEST_CompactRange(int level, const Slice* begin,
                               const Slice* end) {
  assert(level >= 0);
  assert(level + 1 < config::kNumLevels);

  InternalKey begin_storage, end_storage;

  ManualCompaction manual;
  manual.level = level;
  manual.done = false;
  if (begin == NULL) {
    manual.begin = NULL;
  } else {
    begin_storage = InternalKey(*begin, kMaxSequenceNumber, kValueTypeForSeek);
    manual.begin = &begin_storage;
  }
  if (end == NULL) {
    manual.end = NULL;
  } else {
    end_storage = InternalKey(*end, 0, static_cast<ValueType>(0));
    manual.end = &end_storage;
  }

  MutexLock l(&mutex_);
  while (!manual.done) {
    while (manual_compaction_ != NULL) {
      bg_cv_.Wait();
    }
    manual_compaction_ = &manual;
    MaybeScheduleCompaction();
    while (manual_compaction_ == &manual) {
      bg_cv_.Wait();
    }
  }
}

Status DBImpl::TEST_CompactMemTable() {
  MutexLock l(&mutex_);
  LoggerId self;
  AcquireLoggingResponsibility(&self);
  Status s = MakeRoomForWrite(true /* force compaction */);
  ReleaseLoggingResponsibility(&self);
  if (s.ok()) {
    // Wait until the compaction completes
    while (imm_ != NULL && bg_error_.ok()) {
      bg_cv_.Wait();
    }
    if (imm_ != NULL) {
      s = bg_error_;
    }
  }
  return s;
}

void DBImpl::MaybeScheduleCompaction() {
  mutex_.AssertHeld();
  if (bg_compaction_scheduled_) {
    // Already scheduled
  } else if (shutting_down_.Acquire_Load()) {
    // DB is being deleted; no more background compactions
  } else if (imm_ == NULL &&
             manual_compaction_ == NULL &&
             !versions_->NeedsCompaction()) {
    // No work to be done
  } else {
    bg_compaction_scheduled_ = true;
    env_->Schedule(&DBImpl::BGWork, this);
  }
}

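// Editor's note: BGWork is the static trampoline handed to Env::Schedule;
// it forwards to BackgroundCall on the env's background thread.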
void DBImpl::BGWork(void* db) {
  reinterpret_cast<DBImpl*>(db)->BackgroundCall();
}

void DBImpl::BackgroundCall() {
  MutexLock l(&mutex_);
  assert(bg_compaction_scheduled_);
  if (!shutting_down_.Acquire_Load()) {
    BackgroundCompaction();
  }
  bg_compaction_scheduled_ = false;

  // Previous compaction may have produced too many files in a level,
  // so reschedule another compaction if needed.
  MaybeScheduleCompaction();
  bg_cv_.SignalAll();
}

void DBImpl::BackgroundCompaction() {
  mutex_.AssertHeld();

  if (imm_ != NULL) {
    CompactMemTable();
    return;
  }

  Compaction* c;
  bool is_manual = (manual_compaction_ != NULL);
  InternalKey manual_end;
  if (is_manual) {
    ManualCompaction* m = manual_compaction_;
    c = versions_->CompactRange(m->level, m->begin, m->end);
    m->done = (c == NULL);
    if (c != NULL) {
      manual_end = c->input(0, c->num_input_files(0) - 1)->largest;
    }
    Log(options_.info_log,
        "Manual compaction at level-%d from %s .. %s; will stop at %s\n",
        m->level,
        (m->begin ? m->begin->DebugString().c_str() : "(begin)"),
        (m->end ? m->end->DebugString().c_str() : "(end)"),
        (m->done ? "(end)" : manual_end.DebugString().c_str()));
  } else {
    c = versions_->PickCompaction();
  }

  Status status;
  if (c == NULL) {
    // Nothing to do
  } else if (!is_manual && c->IsTrivialMove()) {
    // Move file to next level
    assert(c->num_input_files(0) == 1);
    FileMetaData* f = c->input(0, 0);
    c->edit()->DeleteFile(c->level(), f->number);
    c->edit()->AddFile(c->level() + 1, f->number, f->file_size,
                       f->smallest, f->largest);
    status = versions_->LogAndApply(c->edit(), &mutex_);
    VersionSet::LevelSummaryStorage tmp;
    Log(options_.info_log, "Moved #%lld to level-%d %lld bytes %s: %s\n",
        static_cast<unsigned long long>(f->number),
        c->level() + 1,
        static_cast<unsigned long long>(f->file_size),
        status.ToString().c_str(),
        versions_->LevelSummary(&tmp));
  } else {
    CompactionState* compact = new CompactionState(c);
    status = DoCompactionWork(compact);
    CleanupCompaction(compact);
    c->ReleaseInputs();
    DeleteObsoleteFiles();
  }
  delete c;

  if (status.ok()) {
    // Done
  } else if (shutting_down_.Acquire_Load()) {
    // Ignore compaction errors found during shutting down
  } else {
    Log(options_.info_log,
        "Compaction error: %s", status.ToString().c_str());
    if (options_.paranoid_checks && bg_error_.ok()) {
      bg_error_ = status;
    }
  }

  if (is_manual) {
    ManualCompaction* m = manual_compaction_;
    if (!status.ok()) {
      m->done = true;
    }
    if (!m->done) {
      // We only compacted part of the requested range. Update *m
      // to the range that is left to be compacted.
      m->tmp_storage = manual_end;
      m->begin = &m->tmp_storage;
    }
    manual_compaction_ = NULL;
  }
}

void DBImpl::CleanupCompaction(CompactionState* compact) {
  mutex_.AssertHeld();
  if (compact->builder != NULL) {
    // May happen if we get a shutdown call in the middle of compaction
    compact->builder->Abandon();
    delete compact->builder;
  } else {
    assert(compact->outfile == NULL);
  }
  delete compact->outfile;
  for (size_t i = 0; i < compact->outputs.size(); i++) {
    const CompactionState::Output& out = compact->outputs[i];
    pending_outputs_.erase(out.number);
  }
  delete compact;
}

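// Editor's note: the new file number is allocated (and inserted into
// pending_outputs_) while holding the mutex, so DeleteObsoleteFiles cannot
// remove the output file before it has been written and installed.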
Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) {
  assert(compact != NULL);
  assert(compact->builder == NULL);
  uint64_t file_number;
  {
    mutex_.Lock();
    file_number = versions_->NewFileNumber();
    pending_outputs_.insert(file_number);
    CompactionState::Output out;
    out.number = file_number;
    out.smallest.Clear();
    out.largest.Clear();
    compact->outputs.push_back(out);
    mutex_.Unlock();
  }

  // Make the output file
  std::string fname = TableFileName(dbname_, file_number);
  Status s = env_->NewWritableFile(fname, &compact->outfile);
  if (s.ok()) {
    compact->builder = new TableBuilder(options_, compact->outfile);
  }
  return s;
}

Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
                                          Iterator* input) {
  assert(compact != NULL);
  assert(compact->outfile != NULL);
  assert(compact->builder != NULL);

  const uint64_t output_number = compact->current_output()->number;
  assert(output_number != 0);

  // Check for iterator errors
  Status s = input->status();
  const uint64_t current_entries = compact->builder->NumEntries();
  if (s.ok()) {
    s = compact->builder->Finish();
  } else {
    compact->builder->Abandon();
  }
  const uint64_t current_bytes = compact->builder->FileSize();
  compact->current_output()->file_size = current_bytes;
  compact->total_bytes += current_bytes;
  delete compact->builder;
  compact->builder = NULL;

  // Finish and check for file errors
  if (s.ok()) {
    s = compact->outfile->Sync();
  }
  if (s.ok()) {
    s = compact->outfile->Close();
  }
  delete compact->outfile;
  compact->outfile = NULL;

  if (s.ok() && current_entries > 0) {
    // Verify that the table is usable
    Iterator* iter = table_cache_->NewIterator(ReadOptions(),
                                               output_number,
                                               current_bytes);
    s = iter->status();
    delete iter;
    if (s.ok()) {
      Log(options_.info_log,
          "Generated table #%llu: %lld keys, %lld bytes",
          (unsigned long long) output_number,
          (unsigned long long) current_entries,
          (unsigned long long) current_bytes);
    }
  }
  return s;
}

Status DBImpl::InstallCompactionResults(CompactionState* compact) {
  mutex_.AssertHeld();
  Log(options_.info_log, "Compacted %d@%d + %d@%d files => %lld bytes",
      compact->compaction->num_input_files(0),
      compact->compaction->level(),
      compact->compaction->num_input_files(1),
      compact->compaction->level() + 1,
      static_cast<long long>(compact->total_bytes));

  // Add compaction outputs
  compact->compaction->AddInputDeletions(compact->compaction->edit());
  const int level = compact->compaction->level();
  for (size_t i = 0; i < compact->outputs.size(); i++) {
    const CompactionState::Output& out = compact->outputs[i];
    compact->compaction->edit()->AddFile(
        level + 1,
        out.number, out.file_size, out.smallest, out.largest);
  }
  return versions_->LogAndApply(compact->compaction->edit(), &mutex_);
}

Status DBImpl::DoCompactionWork(CompactionState* compact) {
  const uint64_t start_micros = env_->NowMicros();
  int64_t imm_micros = 0;  // Micros spent doing imm_ compactions

  Log(options_.info_log, "Compacting %d@%d + %d@%d files",
      compact->compaction->num_input_files(0),
      compact->compaction->level(),
      compact->compaction->num_input_files(1),
      compact->compaction->level() + 1);

  assert(versions_->NumLevelFiles(compact->compaction->level()) > 0);
  assert(compact->builder == NULL);
  assert(compact->outfile == NULL);
  if (snapshots_.empty()) {
    compact->smallest_snapshot = versions_->LastSequence();
  } else {
    compact->smallest_snapshot = snapshots_.oldest()->number_;
  }

  // Release mutex while we're actually doing the compaction work
  mutex_.Unlock();

  Iterator* input = versions_->MakeInputIterator(compact->compaction);
  input->SeekToFirst();
  Status status;
  ParsedInternalKey ikey;
  std::string current_user_key;
  bool has_current_user_key = false;
  SequenceNumber last_sequence_for_key = kMaxSequenceNumber;
  for (; input->Valid() && !shutting_down_.Acquire_Load(); ) {
    // Prioritize immutable compaction work
    if (has_imm_.NoBarrier_Load() != NULL) {
      const uint64_t imm_start = env_->NowMicros();
      mutex_.Lock();
      if (imm_ != NULL) {
        CompactMemTable();
        bg_cv_.SignalAll();  // Wakeup MakeRoomForWrite() if necessary
      }
      mutex_.Unlock();
      imm_micros += (env_->NowMicros() - imm_start);
    }

    Slice key = input->key();
    if (compact->compaction->ShouldStopBefore(key) &&
        compact->builder != NULL) {
      status = FinishCompactionOutputFile(compact, input);
      if (!status.ok()) {
        break;
      }
    }

    // Handle key/value, add to state, etc.
    bool drop = false;
    if (!ParseInternalKey(key, &ikey)) {
      // Do not hide error keys
      current_user_key.clear();
      has_current_user_key = false;
      last_sequence_for_key = kMaxSequenceNumber;
    } else {
      if (!has_current_user_key ||
          user_comparator()->Compare(ikey.user_key,
                                     Slice(current_user_key)) != 0) {
        // First occurrence of this user key
        current_user_key.assign(ikey.user_key.data(), ikey.user_key.size());
        has_current_user_key = true;
        last_sequence_for_key = kMaxSequenceNumber;
      }

      if (last_sequence_for_key <= compact->smallest_snapshot) {
        // Hidden by a newer entry for same user key
        drop = true;    // (A)
      } else if (ikey.type == kTypeDeletion &&
                 ikey.sequence <= compact->smallest_snapshot &&
                 compact->compaction->IsBaseLevelForKey(ikey.user_key)) {
        // For this user key:
        // (1) there is no data in higher levels
        // (2) data in lower levels will have larger sequence numbers
        // (3) data in layers that are being compacted here and have
        //     smaller sequence numbers will be dropped in the next
        //     few iterations of this loop (by rule (A) above).
        // Therefore this deletion marker is obsolete and can be dropped.
        drop = true;
      }

      last_sequence_for_key = ikey.sequence;
    }
#if 0
    Log(options_.info_log,
        "  Compact: %s, seq %d, type: %d %d, drop: %d, is_base: %d, "
        "%d smallest_snapshot: %d",
        ikey.user_key.ToString().c_str(),
        (int)ikey.sequence, ikey.type, kTypeValue, drop,
        compact->compaction->IsBaseLevelForKey(ikey.user_key),
        (int)last_sequence_for_key, (int)compact->smallest_snapshot);
#endif

    if (!drop) {
      // Open output file if necessary
      if (compact->builder == NULL) {
        status = OpenCompactionOutputFile(compact);
        if (!status.ok()) {
          break;
        }
      }
      if (compact->builder->NumEntries() == 0) {
        compact->current_output()->smallest.DecodeFrom(key);
      }
      compact->current_output()->largest.DecodeFrom(key);
      compact->builder->Add(key, input->value());

      // Close output file if it is big enough
      if (compact->builder->FileSize() >=
          compact->compaction->MaxOutputFileSize()) {
        status = FinishCompactionOutputFile(compact, input);
        if (!status.ok()) {
          break;
        }
      }
    }

    input->Next();
  }

  if (status.ok() && shutting_down_.Acquire_Load()) {
    status = Status::IOError("Deleting DB during compaction");
  }
  if (status.ok() && compact->builder != NULL) {
    status = FinishCompactionOutputFile(compact, input);
  }
  if (status.ok()) {
    status = input->status();
  }
  delete input;
  input = NULL;

  CompactionStats stats;
  stats.micros = env_->NowMicros() - start_micros - imm_micros;
  for (int which = 0; which < 2; which++) {
    for (int i = 0; i < compact->compaction->num_input_files(which); i++) {
      stats.bytes_read += compact->compaction->input(which, i)->file_size;
    }
  }
  for (size_t i = 0; i < compact->outputs.size(); i++) {
    stats.bytes_written += compact->outputs[i].file_size;
  }

  mutex_.Lock();
  stats_[compact->compaction->level() + 1].Add(stats);

  if (status.ok()) {
    status = InstallCompactionResults(compact);
  }
  VersionSet::LevelSummaryStorage tmp;
  Log(options_.info_log,
      "compacted to: %s", versions_->LevelSummary(&tmp));
  return status;
}

namespace {
struct IterState {
  port::Mutex* mu;
  Version* version;
  MemTable* mem;
  MemTable* imm;
};

static void CleanupIteratorState(void* arg1, void* arg2) {
  IterState* state = reinterpret_cast<IterState*>(arg1);
  state->mu->Lock();
  state->mem->Unref();
  if (state->imm != NULL) state->imm->Unref();
  state->version->Unref();
  state->mu->Unlock();
  delete state;
}
}  // namespace

Iterator* DBImpl::NewInternalIterator(const ReadOptions& options,
                                      SequenceNumber* latest_snapshot) {
  IterState* cleanup = new IterState;
  mutex_.Lock();
  *latest_snapshot = versions_->LastSequence();

  // Collect together all needed child iterators
  std::vector<Iterator*> list;
  list.push_back(mem_->NewIterator());
  mem_->Ref();
  if (imm_ != NULL) {
    list.push_back(imm_->NewIterator());
    imm_->Ref();
  }
  versions_->current()->AddIterators(options, &list);
  Iterator* internal_iter =
      NewMergingIterator(&internal_comparator_, &list[0], list.size());
  versions_->current()->Ref();

  cleanup->mu = &mutex_;
  cleanup->mem = mem_;
  cleanup->imm = imm_;
  cleanup->version = versions_->current();
  internal_iter->RegisterCleanup(CleanupIteratorState, cleanup, NULL);

  mutex_.Unlock();
  return internal_iter;
}

Iterator* DBImpl::TEST_NewInternalIterator() {
  SequenceNumber ignored;
  return NewInternalIterator(ReadOptions(), &ignored);
}

int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes() {
  MutexLock l(&mutex_);
  return versions_->MaxNextLevelOverlappingBytes();
}

Status DBImpl::Get(const ReadOptions& options,
                   const Slice& key,
                   std::string* value) {
  Status s;
  MutexLock l(&mutex_);
  SequenceNumber snapshot;
  if (options.snapshot != NULL) {
    snapshot = reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_;
  } else {
    snapshot = versions_->LastSequence();
  }

  MemTable* mem = mem_;
  MemTable* imm = imm_;
  Version* current = versions_->current();
  mem->Ref();
  if (imm != NULL) imm->Ref();
  current->Ref();

  bool have_stat_update = false;
  Version::GetStats stats;

  // Unlock while reading from files and memtables
  {
    mutex_.Unlock();
    // First look in the memtable, then in the immutable memtable (if any).
    LookupKey lkey(key, snapshot);
    if (mem->Get(lkey, value, &s)) {
      // Done
    } else if (imm != NULL && imm->Get(lkey, value, &s)) {
      // Done
    } else {
      s = current->Get(options, lkey, value, &stats);
      have_stat_update = true;
    }
    mutex_.Lock();
  }

  if (have_stat_update && current->UpdateStats(stats)) {
    MaybeScheduleCompaction();
  }
  mem->Unref();
  if (imm != NULL) imm->Unref();
  current->Unref();
  return s;
}

Iterator* DBImpl::NewIterator(const ReadOptions& options) {
  SequenceNumber latest_snapshot;
  Iterator* internal_iter = NewInternalIterator(options, &latest_snapshot);
  return NewDBIterator(
      &dbname_, env_, user_comparator(), internal_iter,
      (options.snapshot != NULL
       ? reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_
       : latest_snapshot));
}

const Snapshot* DBImpl::GetSnapshot() {
  MutexLock l(&mutex_);
  return snapshots_.New(versions_->LastSequence());
}

void DBImpl::ReleaseSnapshot(const Snapshot* s) {
  MutexLock l(&mutex_);
  snapshots_.Delete(reinterpret_cast<const SnapshotImpl*>(s));
}

// Convenience methods
Status DBImpl::Put(const WriteOptions& o, const Slice& key, const Slice& val) {
  return DB::Put(o, key, val);
}

Status DBImpl::Delete(const WriteOptions& options, const Slice& key) {
  return DB::Delete(options, key);
}

// There is at most one thread that is the current logger. This call
// waits until preceding logger(s) have finished and becomes the
// current logger.
void DBImpl::AcquireLoggingResponsibility(LoggerId* self) {
  while (logger_ != NULL) {
    logger_cv_.Wait();
  }
  logger_ = self;
}

void DBImpl::ReleaseLoggingResponsibility(LoggerId* self) {
  assert(logger_ == self);
  logger_ = NULL;
  logger_cv_.SignalAll();
}

Status DBImpl::Write(const WriteOptions& options, WriteBatch* updates) {
  Status status;
  MutexLock l(&mutex_);
  LoggerId self;
  AcquireLoggingResponsibility(&self);
  status = MakeRoomForWrite(false);  // May temporarily release lock and wait
  uint64_t last_sequence = versions_->LastSequence();
  if (status.ok()) {
    WriteBatchInternal::SetSequence(updates, last_sequence + 1);
    last_sequence += WriteBatchInternal::Count(updates);

    // Add to log and apply to memtable. We can release the lock during
    // this phase since the "logger_" flag protects against concurrent
    // loggers and concurrent writes into mem_.
    {
      assert(logger_ == &self);
      mutex_.Unlock();
      status = log_->AddRecord(WriteBatchInternal::Contents(updates));
      if (status.ok() && options.sync) {
        status = logfile_->Sync();
      }
      if (status.ok()) {
        status = WriteBatchInternal::InsertInto(updates, mem_);
      }
      mutex_.Lock();
      assert(logger_ == &self);
    }

    versions_->SetLastSequence(last_sequence);
  }
  ReleaseLoggingResponsibility(&self);
  return status;
}

// REQUIRES: mutex_ is held
// REQUIRES: this thread is the current logger
Status DBImpl::MakeRoomForWrite(bool force) {
  mutex_.AssertHeld();
  assert(logger_ != NULL);
  bool allow_delay = !force;
  Status s;
  while (true) {
    if (!bg_error_.ok()) {
      // Yield previous error
      s = bg_error_;
      break;
    } else if (
        allow_delay &&
        versions_->NumLevelFiles(0) >= config::kL0_SlowdownWritesTrigger) {
      // We are getting close to hitting a hard limit on the number of
      // L0 files. Rather than delaying a single write by several
      // seconds when we hit the hard limit, start delaying each
      // individual write by 1ms to reduce latency variance. Also,
      // this delay hands over some CPU to the compaction thread in
      // case it is sharing the same core as the writer.
      mutex_.Unlock();
      env_->SleepForMicroseconds(1000);
      allow_delay = false;  // Do not delay a single write more than once
      mutex_.Lock();
    } else if (!force &&
               (mem_->ApproximateMemoryUsage() <= options_.write_buffer_size)) {
      // There is room in current memtable
      break;
    } else if (imm_ != NULL) {
      // We have filled up the current memtable, but the previous
      // one is still being compacted, so we wait.
      bg_cv_.Wait();
    } else if (versions_->NumLevelFiles(0) >= config::kL0_StopWritesTrigger) {
      // There are too many level-0 files.
      Log(options_.info_log, "waiting...\n");
      bg_cv_.Wait();
    } else {
      // Attempt to switch to a new memtable and trigger compaction of old
      assert(versions_->PrevLogNumber() == 0);
      uint64_t new_log_number = versions_->NewFileNumber();
      WritableFile* lfile = NULL;
      s = env_->NewWritableFile(LogFileName(dbname_, new_log_number), &lfile);
      if (!s.ok()) {
        break;
      }
      delete log_;
      delete logfile_;
      logfile_ = lfile;
      logfile_number_ = new_log_number;
      log_ = new log::Writer(lfile);
      imm_ = mem_;
      has_imm_.Release_Store(imm_);
      mem_ = new MemTable(internal_comparator_);
      mem_->Ref();
      force = false;  // Do not force another compaction if have room
      MaybeScheduleCompaction();
    }
  }
  return s;
}

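// Editor's note: the properties recognized below are
// "leveldb.num-files-at-level<N>", "leveldb.stats", and "leveldb.sstables".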
bool DBImpl::GetProperty(const Slice& property, std::string* value) {
  value->clear();

  MutexLock l(&mutex_);
  Slice in = property;
  Slice prefix("leveldb.");
  if (!in.starts_with(prefix)) return false;
  in.remove_prefix(prefix.size());

  if (in.starts_with("num-files-at-level")) {
    in.remove_prefix(strlen("num-files-at-level"));
    uint64_t level;
    bool ok = ConsumeDecimalNumber(&in, &level) && in.empty();
    if (!ok || level >= config::kNumLevels) {
      return false;
    } else {
      char buf[100];
      snprintf(buf, sizeof(buf), "%d",
               versions_->NumLevelFiles(static_cast<int>(level)));
      *value = buf;
      return true;
    }
  } else if (in == "stats") {
    char buf[200];
    snprintf(buf, sizeof(buf),
             "                               Compactions\n"
             "Level  Files Size(MB) Time(sec) Read(MB) Write(MB)\n"
             "--------------------------------------------------\n");
    value->append(buf);
    for (int level = 0; level < config::kNumLevels; level++) {
      int files = versions_->NumLevelFiles(level);
      if (stats_[level].micros > 0 || files > 0) {
        snprintf(
            buf, sizeof(buf),
            "%3d %8d %8.0f %9.0f %8.0f %9.0f\n",
            level,
            files,
            versions_->NumLevelBytes(level) / 1048576.0,
            stats_[level].micros / 1e6,
            stats_[level].bytes_read / 1048576.0,
            stats_[level].bytes_written / 1048576.0);
        value->append(buf);
      }
    }
    return true;
  } else if (in == "sstables") {
    *value = versions_->current()->DebugString();
    return true;
  }

  return false;
}

void DBImpl::GetApproximateSizes(
    const Range* range, int n,
    uint64_t* sizes) {
  // TODO(opt): better implementation
  Version* v;
  {
    MutexLock l(&mutex_);
    versions_->current()->Ref();
    v = versions_->current();
  }

  for (int i = 0; i < n; i++) {
    // Convert user_key into a corresponding internal key.
    InternalKey k1(range[i].start, kMaxSequenceNumber, kValueTypeForSeek);
    InternalKey k2(range[i].limit, kMaxSequenceNumber, kValueTypeForSeek);
    uint64_t start = versions_->ApproximateOffsetOf(v, k1);
    uint64_t limit = versions_->ApproximateOffsetOf(v, k2);
    sizes[i] = (limit >= start ? limit - start : 0);
  }

  {
    MutexLock l(&mutex_);
    v->Unref();
  }
}

// Default implementations of convenience methods that subclasses of DB
// can call if they wish
Status DB::Put(const WriteOptions& opt, const Slice& key, const Slice& value) {
  WriteBatch batch;
  batch.Put(key, value);
  return Write(opt, &batch);
}

Status DB::Delete(const WriteOptions& opt, const Slice& key) {
  WriteBatch batch;
  batch.Delete(key);
  return Write(opt, &batch);
}

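// Editor's note: a minimal usage sketch for DB::Open (hypothetical path,
// error handling abbreviated):
//
//   leveldb::DB* db;
//   leveldb::Options opts;
//   opts.create_if_missing = true;
//   leveldb::Status s = leveldb::DB::Open(opts, "/tmp/testdb", &db);
//   if (s.ok()) s = db->Put(leveldb::WriteOptions(), "key", "value");
//   std::string result;
//   if (s.ok()) s = db->Get(leveldb::ReadOptions(), "key", &result);
//   delete db;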
Status DB::Open(const Options& options, const std::string& dbname,
                DB** dbptr) {
  *dbptr = NULL;

  DBImpl* impl = new DBImpl(options, dbname);
  impl->mutex_.Lock();
  VersionEdit edit;
  Status s = impl->Recover(&edit); // Handles create_if_missing, error_if_exists
  if (s.ok()) {
    uint64_t new_log_number = impl->versions_->NewFileNumber();
    WritableFile* lfile;
    s = options.env->NewWritableFile(LogFileName(dbname, new_log_number),
                                     &lfile);
    if (s.ok()) {
      edit.SetLogNumber(new_log_number);
      impl->logfile_ = lfile;
      impl->logfile_number_ = new_log_number;
      impl->log_ = new log::Writer(lfile);
      s = impl->versions_->LogAndApply(&edit, &impl->mutex_);
    }
    if (s.ok()) {
      impl->DeleteObsoleteFiles();
      impl->MaybeScheduleCompaction();
    }
  }
  impl->mutex_.Unlock();
  if (s.ok()) {
    *dbptr = impl;
  } else {
    delete impl;
  }
  return s;
}

Snapshot::~Snapshot() {
}

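// Editor's note: DestroyDB deletes every file under dbname, so the database
// becomes unrecoverable; the lock file is removed last.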
Status DestroyDB(const std::string& dbname, const Options& options) {
  Env* env = options.env;
  std::vector<std::string> filenames;
  // Ignore error in case directory does not exist
  env->GetChildren(dbname, &filenames);
  if (filenames.empty()) {
    return Status::OK();
  }

  FileLock* lock;
  const std::string lockname = LockFileName(dbname);
  Status result = env->LockFile(lockname, &lock);
  if (result.ok()) {
    uint64_t number;
    FileType type;
    for (size_t i = 0; i < filenames.size(); i++) {
      if (ParseFileName(filenames[i], &number, &type) &&
          filenames[i] != lockname) {  // Lock file will be deleted at end
        Status del = env->DeleteFile(dbname + "/" + filenames[i]);
        if (result.ok() && !del.ok()) {
          result = del;
        }
      }
    }
    env->UnlockFile(lock);  // Ignore error since state is already gone
    env->DeleteFile(lockname);
    env->DeleteDir(dbname);  // Ignore error in case dir contains other files
  }
  return result;
}

}  // namespace leveldb