/*
 * Copyright 2012 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Author: jmarantz@google.com (Joshua Marantz)
#ifndef PAGESPEED_KERNEL_CACHE_CACHE_BATCHER_H_
#define PAGESPEED_KERNEL_CACHE_CACHE_BATCHER_H_

#include "pagespeed/kernel/base/basictypes.h"
#include "pagespeed/kernel/base/scoped_ptr.h"
#include "pagespeed/kernel/base/string.h"
#include "pagespeed/kernel/base/string_util.h"
#include "pagespeed/kernel/cache/cache_interface.h"
namespace net_instaweb {

// Forward declarations for types used only by pointer/reference below.
// NOTE(review): these were presumably declared in the span lost to the
// extraction garbling (original lines 31-36); the class body requires them.
class AbstractMutex;
class Statistics;
class Variable;

// Batches up cache lookups to exploit implementations that have MultiGet
// support.  A fixed limit of outstanding cache lookups are passed through
// as single-key Gets when received to avoid adding latency.  Above that,
// the keys & callbacks are queued until one of the outstanding Gets
// completes.  When that occurs, the queued requests are passed as
// a single MultiGet request.
//
// There is also a maximum queue size.  If Gets stream in faster than they
// are completed and the queue overflows, then we respond with a fast kNotFound.
//
// Note that this class is designed for use with an asynchronous cache
// implementation.  To use this with a blocking cache implementation, please
// wrap the blocking cache in an AsyncCache.
class CacheBatcher : public CacheInterface {
52
// We are willing to only do a bounded number of parallel lookups.
53
// Note that this is independent of the number of keys in each lookup.
55
// By setting the default at 1, we get maximum batching and minimize
56
// the number of parallel lookups we do. Note that independent of
57
// this count, there is already substantial lookup parallelism
58
// because each Apache process has its own batcher, and there can be
59
// multiple Apache servers talking to the same cache.
61
// Further, the load-tests performed while developing this feature
62
// indicated that the best value was '1'.
63
static const int kDefaultMaxParallelLookups = 1;
65
// We batch up cache lookups until outstanding ones are complete.
66
// However, we bound the queue size in order to avoid exhausting
67
// memory. When the thread queues are saturated, we drop the
68
// requests, calling the callback immediately with kNotFound.
69
static const size_t kDefaultMaxQueueSize = 1000;
71
// Does not take ownership of the cache. Takes ownership of the mutex.
72
CacheBatcher(CacheInterface* cache, AbstractMutex* mutex,
73
Statistics* statistics);
74
virtual ~CacheBatcher();
76
// Startup-time (pre-construction) initialization of statistics
77
// variables so the correct-sized shared memory can be constructed
78
// in the root Apache process.
79
static void InitStats(Statistics* statistics);
81
virtual void Get(const GoogleString& key, Callback* callback);
82
virtual void Put(const GoogleString& key, SharedString* value);
83
virtual void Delete(const GoogleString& key);
84
virtual GoogleString Name() const;
85
static GoogleString FormatName(StringPiece cache, int parallelism, int max);
87
// Note: CacheBatcher cannot do any batching if given a blocking cache,
88
// however it is still functional so pass on the bit.
89
virtual bool IsBlocking() const { return cache_->IsBlocking(); }
91
int last_batch_size() const { return last_batch_size_; } // for testing
92
void set_max_queue_size(size_t n) { max_queue_size_ = n; }
93
void set_max_parallel_lookups(size_t n) { max_parallel_lookups_ = n; }
95
int Pending(); // This is used to help synchronize tests.
97
virtual bool IsHealthy() const { return cache_->IsHealthy(); }
98
virtual void ShutDown();
102
class BatcherCallback;
104
void GroupComplete();
105
bool CanIssueGet() const; // must be called with mutex_ held.
107
CacheInterface* cache_;
108
scoped_ptr<AbstractMutex> mutex_;
109
MultiGetRequest queue_;
110
int last_batch_size_;
112
int max_parallel_lookups_;
113
size_t max_queue_size_; // size_t so it can be compared to queue_.size().
114
Variable* dropped_gets_;
116
DISALLOW_COPY_AND_ASSIGN(CacheBatcher);
119
}  // namespace net_instaweb

#endif  // PAGESPEED_KERNEL_CACHE_CACHE_BATCHER_H_