// Copyright 2011 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
// Various stubs for the open-source version of Snappy.
31
#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
32
#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
45
#ifdef HAVE_SYS_MMAN_H
49
#include "snappy-stubs-public.h"
51
#if defined(__x86_64__)
53
// Enable 64-bit optimized versions of some routines.
58
// Needed by OS X, among others.
60
#define MAP_ANONYMOUS MAP_ANON
63
// Pull in std::min, std::ostream, and the likes. This is safe because this
64
// header file is never used from any public header files.
67
// The size of an array, if known at compile-time.
68
// Will give unexpected results if used on a pointer.
69
// We undefine it first, since some compilers already have a definition.
73
#define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))
75
// Static prediction hints.
76
#ifdef HAVE_BUILTIN_EXPECT
77
#define PREDICT_FALSE(x) (__builtin_expect(x, 0))
78
#define PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
80
#define PREDICT_FALSE(x) x
81
#define PREDICT_TRUE(x) x
84
// This is only used for recomputing the tag byte table used during
85
// decompression; for simplicity we just remove it from the open-source
86
// version (anyone who wants to regenerate it can just do the call
87
// themselves within main()).
88
#define DEFINE_bool(flag_name, default_value, description) \
89
bool FLAGS_ ## flag_name = default_value;
90
#define DECLARE_bool(flag_name) \
91
extern bool FLAGS_ ## flag_name;
92
#define REGISTER_MODULE_INITIALIZER(name, code)
96
static const uint32 kuint32max = static_cast<uint32>(0xFFFFFFFF);
97
static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
101
#define LOG(level) LogMessage()
102
#define VLOG(level) true ? (void)0 : \
103
snappy::LogMessageVoidify() & snappy::LogMessage()
112
LogMessage& operator<<(const std::string& msg) {
116
LogMessage& operator<<(int x) {
122
// Asserts, both versions activated in debug mode only,
123
// and ones that are always active.
125
#define CRASH_UNLESS(condition) \
126
PREDICT_TRUE(condition) ? (void)0 : \
127
snappy::LogMessageVoidify() & snappy::LogMessageCrash()
129
class LogMessageCrash : public LogMessage {
131
LogMessageCrash() { }
138
// This class is used to explicitly ignore values in the conditional
139
// logging macros. This avoids compiler warnings like "value computed
140
// is not used" and "statement has no effect".
142
class LogMessageVoidify {
144
LogMessageVoidify() { }
145
// This has to be an operator with a precedence lower than << but
147
void operator&(const LogMessage&) { }
// Always-active checks: evaluate the condition and crash (via CRASH_UNLESS)
// if it does not hold.
#define CHECK(cond) CRASH_UNLESS(cond)
#define CHECK_LE(a, b) CRASH_UNLESS((a) <= (b))
#define CHECK_GE(a, b) CRASH_UNLESS((a) >= (b))
#define CHECK_EQ(a, b) CRASH_UNLESS((a) == (b))
#define CHECK_NE(a, b) CRASH_UNLESS((a) != (b))
#define CHECK_LT(a, b) CRASH_UNLESS((a) < (b))
#define CHECK_GT(a, b) CRASH_UNLESS((a) > (b))

#ifdef NDEBUG

// In optimized builds the DCHECKs compile to a no-op: CRASH_UNLESS(true)
// can never fire and the arguments are not evaluated.
#define DCHECK(cond) CRASH_UNLESS(true)
#define DCHECK_LE(a, b) CRASH_UNLESS(true)
#define DCHECK_GE(a, b) CRASH_UNLESS(true)
#define DCHECK_EQ(a, b) CRASH_UNLESS(true)
#define DCHECK_NE(a, b) CRASH_UNLESS(true)
#define DCHECK_LT(a, b) CRASH_UNLESS(true)
#define DCHECK_GT(a, b) CRASH_UNLESS(true)

#else

// In debug builds the DCHECKs are just the corresponding CHECKs.
#define DCHECK(cond) CHECK(cond)
#define DCHECK_LE(a, b) CHECK_LE(a, b)
#define DCHECK_GE(a, b) CHECK_GE(a, b)
#define DCHECK_EQ(a, b) CHECK_EQ(a, b)
#define DCHECK_NE(a, b) CHECK_NE(a, b)
#define DCHECK_LT(a, b) CHECK_LT(a, b)
#define DCHECK_GT(a, b) CHECK_GT(a, b)

#endif
// Potentially unaligned loads and stores.

#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)

// These architectures handle unaligned accesses natively, so a plain
// (reinterpret_cast) dereference is both correct and fastest.
#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
#define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))

#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
#define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))

#else

// These functions are provided for architectures that don't support
// unaligned loads and stores.  memcpy compiles to efficient code on
// most platforms and never performs a misaligned access.

inline uint16 UNALIGNED_LOAD16(const void *p) {
  uint16 t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline uint32 UNALIGNED_LOAD32(const void *p) {
  uint32 t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline uint64 UNALIGNED_LOAD64(const void *p) {
  uint64 t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline void UNALIGNED_STORE16(void *p, uint16 v) {
  memcpy(p, &v, sizeof v);
}

inline void UNALIGNED_STORE32(void *p, uint32 v) {
  memcpy(p, &v, sizeof v);
}

inline void UNALIGNED_STORE64(void *p, uint64 v) {
  memcpy(p, &v, sizeof v);
}

#endif
// The following guarantees declaration of the byte swap functions.
// (Only needed on big-endian hosts, where the LittleEndian class below
// actually has to swap.)
#ifdef WORDS_BIGENDIAN

#ifdef _MSC_VER
#include <stdlib.h>
#define bswap_16(x) _byteswap_ushort(x)
#define bswap_32(x) _byteswap_ulong(x)
#define bswap_64(x) _byteswap_uint64(x)

#elif defined(__APPLE__)
// Mac OS X / Darwin features
#include <libkern/OSByteOrder.h>
#define bswap_16(x) OSSwapInt16(x)
#define bswap_32(x) OSSwapInt32(x)
#define bswap_64(x) OSSwapInt64(x)

#else
#include <byteswap.h>
#endif

#endif  // WORDS_BIGENDIAN
251
// Convert to little-endian storage, opposite of network format.
252
// Convert x from host to little endian: x = LittleEndian.FromHost(x);
253
// convert x from little endian to host: x = LittleEndian.ToHost(x);
255
// Store values into unaligned memory converting to little endian order:
256
// LittleEndian.Store16(p, x);
258
// Load unaligned values stored in little endian converting to host order:
259
// x = LittleEndian.Load16(p);
262
// Conversion functions.
263
#ifdef WORDS_BIGENDIAN
265
static uint16 FromHost16(uint16 x) { return bswap_16(x); }
266
static uint16 ToHost16(uint16 x) { return bswap_16(x); }
268
static uint32 FromHost32(uint32 x) { return bswap_32(x); }
269
static uint32 ToHost32(uint32 x) { return bswap_32(x); }
271
static bool IsLittleEndian() { return false; }
273
#else // !defined(WORDS_BIGENDIAN)
275
static uint16 FromHost16(uint16 x) { return x; }
276
static uint16 ToHost16(uint16 x) { return x; }
278
static uint32 FromHost32(uint32 x) { return x; }
279
static uint32 ToHost32(uint32 x) { return x; }
281
static bool IsLittleEndian() { return true; }
283
#endif // !defined(WORDS_BIGENDIAN)
285
// Functions to do unaligned loads and stores in little-endian order.
286
static uint16 Load16(const void *p) {
287
return ToHost16(UNALIGNED_LOAD16(p));
290
static void Store16(void *p, uint16 v) {
291
UNALIGNED_STORE16(p, FromHost16(v));
294
static uint32 Load32(const void *p) {
295
return ToHost32(UNALIGNED_LOAD32(p));
298
static void Store32(void *p, uint32 v) {
299
UNALIGNED_STORE32(p, FromHost32(v));
303
// Some bit-manipulation functions.
306
// Return floor(log2(n)) for positive integer n. Returns -1 iff n == 0.
307
static int Log2Floor(uint32 n);
309
// Return the first set least / most significant bit, 0-indexed. Returns an
310
// undefined value if n == 0. FindLSBSetNonZero() is similar to ffs() except
311
// that it's 0-indexed.
312
static int FindLSBSetNonZero(uint32 n);
313
static int FindLSBSetNonZero64(uint64 n);
316
DISALLOW_COPY_AND_ASSIGN(Bits);
#ifdef HAVE_BUILTIN_CTZ

inline int Bits::Log2Floor(uint32 n) {
  // For n != 0, clz(n) <= 31, so 31 ^ clz(n) == 31 - clz(n) == floor(log2(n)).
  return n == 0 ? -1 : 31 ^ __builtin_clz(n);
}

inline int Bits::FindLSBSetNonZero(uint32 n) {
  // Count of trailing zeros is exactly the 0-indexed position of the LSB.
  return __builtin_ctz(n);
}

inline int Bits::FindLSBSetNonZero64(uint64 n) {
  return __builtin_ctzll(n);
}
333
#else // Portable versions.
335
inline int Bits::Log2Floor(uint32 n) {
340
for (int i = 4; i >= 0; --i) {
341
int shift = (1 << i);
342
uint32 x = value >> shift;
352
inline int Bits::FindLSBSetNonZero(uint32 n) {
354
for (int i = 4, shift = 1 << 4; i >= 0; --i) {
355
const uint32 x = n << shift;
365
// FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
366
inline int Bits::FindLSBSetNonZero64(uint64 n) {
367
const uint32 bottombits = static_cast<uint32>(n);
368
if (bottombits == 0) {
369
// Bottom bits are zero, so scan in top bits
370
return 32 + FindLSBSetNonZero(static_cast<uint32>(n >> 32));
372
return FindLSBSetNonZero(bottombits);
376
#endif // End portable versions.
378
// Variable-length integer encoding.
381
// Maximum lengths of varint encoding of uint32.
382
static const int kMax32 = 5;
384
// Attempts to parse a varint32 from a prefix of the bytes in [ptr,limit-1].
385
// Never reads a character at or beyond limit. If a valid/terminated varint32
386
// was found in the range, stores it in *OUTPUT and returns a pointer just
387
// past the last byte of the varint32. Else returns NULL. On success,
388
// "result <= limit".
389
static const char* Parse32WithLimit(const char* ptr, const char* limit,
392
// REQUIRES "ptr" points to a buffer of length sufficient to hold "v".
393
// EFFECTS Encodes "v" into "ptr" and returns a pointer to the
394
// byte just past the last encoded byte.
395
static char* Encode32(char* ptr, uint32 v);
397
// EFFECTS Appends the varint representation of "value" to "*s".
398
static void Append32(string* s, uint32 value);
401
inline const char* Varint::Parse32WithLimit(const char* p,
404
const unsigned char* ptr = reinterpret_cast<const unsigned char*>(p);
405
const unsigned char* limit = reinterpret_cast<const unsigned char*>(l);
407
if (ptr >= limit) return NULL;
408
b = *(ptr++); result = b & 127; if (b < 128) goto done;
409
if (ptr >= limit) return NULL;
410
b = *(ptr++); result |= (b & 127) << 7; if (b < 128) goto done;
411
if (ptr >= limit) return NULL;
412
b = *(ptr++); result |= (b & 127) << 14; if (b < 128) goto done;
413
if (ptr >= limit) return NULL;
414
b = *(ptr++); result |= (b & 127) << 21; if (b < 128) goto done;
415
if (ptr >= limit) return NULL;
416
b = *(ptr++); result |= (b & 127) << 28; if (b < 16) goto done;
417
return NULL; // Value is too long to be a varint32
420
return reinterpret_cast<const char*>(ptr);
423
inline char* Varint::Encode32(char* sptr, uint32 v) {
424
// Operate on characters as unsigneds
425
unsigned char* ptr = reinterpret_cast<unsigned char*>(sptr);
426
static const int B = 128;
429
} else if (v < (1<<14)) {
432
} else if (v < (1<<21)) {
434
*(ptr++) = (v>>7) | B;
436
} else if (v < (1<<28)) {
438
*(ptr++) = (v>>7) | B;
439
*(ptr++) = (v>>14) | B;
443
*(ptr++) = (v>>7) | B;
444
*(ptr++) = (v>>14) | B;
445
*(ptr++) = (v>>21) | B;
448
return reinterpret_cast<char*>(ptr);
// If you know the internal layout of the std::string in use, you can
// replace this function with one that resizes the string without
// filling the new space with zeros (if applicable) --
// it will be non-portable but faster.
inline void STLStringResizeUninitialized(string* s, size_t new_size) {
  // Portable fallback: resize() zero-fills any newly added characters.
  s->resize(new_size);
}
// Return a mutable char* pointing to a string's internal buffer,
// which may not be null-terminated. Writing through this pointer will
// modify the string.
//
// string_as_array(&str)[i] is valid for 0 <= i < str.size() until the
// next call to a string method that invalidates iterators.
//
// As of 2006-04, there is no standard-blessed way of getting a
// mutable reference to a string's internal buffer. However, issue 530
// (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-defects.html#530)
// proposes this as the method. It will officially be part of the standard
// for C++0x. This should already work on all current implementations.
inline char* string_as_array(string* str) {
  // &*begin() would be undefined on an empty string, hence the NULL branch.
  return str->empty() ? NULL : &*str->begin();
}
}  // namespace snappy

#endif  // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_