86
/* Increment the 64-bit block counter kept in two 32-bit halves:
   ++count_low, and carry into count_high exactly when count_low wraps
   to zero (!++x is 1 only on wrap-around).  Kept as a single expression
   so it can be used where a statement is not allowed. */
#define SHA256_INCR(ctx) ((ctx)->count_high += !++(ctx)->count_low)
89
89
sha256_update(struct sha256_ctx *ctx,
90
unsigned length, const uint8_t *buffer)
90
unsigned length, const uint8_t *data)
93
{ /* Try to fill partial block */
94
unsigned left = SHA256_DATA_SIZE - ctx->index;
97
memcpy(ctx->block + ctx->index, buffer, length);
99
return; /* Finished */
103
memcpy(ctx->block + ctx->index, buffer, left);
105
_nettle_sha256_compress(ctx->state, ctx->block, K);
112
while (length >= SHA256_DATA_SIZE)
114
_nettle_sha256_compress(ctx->state, buffer, K);
117
buffer += SHA256_DATA_SIZE;
118
length -= SHA256_DATA_SIZE;
120
/* Buffer leftovers */
121
/* NOTE: The corresponding sha1 code checks for the special case length == 0.
122
* That seems supoptimal, as I suspect it increases the number of branches. */
124
memcpy(ctx->block, buffer, length);
92
MD_UPDATE (ctx, length, data, COMPRESS, MD_INCR(ctx));
128
/* Final wrapup - pad to SHA1_DATA_SIZE-byte boundary with the bit pattern
129
1 0* (64-bit count of bits processed, MSB-first) */
132
sha256_final(struct sha256_ctx *ctx)
96
sha256_write_digest(struct sha256_ctx *ctx,
134
uint32_t bitcount_high;
135
uint32_t bitcount_low;
140
/* Set the first char of padding to 0x80. This is safe since there is
141
always at least one byte free */
143
assert(i < SHA256_DATA_SIZE);
144
ctx->block[i++] = 0x80;
146
if (i > (SHA1_DATA_SIZE - 8))
147
{ /* No room for length in this block. Process it and
148
* pad with another one */
149
memset(ctx->block + i, 0, SHA256_DATA_SIZE - i);
150
_nettle_sha256_compress(ctx->state, ctx->block, K);
155
if (i < (SHA256_DATA_SIZE - 8))
156
memset(ctx->block + i, 0, (SHA256_DATA_SIZE - 8) - i);
158
/* There are 512 = 2^9 bits in one block */
159
bitcount_high = (ctx->count_high << 9) | (ctx->count_low >> 23);
160
bitcount_low = (ctx->count_low << 9) | (ctx->index << 3);
102
assert(length <= SHA256_DIGEST_SIZE);
104
MD_PAD(ctx, 8, COMPRESS);
106
/* There are 512 = 2^9 bits in one block */
107
high = (ctx->count_high << 9) | (ctx->count_low >> 23);
108
low = (ctx->count_low << 9) | (ctx->index << 3);
162
110
/* This is slightly inefficient, as the numbers are converted to
163
111
big-endian format, and will be converted back by the compression
164
112
function. It's probably not worth the effort to fix this. */
165
WRITE_UINT32(ctx->block + (SHA256_DATA_SIZE - 8), bitcount_high);
166
WRITE_UINT32(ctx->block + (SHA256_DATA_SIZE - 4), bitcount_low);
113
WRITE_UINT32(ctx->block + (SHA256_DATA_SIZE - 8), high);
114
WRITE_UINT32(ctx->block + (SHA256_DATA_SIZE - 4), low);
115
COMPRESS(ctx, ctx->block);
168
_nettle_sha256_compress(ctx->state, ctx->block, K);
117
_nettle_write_be32(length, digest, ctx->state);