1 /* MIT (BSD) license - see LICENSE file for details */
2 /* RIPEMD core code translated from the Bitcoin project's C++:
4 * src/crypto/ripemd160.cpp commit f914f1a746d7f91951c1da262a4a749dd3ebfa71
5 * Copyright (c) 2014 The Bitcoin Core developers
6 * Distributed under the MIT software license, see the accompanying
7 * file COPYING or http://www.opensource.org/licenses/mit-license.php.
9 #include <ccan/crypto/ripemd160/ripemd160.h>
10 #include <ccan/endian/endian.h>
/* Poison the context so accidental use after ripemd160_done() is caught.
 * NOTE(review): body elided in this copy; check_ripemd160() below implies it
 * stores all-ones sentinels into ctx->c.num / ctx->bytes — confirm upstream. */
static void invalidate_ripemd160(struct ripemd160_ctx *ctx)
#ifdef CCAN_CRYPTO_RIPEMD160_USE_OPENSSL
/* Assert the context is still live (not invalidated by a previous _done). */
static void check_ripemd160(struct ripemd160_ctx *ctx)
#ifdef CCAN_CRYPTO_RIPEMD160_USE_OPENSSL
	/* OpenSSL build: the sentinel lives inside the OpenSSL context. */
	assert(ctx->c.num != -1U);
	/* Portable build: the sentinel is the running byte counter. */
	assert(ctx->bytes != -1ULL);
#ifdef CCAN_CRYPTO_RIPEMD160_USE_OPENSSL
/* OpenSSL-backed build: delegate directly to libcrypto's RIPEMD160_* API. */
void ripemd160_init(struct ripemd160_ctx *ctx)
	RIPEMD160_Init(&ctx->c);
void ripemd160_update(struct ripemd160_ctx *ctx, const void *p, size_t size)
	RIPEMD160_Update(&ctx->c, p, size);
void ripemd160_done(struct ripemd160_ctx *ctx, struct ripemd160 *res)
	/* Emit the 20-byte digest, then poison the context against reuse. */
	RIPEMD160_Final(res->u.u8, &ctx->c);
	invalidate_ripemd160(ctx);
/* f1: bitwise parity (XOR) of the three words. */
static inline uint32_t f1(uint32_t x, uint32_t y, uint32_t z)
{
	return (x ^ y) ^ z;
}
/* f2: bitwise multiplex — each result bit comes from y where x has a 1 bit,
 * from z where x has a 0 bit.  Uses the xor-mux identity, equivalent to
 * (x & y) | (~x & z). */
static inline uint32_t f2(uint32_t x, uint32_t y, uint32_t z)
{
	return z ^ (x & (y ^ z));
}
/* f3: (x OR NOT y) XOR z.  Written via De Morgan (x | ~y == ~(~x & y)),
 * behaviorally identical to the textbook form. */
static inline uint32_t f3(uint32_t x, uint32_t y, uint32_t z)
{
	return ~(~x & y) ^ z;
}
/* f4: bitwise multiplex selected by z — x where z has a 1 bit, y where it
 * has a 0 bit.  Xor-mux identity, equivalent to (x & z) | (y & ~z). */
static inline uint32_t f4(uint32_t x, uint32_t y, uint32_t z)
{
	return y ^ (z & (x ^ y));
}
/* f5: x XOR (y OR NOT z).  Since y | ~z == ~(~y & z) and a ^ ~b == ~(a ^ b),
 * this complement form is behaviorally identical. */
static inline uint32_t f5(uint32_t x, uint32_t y, uint32_t z)
{
	return ~(x ^ (~y & z));
}
/** Initialize RIPEMD-160 state.
 *  NOTE(review): the five IV word assignments are elided in this copy —
 *  confirm against the RIPEMD-160 specification / upstream ripemd160.cpp. */
static void inline Initialize(uint32_t* s)
/** Rotate x left by i bits (i in [0, 31]).
 *  Both shift counts are masked so the expression stays well-defined even
 *  for i == 0 — the original `x >> (32 - i)` is undefined behavior (shift by
 *  the full width) in that case.  All current callers pass 5..15, for which
 *  the result is unchanged; compilers still recognize this as a rotate. */
static inline uint32_t rol(uint32_t x, int i)
{
	return (x << (i & 31)) | (x >> ((32 - i) & 31));
}
/* Generic round step: *a = rol(*a + f(...) + message word + round const, r) + e.
 * b and d are unused here; they keep all R* wrappers call-compatible.
 * NOTE(review): the spec's companion rotation of *c (left by 10) is not
 * visible in this copy — confirm against upstream ripemd160.cpp. */
static void inline Round(uint32_t *a, uint32_t b, uint32_t *c, uint32_t d, uint32_t e, uint32_t f, uint32_t x, uint32_t k, int r)
	*a = rol(*a + f + x + k, r) + e;
/* Left-line round operations: rounds 1..5 apply f1..f5 in order, with the
 * standard RIPEMD-160 left-line additive constants (0, then values derived
 * from square roots of small primes per the spec). */
static void inline R11(uint32_t *a, uint32_t b, uint32_t *c, uint32_t d, uint32_t e, uint32_t x, int r) { Round(a, b, c, d, e, f1(b, *c, d), x, 0, r); }
static void inline R21(uint32_t *a, uint32_t b, uint32_t *c, uint32_t d, uint32_t e, uint32_t x, int r) { Round(a, b, c, d, e, f2(b, *c, d), x, 0x5A827999ul, r); }
static void inline R31(uint32_t *a, uint32_t b, uint32_t *c, uint32_t d, uint32_t e, uint32_t x, int r) { Round(a, b, c, d, e, f3(b, *c, d), x, 0x6ED9EBA1ul, r); }
static void inline R41(uint32_t *a, uint32_t b, uint32_t *c, uint32_t d, uint32_t e, uint32_t x, int r) { Round(a, b, c, d, e, f4(b, *c, d), x, 0x8F1BBCDCul, r); }
static void inline R51(uint32_t *a, uint32_t b, uint32_t *c, uint32_t d, uint32_t e, uint32_t x, int r) { Round(a, b, c, d, e, f5(b, *c, d), x, 0xA953FD4Eul, r); }
/* Right-line round operations: the selection functions are applied in
 * reverse order (f5..f1), with the right-line additive constants. */
static void inline R12(uint32_t *a, uint32_t b, uint32_t *c, uint32_t d, uint32_t e, uint32_t x, int r) { Round(a, b, c, d, e, f5(b, *c, d), x, 0x50A28BE6ul, r); }
static void inline R22(uint32_t *a, uint32_t b, uint32_t *c, uint32_t d, uint32_t e, uint32_t x, int r) { Round(a, b, c, d, e, f4(b, *c, d), x, 0x5C4DD124ul, r); }
static void inline R32(uint32_t *a, uint32_t b, uint32_t *c, uint32_t d, uint32_t e, uint32_t x, int r) { Round(a, b, c, d, e, f3(b, *c, d), x, 0x6D703EF3ul, r); }
static void inline R42(uint32_t *a, uint32_t b, uint32_t *c, uint32_t d, uint32_t e, uint32_t x, int r) { Round(a, b, c, d, e, f2(b, *c, d), x, 0x7A6D76E9ul, r); }
static void inline R52(uint32_t *a, uint32_t b, uint32_t *c, uint32_t d, uint32_t e, uint32_t x, int r) { Round(a, b, c, d, e, f1(b, *c, d), x, 0, r); }
/** Perform a RIPEMD-160 transformation, processing a 64-byte chunk.
 *  s is the 5-word running state; chunk holds 16 little-endian 32-bit
 *  message words. */
static void Transform(uint32_t *s, const uint32_t *chunk)
	/* Two independent "lines" (left: a1..e1, right: a2..e2) start from the
	 * same state and are recombined at the end. */
	uint32_t a1 = s[0], b1 = s[1], c1 = s[2], d1 = s[3], e1 = s[4];
	uint32_t a2 = a1, b2 = b1, c2 = c1, d2 = d1, e2 = e1;
	/* Load the 16 message words, converting from little-endian. */
	uint32_t w0 = le32_to_cpu(chunk[0]), w1 = le32_to_cpu(chunk[1]), w2 = le32_to_cpu(chunk[2]), w3 = le32_to_cpu(chunk[3]);
	uint32_t w4 = le32_to_cpu(chunk[4]), w5 = le32_to_cpu(chunk[5]), w6 = le32_to_cpu(chunk[6]), w7 = le32_to_cpu(chunk[7]);
	uint32_t w8 = le32_to_cpu(chunk[8]), w9 = le32_to_cpu(chunk[9]), w10 = le32_to_cpu(chunk[10]), w11 = le32_to_cpu(chunk[11]);
	uint32_t w12 = le32_to_cpu(chunk[12]), w13 = le32_to_cpu(chunk[13]), w14 = le32_to_cpu(chunk[14]), w15 = le32_to_cpu(chunk[15]);
	/* Round 1: left line uses f1 (R11), right line f5 (R12), interleaved. */
	R11(&a1, b1, &c1, d1, e1, w0, 11);
	R12(&a2, b2, &c2, d2, e2, w5, 8);
	R11(&e1, a1, &b1, c1, d1, w1, 14);
	R12(&e2, a2, &b2, c2, d2, w14, 9);
	R11(&d1, e1, &a1, b1, c1, w2, 15);
	R12(&d2, e2, &a2, b2, c2, w7, 9);
	R11(&c1, d1, &e1, a1, b1, w3, 12);
	R12(&c2, d2, &e2, a2, b2, w0, 11);
	R11(&b1, c1, &d1, e1, a1, w4, 5);
	R12(&b2, c2, &d2, e2, a2, w9, 13);
	R11(&a1, b1, &c1, d1, e1, w5, 8);
	R12(&a2, b2, &c2, d2, e2, w2, 15);
	R11(&e1, a1, &b1, c1, d1, w6, 7);
	R12(&e2, a2, &b2, c2, d2, w11, 15);
	R11(&d1, e1, &a1, b1, c1, w7, 9);
	R12(&d2, e2, &a2, b2, c2, w4, 5);
	R11(&c1, d1, &e1, a1, b1, w8, 11);
	R12(&c2, d2, &e2, a2, b2, w13, 7);
	R11(&b1, c1, &d1, e1, a1, w9, 13);
	R12(&b2, c2, &d2, e2, a2, w6, 7);
	R11(&a1, b1, &c1, d1, e1, w10, 14);
	R12(&a2, b2, &c2, d2, e2, w15, 8);
	R11(&e1, a1, &b1, c1, d1, w11, 15);
	R12(&e2, a2, &b2, c2, d2, w8, 11);
	R11(&d1, e1, &a1, b1, c1, w12, 6);
	R12(&d2, e2, &a2, b2, c2, w1, 14);
	R11(&c1, d1, &e1, a1, b1, w13, 7);
	R12(&c2, d2, &e2, a2, b2, w10, 14);
	R11(&b1, c1, &d1, e1, a1, w14, 9);
	R12(&b2, c2, &d2, e2, a2, w3, 12);
	R11(&a1, b1, &c1, d1, e1, w15, 8);
	R12(&a2, b2, &c2, d2, e2, w12, 6);
	/* Round 2: left f2 (R21), right f4 (R22). */
	R21(&e1, a1, &b1, c1, d1, w7, 7);
	R22(&e2, a2, &b2, c2, d2, w6, 9);
	R21(&d1, e1, &a1, b1, c1, w4, 6);
	R22(&d2, e2, &a2, b2, c2, w11, 13);
	R21(&c1, d1, &e1, a1, b1, w13, 8);
	R22(&c2, d2, &e2, a2, b2, w3, 15);
	R21(&b1, c1, &d1, e1, a1, w1, 13);
	R22(&b2, c2, &d2, e2, a2, w7, 7);
	R21(&a1, b1, &c1, d1, e1, w10, 11);
	R22(&a2, b2, &c2, d2, e2, w0, 12);
	R21(&e1, a1, &b1, c1, d1, w6, 9);
	R22(&e2, a2, &b2, c2, d2, w13, 8);
	R21(&d1, e1, &a1, b1, c1, w15, 7);
	R22(&d2, e2, &a2, b2, c2, w5, 9);
	R21(&c1, d1, &e1, a1, b1, w3, 15);
	R22(&c2, d2, &e2, a2, b2, w10, 11);
	R21(&b1, c1, &d1, e1, a1, w12, 7);
	R22(&b2, c2, &d2, e2, a2, w14, 7);
	R21(&a1, b1, &c1, d1, e1, w0, 12);
	R22(&a2, b2, &c2, d2, e2, w15, 7);
	R21(&e1, a1, &b1, c1, d1, w9, 15);
	R22(&e2, a2, &b2, c2, d2, w8, 12);
	R21(&d1, e1, &a1, b1, c1, w5, 9);
	R22(&d2, e2, &a2, b2, c2, w12, 7);
	R21(&c1, d1, &e1, a1, b1, w2, 11);
	R22(&c2, d2, &e2, a2, b2, w4, 6);
	R21(&b1, c1, &d1, e1, a1, w14, 7);
	R22(&b2, c2, &d2, e2, a2, w9, 15);
	R21(&a1, b1, &c1, d1, e1, w11, 13);
	R22(&a2, b2, &c2, d2, e2, w1, 13);
	R21(&e1, a1, &b1, c1, d1, w8, 12);
	R22(&e2, a2, &b2, c2, d2, w2, 11);
	/* Round 3: both lines use f3 (R31/R32), different constants/order. */
	R31(&d1, e1, &a1, b1, c1, w3, 11);
	R32(&d2, e2, &a2, b2, c2, w15, 9);
	R31(&c1, d1, &e1, a1, b1, w10, 13);
	R32(&c2, d2, &e2, a2, b2, w5, 7);
	R31(&b1, c1, &d1, e1, a1, w14, 6);
	R32(&b2, c2, &d2, e2, a2, w1, 15);
	R31(&a1, b1, &c1, d1, e1, w4, 7);
	R32(&a2, b2, &c2, d2, e2, w3, 11);
	R31(&e1, a1, &b1, c1, d1, w9, 14);
	R32(&e2, a2, &b2, c2, d2, w7, 8);
	R31(&d1, e1, &a1, b1, c1, w15, 9);
	R32(&d2, e2, &a2, b2, c2, w14, 6);
	R31(&c1, d1, &e1, a1, b1, w8, 13);
	R32(&c2, d2, &e2, a2, b2, w6, 6);
	R31(&b1, c1, &d1, e1, a1, w1, 15);
	R32(&b2, c2, &d2, e2, a2, w9, 14);
	R31(&a1, b1, &c1, d1, e1, w2, 14);
	R32(&a2, b2, &c2, d2, e2, w11, 12);
	R31(&e1, a1, &b1, c1, d1, w7, 8);
	R32(&e2, a2, &b2, c2, d2, w8, 13);
	R31(&d1, e1, &a1, b1, c1, w0, 13);
	R32(&d2, e2, &a2, b2, c2, w12, 5);
	R31(&c1, d1, &e1, a1, b1, w6, 6);
	R32(&c2, d2, &e2, a2, b2, w2, 14);
	R31(&b1, c1, &d1, e1, a1, w13, 5);
	R32(&b2, c2, &d2, e2, a2, w10, 13);
	R31(&a1, b1, &c1, d1, e1, w11, 12);
	R32(&a2, b2, &c2, d2, e2, w0, 13);
	R31(&e1, a1, &b1, c1, d1, w5, 7);
	R32(&e2, a2, &b2, c2, d2, w4, 7);
	R31(&d1, e1, &a1, b1, c1, w12, 5);
	R32(&d2, e2, &a2, b2, c2, w13, 5);
	/* Round 4: left f4 (R41), right f2 (R42). */
	R41(&c1, d1, &e1, a1, b1, w1, 11);
	R42(&c2, d2, &e2, a2, b2, w8, 15);
	R41(&b1, c1, &d1, e1, a1, w9, 12);
	R42(&b2, c2, &d2, e2, a2, w6, 5);
	R41(&a1, b1, &c1, d1, e1, w11, 14);
	R42(&a2, b2, &c2, d2, e2, w4, 8);
	R41(&e1, a1, &b1, c1, d1, w10, 15);
	R42(&e2, a2, &b2, c2, d2, w1, 11);
	R41(&d1, e1, &a1, b1, c1, w0, 14);
	R42(&d2, e2, &a2, b2, c2, w3, 14);
	R41(&c1, d1, &e1, a1, b1, w8, 15);
	R42(&c2, d2, &e2, a2, b2, w11, 14);
	R41(&b1, c1, &d1, e1, a1, w12, 9);
	R42(&b2, c2, &d2, e2, a2, w15, 6);
	R41(&a1, b1, &c1, d1, e1, w4, 8);
	R42(&a2, b2, &c2, d2, e2, w0, 14);
	R41(&e1, a1, &b1, c1, d1, w13, 9);
	R42(&e2, a2, &b2, c2, d2, w5, 6);
	R41(&d1, e1, &a1, b1, c1, w3, 14);
	R42(&d2, e2, &a2, b2, c2, w12, 9);
	R41(&c1, d1, &e1, a1, b1, w7, 5);
	R42(&c2, d2, &e2, a2, b2, w2, 12);
	R41(&b1, c1, &d1, e1, a1, w15, 6);
	R42(&b2, c2, &d2, e2, a2, w13, 9);
	R41(&a1, b1, &c1, d1, e1, w14, 8);
	R42(&a2, b2, &c2, d2, e2, w9, 12);
	R41(&e1, a1, &b1, c1, d1, w5, 6);
	R42(&e2, a2, &b2, c2, d2, w7, 5);
	R41(&d1, e1, &a1, b1, c1, w6, 5);
	R42(&d2, e2, &a2, b2, c2, w10, 15);
	R41(&c1, d1, &e1, a1, b1, w2, 12);
	R42(&c2, d2, &e2, a2, b2, w14, 8);
	/* Round 5: left f5 (R51), right f1 (R52). */
	R51(&b1, c1, &d1, e1, a1, w4, 9);
	R52(&b2, c2, &d2, e2, a2, w12, 8);
	R51(&a1, b1, &c1, d1, e1, w0, 15);
	R52(&a2, b2, &c2, d2, e2, w15, 5);
	R51(&e1, a1, &b1, c1, d1, w5, 5);
	R52(&e2, a2, &b2, c2, d2, w10, 12);
	R51(&d1, e1, &a1, b1, c1, w9, 11);
	R52(&d2, e2, &a2, b2, c2, w4, 9);
	R51(&c1, d1, &e1, a1, b1, w7, 6);
	R52(&c2, d2, &e2, a2, b2, w1, 12);
	R51(&b1, c1, &d1, e1, a1, w12, 8);
	R52(&b2, c2, &d2, e2, a2, w5, 5);
	R51(&a1, b1, &c1, d1, e1, w2, 13);
	R52(&a2, b2, &c2, d2, e2, w8, 14);
	R51(&e1, a1, &b1, c1, d1, w10, 12);
	R52(&e2, a2, &b2, c2, d2, w7, 6);
	R51(&d1, e1, &a1, b1, c1, w14, 5);
	R52(&d2, e2, &a2, b2, c2, w6, 8);
	R51(&c1, d1, &e1, a1, b1, w1, 12);
	R52(&c2, d2, &e2, a2, b2, w2, 13);
	R51(&b1, c1, &d1, e1, a1, w3, 13);
	R52(&b2, c2, &d2, e2, a2, w13, 6);
	R51(&a1, b1, &c1, d1, e1, w8, 14);
	R52(&a2, b2, &c2, d2, e2, w14, 5);
	R51(&e1, a1, &b1, c1, d1, w11, 11);
	R52(&e2, a2, &b2, c2, d2, w0, 15);
	R51(&d1, e1, &a1, b1, c1, w6, 8);
	R52(&d2, e2, &a2, b2, c2, w3, 13);
	R51(&c1, d1, &e1, a1, b1, w15, 5);
	R52(&c2, d2, &e2, a2, b2, w9, 11);
	R51(&b1, c1, &d1, e1, a1, w13, 6);
	R52(&b2, c2, &d2, e2, a2, w11, 11);
	/* Recombine the two lines into the state, rotated by one word.
	 * NOTE(review): the s[4] update (which should use the pre-update value
	 * of s[0] via a temporary) is not visible in this copy — confirm
	 * against upstream ripemd160.cpp. */
	s[0] = s[1] + c1 + d2;
	s[1] = s[2] + d1 + e2;
	s[2] = s[3] + e1 + a2;
	s[3] = s[4] + a1 + b2;
/* Is p suitably aligned for an n-byte access?  On targets that define
 * HAVE_UNALIGNED_ACCESS the check is unnecessary (that branch appears to be
 * elided in this copy); otherwise require p to be a multiple of n. */
static bool alignment_ok(const void *p, size_t n)
#if HAVE_UNALIGNED_ACCESS
	return ((size_t)p % n == 0);
/* Feed len bytes at p into the hash: buffer partial 64-byte blocks in
 * ctx->buf and run Transform() for each complete block. */
static void add(struct ripemd160_ctx *ctx, const void *p, size_t len)
	const unsigned char *data = p;
	/* Bytes already sitting in the partial-block buffer. */
	size_t bufsize = ctx->bytes % 64;
	if (bufsize + len >= 64) {
		// Fill the buffer, and process it.
		memcpy(ctx->buf.u8 + bufsize, data, 64 - bufsize);
		ctx->bytes += 64 - bufsize;
		data += 64 - bufsize;
		Transform(ctx->s, ctx->buf.u32);
	// Process full chunks directly from the source.
	if (alignment_ok(data, sizeof(uint32_t)))
		Transform(ctx->s, (const uint32_t *)data);
		/* NOTE(review): this copy path should be the unaligned fallback;
		 * the else/loop structure around it appears elided in this copy —
		 * confirm against upstream. */
		memcpy(ctx->buf.u8, data, sizeof(ctx->buf));
		Transform(ctx->s, ctx->buf.u32);
	// Fill the buffer with what remains.
	memcpy(ctx->buf.u8 + bufsize, data, len);
/* Portable build: reset the context to a fresh RIPEMD160_INIT value.
 * NOTE(review): the assignment of `init` into *ctx is elided in this copy. */
void ripemd160_init(struct ripemd160_ctx *ctx)
	struct ripemd160_ctx init = RIPEMD160_INIT;
/* Feed `size` bytes at p into the hash; asserts the context is still live.
 * NOTE(review): the call into add() is elided in this copy. */
void ripemd160_update(struct ripemd160_ctx *ctx, const void *p, size_t size)
	check_ripemd160(ctx);
/* Finalize: pad the message, append the bit-length, emit the 20-byte digest
 * into res, and poison the context against reuse. */
void ripemd160_done(struct ripemd160_ctx *ctx, struct ripemd160 *res)
	/* First pad byte is the mandatory 0x80 terminator bit; rest are zero. */
	static const unsigned char pad[64] = {0x80};
	/* Message length in *bits*, stored little-endian (cpu_to_le64). */
	sizedesc = cpu_to_le64(ctx->bytes << 3);
	/* Add '1' bit to terminate, then all 0 bits, up to next block - 8. */
	add(ctx, pad, 1 + ((119 - (ctx->bytes % 64)) % 64));
	/* Add number of bits of data (little endian — the previous comment said
	 * "big endian", but the cpu_to_le64 above shows otherwise). */
	add(ctx, &sizedesc, 8);
	/* State words are emitted little-endian to form the digest. */
	for (i = 0; i < sizeof(ctx->s) / sizeof(ctx->s[0]); i++)
		res->u.u32[i] = cpu_to_le32(ctx->s[i]);
	invalidate_ripemd160(ctx);
/* One-shot convenience: hash `size` bytes at p straight into *sha. */
void ripemd160(struct ripemd160 *sha, const void *p, size_t size)
	struct ripemd160_ctx ctx;
	ripemd160_init(&ctx);
	ripemd160_update(&ctx, p, size);
	ripemd160_done(&ctx, sha);
/* Add integers in host byte order: the raw in-memory bytes of v are hashed,
 * so for the multi-byte variants the digest depends on host endianness. */
void ripemd160_u8(struct ripemd160_ctx *ctx, uint8_t v)
	ripemd160_update(ctx, &v, sizeof(v));
void ripemd160_u16(struct ripemd160_ctx *ctx, uint16_t v)
	ripemd160_update(ctx, &v, sizeof(v));
void ripemd160_u32(struct ripemd160_ctx *ctx, uint32_t v)
	ripemd160_update(ctx, &v, sizeof(v));
void ripemd160_u64(struct ripemd160_ctx *ctx, uint64_t v)
	ripemd160_update(ctx, &v, sizeof(v));
/* Add as little-endian: convert first so the hashed bytes are the same on
 * every host. */
void ripemd160_le16(struct ripemd160_ctx *ctx, uint16_t v)
	leint16_t lev = cpu_to_le16(v);
	ripemd160_update(ctx, &lev, sizeof(lev));
void ripemd160_le32(struct ripemd160_ctx *ctx, uint32_t v)
	leint32_t lev = cpu_to_le32(v);
	ripemd160_update(ctx, &lev, sizeof(lev));
void ripemd160_le64(struct ripemd160_ctx *ctx, uint64_t v)
	leint64_t lev = cpu_to_le64(v);
	ripemd160_update(ctx, &lev, sizeof(lev));
/* Add as big-endian: convert first so the hashed bytes are the same on
 * every host. */
void ripemd160_be16(struct ripemd160_ctx *ctx, uint16_t v)
	beint16_t bev = cpu_to_be16(v);
	ripemd160_update(ctx, &bev, sizeof(bev));
void ripemd160_be32(struct ripemd160_ctx *ctx, uint32_t v)
	beint32_t bev = cpu_to_be32(v);
	ripemd160_update(ctx, &bev, sizeof(bev));
void ripemd160_be64(struct ripemd160_ctx *ctx, uint64_t v)
	beint64_t bev = cpu_to_be64(v);
	ripemd160_update(ctx, &bev, sizeof(bev));