/* MIT (BSD) license - see LICENSE file for details */
/* RIPEMD core code translated from the Bitcoin project's C++:
 * src/crypto/ripemd160.cpp commit f914f1a746d7f91951c1da262a4a749dd3ebfa71
 * Copyright (c) 2014 The Bitcoin Core developers
 * Distributed under the MIT software license, see the accompanying
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.
 */
#include <ccan/crypto/ripemd160/ripemd160.h>
#include <ccan/endian/endian.h>
#include <ccan/compiler/compiler.h>
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
16 static void invalidate_ripemd160(struct ripemd160_ctx *ctx)
18 #ifdef CCAN_CRYPTO_RIPEMD160_USE_OPENSSL
25 static void check_ripemd160(struct ripemd160_ctx *ctx UNUSED)
27 #ifdef CCAN_CRYPTO_RIPEMD160_USE_OPENSSL
28 assert(ctx->c.num != -1U);
30 assert(ctx->bytes != -1ULL);
34 #ifdef CCAN_CRYPTO_RIPEMD160_USE_OPENSSL
35 void ripemd160_init(struct ripemd160_ctx *ctx)
37 RIPEMD160_Init(&ctx->c);
40 void ripemd160_update(struct ripemd160_ctx *ctx, const void *p, size_t size)
43 RIPEMD160_Update(&ctx->c, p, size);
46 void ripemd160_done(struct ripemd160_ctx *ctx, struct ripemd160 *res)
48 RIPEMD160_Final(res->u.u8, &ctx->c);
49 invalidate_ripemd160(ctx);
/* The five boolean selection functions used by the RIPEMD-160 round groups,
 * plus a 32-bit rotate-left.  f2 and f4 use the mux form z ^ (x & (y ^ z)),
 * which is equivalent to the textbook (x & y) | (~x & z) formulation. */
static inline uint32_t f1(uint32_t x, uint32_t y, uint32_t z) { return x ^ y ^ z; }
static inline uint32_t f2(uint32_t x, uint32_t y, uint32_t z) { return z ^ (x & (y ^ z)); }
static inline uint32_t f3(uint32_t x, uint32_t y, uint32_t z) { return z ^ (x | ~y); }
static inline uint32_t f4(uint32_t x, uint32_t y, uint32_t z) { return y ^ (z & (x ^ y)); }
static inline uint32_t f5(uint32_t x, uint32_t y, uint32_t z) { return (y | ~z) ^ x; }

/* Rotate left; count must be in 1..31 (all callers obey this). */
static inline uint32_t rol(uint32_t v, int count) { return (v << count) | (v >> (32 - count)); }
60 static void inline Round(uint32_t *a, uint32_t b UNUSED, uint32_t *c, uint32_t d UNUSED, uint32_t e, uint32_t f, uint32_t x, uint32_t k, int r)
62 *a = rol(*a + f + x + k, r) + e;
/* Per-group round wrappers.  R<n>1 is the left line, R<n>2 the right line of
 * group n; each forwards to Round() with that group's selection function and
 * additive constant (floor(2^30 * sqrt/cbrt of 2, 3, 5, 7) and their mirrors). */
static inline void R11(uint32_t *pa, uint32_t vb, uint32_t *pc, uint32_t vd, uint32_t ve, uint32_t wx, int rot) { Round(pa, vb, pc, vd, ve, f1(vb, *pc, vd), wx, 0, rot); }
static inline void R21(uint32_t *pa, uint32_t vb, uint32_t *pc, uint32_t vd, uint32_t ve, uint32_t wx, int rot) { Round(pa, vb, pc, vd, ve, f2(vb, *pc, vd), wx, 0x5A827999ul, rot); }
static inline void R31(uint32_t *pa, uint32_t vb, uint32_t *pc, uint32_t vd, uint32_t ve, uint32_t wx, int rot) { Round(pa, vb, pc, vd, ve, f3(vb, *pc, vd), wx, 0x6ED9EBA1ul, rot); }
static inline void R41(uint32_t *pa, uint32_t vb, uint32_t *pc, uint32_t vd, uint32_t ve, uint32_t wx, int rot) { Round(pa, vb, pc, vd, ve, f4(vb, *pc, vd), wx, 0x8F1BBCDCul, rot); }
static inline void R51(uint32_t *pa, uint32_t vb, uint32_t *pc, uint32_t vd, uint32_t ve, uint32_t wx, int rot) { Round(pa, vb, pc, vd, ve, f5(vb, *pc, vd), wx, 0xA953FD4Eul, rot); }

static inline void R12(uint32_t *pa, uint32_t vb, uint32_t *pc, uint32_t vd, uint32_t ve, uint32_t wx, int rot) { Round(pa, vb, pc, vd, ve, f5(vb, *pc, vd), wx, 0x50A28BE6ul, rot); }
static inline void R22(uint32_t *pa, uint32_t vb, uint32_t *pc, uint32_t vd, uint32_t ve, uint32_t wx, int rot) { Round(pa, vb, pc, vd, ve, f4(vb, *pc, vd), wx, 0x5C4DD124ul, rot); }
static inline void R32(uint32_t *pa, uint32_t vb, uint32_t *pc, uint32_t vd, uint32_t ve, uint32_t wx, int rot) { Round(pa, vb, pc, vd, ve, f3(vb, *pc, vd), wx, 0x6D703EF3ul, rot); }
static inline void R42(uint32_t *pa, uint32_t vb, uint32_t *pc, uint32_t vd, uint32_t ve, uint32_t wx, int rot) { Round(pa, vb, pc, vd, ve, f2(vb, *pc, vd), wx, 0x7A6D76E9ul, rot); }
static inline void R52(uint32_t *pa, uint32_t vb, uint32_t *pc, uint32_t vd, uint32_t ve, uint32_t wx, int rot) { Round(pa, vb, pc, vd, ve, f1(vb, *pc, vd), wx, 0, rot); }
/** Perform a RIPEMD-160 transformation, processing a 64-byte chunk.
 *
 * s: five-word chaining state, updated in place.
 * chunk: 16 little-endian 32-bit input words.
 *
 * The visible source had lost the opening brace, the saved copy of s[0]
 * and the final `s[4]` combination line; restored from upstream.  The
 * 160 interleaved round calls (left line: a1..e1, right line: a2..e2)
 * are unchanged.
 */
static void Transform(uint32_t *s, const uint32_t *chunk)
{
	uint32_t a1 = s[0], b1 = s[1], c1 = s[2], d1 = s[3], e1 = s[4];
	uint32_t a2 = a1, b2 = b1, c2 = c1, d2 = d1, e2 = e1;
	uint32_t w0 = le32_to_cpu(chunk[0]), w1 = le32_to_cpu(chunk[1]), w2 = le32_to_cpu(chunk[2]), w3 = le32_to_cpu(chunk[3]);
	uint32_t w4 = le32_to_cpu(chunk[4]), w5 = le32_to_cpu(chunk[5]), w6 = le32_to_cpu(chunk[6]), w7 = le32_to_cpu(chunk[7]);
	uint32_t w8 = le32_to_cpu(chunk[8]), w9 = le32_to_cpu(chunk[9]), w10 = le32_to_cpu(chunk[10]), w11 = le32_to_cpu(chunk[11]);
	uint32_t w12 = le32_to_cpu(chunk[12]), w13 = le32_to_cpu(chunk[13]), w14 = le32_to_cpu(chunk[14]), w15 = le32_to_cpu(chunk[15]);
	uint32_t t;

	/* Round group 1. */
	R11(&a1, b1, &c1, d1, e1, w0, 11);
	R12(&a2, b2, &c2, d2, e2, w5, 8);
	R11(&e1, a1, &b1, c1, d1, w1, 14);
	R12(&e2, a2, &b2, c2, d2, w14, 9);
	R11(&d1, e1, &a1, b1, c1, w2, 15);
	R12(&d2, e2, &a2, b2, c2, w7, 9);
	R11(&c1, d1, &e1, a1, b1, w3, 12);
	R12(&c2, d2, &e2, a2, b2, w0, 11);
	R11(&b1, c1, &d1, e1, a1, w4, 5);
	R12(&b2, c2, &d2, e2, a2, w9, 13);
	R11(&a1, b1, &c1, d1, e1, w5, 8);
	R12(&a2, b2, &c2, d2, e2, w2, 15);
	R11(&e1, a1, &b1, c1, d1, w6, 7);
	R12(&e2, a2, &b2, c2, d2, w11, 15);
	R11(&d1, e1, &a1, b1, c1, w7, 9);
	R12(&d2, e2, &a2, b2, c2, w4, 5);
	R11(&c1, d1, &e1, a1, b1, w8, 11);
	R12(&c2, d2, &e2, a2, b2, w13, 7);
	R11(&b1, c1, &d1, e1, a1, w9, 13);
	R12(&b2, c2, &d2, e2, a2, w6, 7);
	R11(&a1, b1, &c1, d1, e1, w10, 14);
	R12(&a2, b2, &c2, d2, e2, w15, 8);
	R11(&e1, a1, &b1, c1, d1, w11, 15);
	R12(&e2, a2, &b2, c2, d2, w8, 11);
	R11(&d1, e1, &a1, b1, c1, w12, 6);
	R12(&d2, e2, &a2, b2, c2, w1, 14);
	R11(&c1, d1, &e1, a1, b1, w13, 7);
	R12(&c2, d2, &e2, a2, b2, w10, 14);
	R11(&b1, c1, &d1, e1, a1, w14, 9);
	R12(&b2, c2, &d2, e2, a2, w3, 12);
	R11(&a1, b1, &c1, d1, e1, w15, 8);
	R12(&a2, b2, &c2, d2, e2, w12, 6);

	/* Round group 2. */
	R21(&e1, a1, &b1, c1, d1, w7, 7);
	R22(&e2, a2, &b2, c2, d2, w6, 9);
	R21(&d1, e1, &a1, b1, c1, w4, 6);
	R22(&d2, e2, &a2, b2, c2, w11, 13);
	R21(&c1, d1, &e1, a1, b1, w13, 8);
	R22(&c2, d2, &e2, a2, b2, w3, 15);
	R21(&b1, c1, &d1, e1, a1, w1, 13);
	R22(&b2, c2, &d2, e2, a2, w7, 7);
	R21(&a1, b1, &c1, d1, e1, w10, 11);
	R22(&a2, b2, &c2, d2, e2, w0, 12);
	R21(&e1, a1, &b1, c1, d1, w6, 9);
	R22(&e2, a2, &b2, c2, d2, w13, 8);
	R21(&d1, e1, &a1, b1, c1, w15, 7);
	R22(&d2, e2, &a2, b2, c2, w5, 9);
	R21(&c1, d1, &e1, a1, b1, w3, 15);
	R22(&c2, d2, &e2, a2, b2, w10, 11);
	R21(&b1, c1, &d1, e1, a1, w12, 7);
	R22(&b2, c2, &d2, e2, a2, w14, 7);
	R21(&a1, b1, &c1, d1, e1, w0, 12);
	R22(&a2, b2, &c2, d2, e2, w15, 7);
	R21(&e1, a1, &b1, c1, d1, w9, 15);
	R22(&e2, a2, &b2, c2, d2, w8, 12);
	R21(&d1, e1, &a1, b1, c1, w5, 9);
	R22(&d2, e2, &a2, b2, c2, w12, 7);
	R21(&c1, d1, &e1, a1, b1, w2, 11);
	R22(&c2, d2, &e2, a2, b2, w4, 6);
	R21(&b1, c1, &d1, e1, a1, w14, 7);
	R22(&b2, c2, &d2, e2, a2, w9, 15);
	R21(&a1, b1, &c1, d1, e1, w11, 13);
	R22(&a2, b2, &c2, d2, e2, w1, 13);
	R21(&e1, a1, &b1, c1, d1, w8, 12);
	R22(&e2, a2, &b2, c2, d2, w2, 11);

	/* Round group 3. */
	R31(&d1, e1, &a1, b1, c1, w3, 11);
	R32(&d2, e2, &a2, b2, c2, w15, 9);
	R31(&c1, d1, &e1, a1, b1, w10, 13);
	R32(&c2, d2, &e2, a2, b2, w5, 7);
	R31(&b1, c1, &d1, e1, a1, w14, 6);
	R32(&b2, c2, &d2, e2, a2, w1, 15);
	R31(&a1, b1, &c1, d1, e1, w4, 7);
	R32(&a2, b2, &c2, d2, e2, w3, 11);
	R31(&e1, a1, &b1, c1, d1, w9, 14);
	R32(&e2, a2, &b2, c2, d2, w7, 8);
	R31(&d1, e1, &a1, b1, c1, w15, 9);
	R32(&d2, e2, &a2, b2, c2, w14, 6);
	R31(&c1, d1, &e1, a1, b1, w8, 13);
	R32(&c2, d2, &e2, a2, b2, w6, 6);
	R31(&b1, c1, &d1, e1, a1, w1, 15);
	R32(&b2, c2, &d2, e2, a2, w9, 14);
	R31(&a1, b1, &c1, d1, e1, w2, 14);
	R32(&a2, b2, &c2, d2, e2, w11, 12);
	R31(&e1, a1, &b1, c1, d1, w7, 8);
	R32(&e2, a2, &b2, c2, d2, w8, 13);
	R31(&d1, e1, &a1, b1, c1, w0, 13);
	R32(&d2, e2, &a2, b2, c2, w12, 5);
	R31(&c1, d1, &e1, a1, b1, w6, 6);
	R32(&c2, d2, &e2, a2, b2, w2, 14);
	R31(&b1, c1, &d1, e1, a1, w13, 5);
	R32(&b2, c2, &d2, e2, a2, w10, 13);
	R31(&a1, b1, &c1, d1, e1, w11, 12);
	R32(&a2, b2, &c2, d2, e2, w0, 13);
	R31(&e1, a1, &b1, c1, d1, w5, 7);
	R32(&e2, a2, &b2, c2, d2, w4, 7);
	R31(&d1, e1, &a1, b1, c1, w12, 5);
	R32(&d2, e2, &a2, b2, c2, w13, 5);

	/* Round group 4. */
	R41(&c1, d1, &e1, a1, b1, w1, 11);
	R42(&c2, d2, &e2, a2, b2, w8, 15);
	R41(&b1, c1, &d1, e1, a1, w9, 12);
	R42(&b2, c2, &d2, e2, a2, w6, 5);
	R41(&a1, b1, &c1, d1, e1, w11, 14);
	R42(&a2, b2, &c2, d2, e2, w4, 8);
	R41(&e1, a1, &b1, c1, d1, w10, 15);
	R42(&e2, a2, &b2, c2, d2, w1, 11);
	R41(&d1, e1, &a1, b1, c1, w0, 14);
	R42(&d2, e2, &a2, b2, c2, w3, 14);
	R41(&c1, d1, &e1, a1, b1, w8, 15);
	R42(&c2, d2, &e2, a2, b2, w11, 14);
	R41(&b1, c1, &d1, e1, a1, w12, 9);
	R42(&b2, c2, &d2, e2, a2, w15, 6);
	R41(&a1, b1, &c1, d1, e1, w4, 8);
	R42(&a2, b2, &c2, d2, e2, w0, 14);
	R41(&e1, a1, &b1, c1, d1, w13, 9);
	R42(&e2, a2, &b2, c2, d2, w5, 6);
	R41(&d1, e1, &a1, b1, c1, w3, 14);
	R42(&d2, e2, &a2, b2, c2, w12, 9);
	R41(&c1, d1, &e1, a1, b1, w7, 5);
	R42(&c2, d2, &e2, a2, b2, w2, 12);
	R41(&b1, c1, &d1, e1, a1, w15, 6);
	R42(&b2, c2, &d2, e2, a2, w13, 9);
	R41(&a1, b1, &c1, d1, e1, w14, 8);
	R42(&a2, b2, &c2, d2, e2, w9, 12);
	R41(&e1, a1, &b1, c1, d1, w5, 6);
	R42(&e2, a2, &b2, c2, d2, w7, 5);
	R41(&d1, e1, &a1, b1, c1, w6, 5);
	R42(&d2, e2, &a2, b2, c2, w10, 15);
	R41(&c1, d1, &e1, a1, b1, w2, 12);
	R42(&c2, d2, &e2, a2, b2, w14, 8);

	/* Round group 5. */
	R51(&b1, c1, &d1, e1, a1, w4, 9);
	R52(&b2, c2, &d2, e2, a2, w12, 8);
	R51(&a1, b1, &c1, d1, e1, w0, 15);
	R52(&a2, b2, &c2, d2, e2, w15, 5);
	R51(&e1, a1, &b1, c1, d1, w5, 5);
	R52(&e2, a2, &b2, c2, d2, w10, 12);
	R51(&d1, e1, &a1, b1, c1, w9, 11);
	R52(&d2, e2, &a2, b2, c2, w4, 9);
	R51(&c1, d1, &e1, a1, b1, w7, 6);
	R52(&c2, d2, &e2, a2, b2, w1, 12);
	R51(&b1, c1, &d1, e1, a1, w12, 8);
	R52(&b2, c2, &d2, e2, a2, w5, 5);
	R51(&a1, b1, &c1, d1, e1, w2, 13);
	R52(&a2, b2, &c2, d2, e2, w8, 14);
	R51(&e1, a1, &b1, c1, d1, w10, 12);
	R52(&e2, a2, &b2, c2, d2, w7, 6);
	R51(&d1, e1, &a1, b1, c1, w14, 5);
	R52(&d2, e2, &a2, b2, c2, w6, 8);
	R51(&c1, d1, &e1, a1, b1, w1, 12);
	R52(&c2, d2, &e2, a2, b2, w2, 13);
	R51(&b1, c1, &d1, e1, a1, w3, 13);
	R52(&b2, c2, &d2, e2, a2, w13, 6);
	R51(&a1, b1, &c1, d1, e1, w8, 14);
	R52(&a2, b2, &c2, d2, e2, w14, 5);
	R51(&e1, a1, &b1, c1, d1, w11, 11);
	R52(&e2, a2, &b2, c2, d2, w0, 15);
	R51(&d1, e1, &a1, b1, c1, w6, 8);
	R52(&d2, e2, &a2, b2, c2, w3, 13);
	R51(&c1, d1, &e1, a1, b1, w15, 5);
	R52(&c2, d2, &e2, a2, b2, w9, 11);
	R51(&b1, c1, &d1, e1, a1, w13, 6);
	R52(&b2, c2, &d2, e2, a2, w11, 11);

	/* Combine the two parallel lines back into the chaining state. */
	t = s[0];
	s[0] = s[1] + c1 + d2;
	s[1] = s[2] + d1 + e2;
	s[2] = s[3] + e1 + a2;
	s[3] = s[4] + a1 + b2;
	s[4] = t + b1 + c2;
}
262 static bool alignment_ok(const void *p UNUSED, size_t n UNUSED)
264 #if HAVE_UNALIGNED_ACCESS
267 return ((size_t)p % n == 0);
271 static void add(struct ripemd160_ctx *ctx, const void *p, size_t len)
273 const unsigned char *data = p;
274 size_t bufsize = ctx->bytes % 64;
276 if (bufsize + len >= 64) {
277 /* Fill the buffer, and process it. */
278 memcpy(ctx->buf.u8 + bufsize, data, 64 - bufsize);
279 ctx->bytes += 64 - bufsize;
280 data += 64 - bufsize;
282 Transform(ctx->s, ctx->buf.u32);
287 /* Process full chunks directly from the source. */
288 if (alignment_ok(data, sizeof(uint32_t)))
289 Transform(ctx->s, (const uint32_t *)data);
291 memcpy(ctx->buf.u8, data, sizeof(ctx->buf));
292 Transform(ctx->s, ctx->buf.u32);
300 /* Fill the buffer with what remains. */
301 memcpy(ctx->buf.u8 + bufsize, data, len);
306 void ripemd160_init(struct ripemd160_ctx *ctx)
308 struct ripemd160_ctx init = RIPEMD160_INIT;
/* Absorb size bytes at p into the running hash.  Asserts the context
 * has not already been finalized. */
void ripemd160_update(struct ripemd160_ctx *ctx, const void *p, size_t size)
{
	check_ripemd160(ctx);
	add(ctx, p, size);
}
318 void ripemd160_done(struct ripemd160_ctx *ctx, struct ripemd160 *res)
320 static const unsigned char pad[64] = {0x80};
324 sizedesc = cpu_to_le64(ctx->bytes << 3);
325 /* Add '1' bit to terminate, then all 0 bits, up to next block - 8. */
326 add(ctx, pad, 1 + ((119 - (ctx->bytes % 64)) % 64));
327 /* Add number of bits of data (big endian) */
328 add(ctx, &sizedesc, 8);
329 for (i = 0; i < sizeof(ctx->s) / sizeof(ctx->s[0]); i++)
330 res->u.u32[i] = cpu_to_le32(ctx->s[i]);
331 invalidate_ripemd160(ctx);
335 void ripemd160(struct ripemd160 *ripemd, const void *p, size_t size)
337 struct ripemd160_ctx ctx;
339 ripemd160_init(&ctx);
340 ripemd160_update(&ctx, p, size);
341 ripemd160_done(&ctx, ripemd);
/* Add fixed-width integers in native (host) byte order. */
void ripemd160_u8(struct ripemd160_ctx *ctx, uint8_t v)
{
	ripemd160_update(ctx, &v, sizeof(v));
}

void ripemd160_u16(struct ripemd160_ctx *ctx, uint16_t v)
{
	ripemd160_update(ctx, &v, sizeof(v));
}

void ripemd160_u32(struct ripemd160_ctx *ctx, uint32_t v)
{
	ripemd160_update(ctx, &v, sizeof(v));
}

void ripemd160_u64(struct ripemd160_ctx *ctx, uint64_t v)
{
	ripemd160_update(ctx, &v, sizeof(v));
}
364 /* Add as little-endian */
365 void ripemd160_le16(struct ripemd160_ctx *ctx, uint16_t v)
367 leint16_t lev = cpu_to_le16(v);
368 ripemd160_update(ctx, &lev, sizeof(lev));
371 void ripemd160_le32(struct ripemd160_ctx *ctx, uint32_t v)
373 leint32_t lev = cpu_to_le32(v);
374 ripemd160_update(ctx, &lev, sizeof(lev));
377 void ripemd160_le64(struct ripemd160_ctx *ctx, uint64_t v)
379 leint64_t lev = cpu_to_le64(v);
380 ripemd160_update(ctx, &lev, sizeof(lev));
383 /* Add as big-endian */
384 void ripemd160_be16(struct ripemd160_ctx *ctx, uint16_t v)
386 beint16_t bev = cpu_to_be16(v);
387 ripemd160_update(ctx, &bev, sizeof(bev));
390 void ripemd160_be32(struct ripemd160_ctx *ctx, uint32_t v)
392 beint32_t bev = cpu_to_be32(v);
393 ripemd160_update(ctx, &bev, sizeof(bev));
396 void ripemd160_be64(struct ripemd160_ctx *ctx, uint64_t v)
398 beint64_t bev = cpu_to_be64(v);
399 ripemd160_update(ctx, &bev, sizeof(bev));