/* ====================================================================
 * Copyright (c) 1999-2002 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    licensing@OpenSSL.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com).  This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com).
 *
 */
/*
 * This is a generic 32 bit "collector" for message digest algorithms.
 * Whenever needed it collects the input character stream into chunks
 * of 32 bit values and invokes a block function that performs the
 * actual hash calculations.
 *
 * Porting guide.
 *
 * Obligatory macros:
 *
 * DATA_ORDER_IS_BIG_ENDIAN or DATA_ORDER_IS_LITTLE_ENDIAN
 *	this macro defines byte order of input stream.
 * HASH_CBLOCK
 *	size of a unit chunk HASH_BLOCK operates on.
 * HASH_LONG
 *	has to be at least 32 bit wide; if it's wider, then
 *	HASH_LONG_LOG2 *has to* be defined along with it.
 * HASH_CTX
 *	context structure that at least contains the following
 *	members:
 *		typedef struct {
 *			...
 *			HASH_LONG	Nl, Nh;
 *			HASH_LONG	data[HASH_LBLOCK];
 *			unsigned int	num;
 *			...
 *		} HASH_CTX;
 * HASH_UPDATE
 *	name of "Update" function, implemented here.
 * HASH_TRANSFORM
 *	name of "Transform" function, implemented here.
 * HASH_FINAL
 *	name of "Final" function, implemented here.
 * HASH_BLOCK_HOST_ORDER
 *	name of "block" function treating *aligned* input message
 *	in host byte order, implemented externally.
 * HASH_BLOCK_DATA_ORDER
 *	name of "block" function treating *unaligned* input message
 *	in original (data) byte order, implemented externally (it
 *	actually is optional if data and host are of the same
 *	"endianness").
 * HASH_MAKE_STRING
 *	macro converting context variables to an ASCII hash string.
 *
 * Optional macros:
 *
 * B_ENDIAN or L_ENDIAN
 *	defines host byte-order.
 * HASH_LONG_LOG2
 *	defaults to 2 if not stated otherwise.
 * HASH_LBLOCK
 *	assumed to be HASH_CBLOCK/4 if not stated otherwise.
 * HASH_BLOCK_DATA_ORDER_ALIGNED
 *	alternative "block" function capable of treating
 *	aligned input message in original (data) order,
 *	implemented externally.
 *
 * MD5 example:
 *
 *	#define DATA_ORDER_IS_LITTLE_ENDIAN
 *
 *	#define HASH_LONG		MD5_LONG
 *	#define HASH_LONG_LOG2		MD5_LONG_LOG2
 *	#define HASH_CTX		MD5_CTX
 *	#define HASH_CBLOCK		MD5_CBLOCK
 *	#define HASH_LBLOCK		MD5_LBLOCK
 *	#define HASH_UPDATE		MD5_Update
 *	#define HASH_TRANSFORM		MD5_Transform
 *	#define HASH_FINAL		MD5_Final
 *	#define HASH_BLOCK_HOST_ORDER	md5_block_host_order
 *	#define HASH_BLOCK_DATA_ORDER	md5_block_data_order
 *
 *					<appro@fy.chalmers.se>
 */
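/*
 * A minimal usage sketch, assuming the MD5 instantiation above: once
 * the digest module has defined the macros and included this file, the
 * expanded HASH_UPDATE and HASH_FINAL become that module's public
 * Update/Final entry points. MD5_Init itself is implemented by the
 * digest module, not by this header.
 *
 *	unsigned char md[MD5_DIGEST_LENGTH];
 *	MD5_CTX ctx;
 *
 *	MD5_Init(&ctx);
 *	MD5_Update(&ctx, "abc", 3);	(repeatable with arbitrary chunks)
 *	MD5_Final(md, &ctx);
 */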
#ifndef _MD32_COMMON__H
#define _MD32_COMMON__H

#if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#error "DATA_ORDER must be defined!"
#endif

#ifndef HASH_CBLOCK
#error "HASH_CBLOCK must be defined!"
#endif
#ifndef HASH_LONG
#error "HASH_LONG must be defined!"
#endif
#ifndef HASH_CTX
#error "HASH_CTX must be defined!"
#endif

#ifndef HASH_UPDATE
#error "HASH_UPDATE must be defined!"
#endif
#ifndef HASH_TRANSFORM
#error "HASH_TRANSFORM must be defined!"
#endif
#ifndef HASH_FINAL
#error "HASH_FINAL must be defined!"
#endif

#ifndef HASH_BLOCK_HOST_ORDER
#error "HASH_BLOCK_HOST_ORDER must be defined!"
#endif

#if 0
/*
 * Moved below as it's required only if HASH_BLOCK_DATA_ORDER_ALIGNED
 * isn't defined.
 */
#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
#endif

#ifndef HASH_LBLOCK
#define HASH_LBLOCK	(HASH_CBLOCK/4)
#endif

#ifndef HASH_LONG_LOG2
#define HASH_LONG_LOG2	2
#endif
/*
 * Engage compiler specific rotate intrinsic function if available.
 */
#undef ROTATE
#ifndef PEDANTIC
# if defined(_MSC_VER) || defined(__ICC)
#  define ROTATE(a,n)	_lrotl(a,n)
# elif defined(__MWERKS__)
#  if defined(__POWERPC__)
#   define ROTATE(a,n)	__rlwinm(a,n,0,31)
#  elif defined(__MC68K__)
    /* Motorola specific tweak. <appro@fy.chalmers.se> */
#   define ROTATE(a,n)	( n<24 ? __rol(a,n) : __ror(a,32-n) )
#  else
#   define ROTATE(a,n)	__rol(a,n)
#  endif
# elif defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
  /*
   * Some GNU C inline assembler templates. Note that these are
   * rotates by *constant* number of bits! But that's exactly
   * what we need here...
   *					<appro@fy.chalmers.se>
   */
#  if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
#   define ROTATE(a,n)	({ register unsigned int ret;	\
				asm (			\
				"roll %1,%0"		\
				: "=r"(ret)		\
				: "I"(n), "0"(a)	\
				: "cc");		\
			   ret;				\
			})
#  elif defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__)
#   define ROTATE(a,n)	({ register unsigned int ret;	\
				asm (			\
				"rlwinm %0,%1,%2,0,31"	\
				: "=r"(ret)		\
				: "r"(a), "I"(n));	\
			   ret;				\
			})
#  endif
# endif
#endif /* PEDANTIC */
#if HASH_LONG_LOG2==2	/* Engage only if sizeof(HASH_LONG)== 4 */
/* A nice byte order reversal from Wei Dai <weidai@eskimo.com> */
#ifdef ROTATE
/* 5 instructions with rotate instruction, else 9 */
#define REVERSE_FETCH32(a,l)	(					\
		l=*(const HASH_LONG *)(a),				\
		((ROTATE(l,8)&0x00FF00FF)|(ROTATE((l&0x00FF00FF),24)))	\
				)
#else
/* 6 instructions with rotate instruction, else 8 */
#define REVERSE_FETCH32(a,l)	(				\
		l=*(const HASH_LONG *)(a),			\
		l=(((l>>8)&0x00FF00FF)|((l&0x00FF00FF)<<8)),	\
		ROTATE(l,16)					\
				)
/*
 * Originally the middle line started with l=(((l&0xFF00FF00)>>8)|...
 * It's rewritten as above for two reasons:
 *	- RISCs aren't good at long constants and have to explicitly
 *	  compose 'em with several (well, usually 2) instructions in a
 *	  register before performing the actual operation and (as you
 *	  already realized:-) having the same constant should inspire the
 *	  compiler to permanently allocate the only register for it;
 *	- most modern CPUs have two ALUs, but usually only one has
 *	  circuitry for shifts:-( this minor tweak inspires the compiler
 *	  to schedule shift instructions in a better way...
 *
 *				<appro@fy.chalmers.se>
 */
#endif
#endif

#ifndef ROTATE
#define ROTATE(a,n)	(((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
#endif
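/*
 * A worked example of the fallback ROTATE, assuming 32-bit values:
 * ROTATE(0x80000001,1) == 0x00000002|0x00000001 == 0x00000003. The
 * &0xffffffff mask only matters when HASH_LONG is wider than 32 bits:
 * it keeps the right shift from dragging down bits above bit 31, and
 * any garbage the left shift leaves in the upper half is harmless
 * because callers only ever keep the 32 LSBs. Likewise, for
 * l=0xAABBCCDD the Wei Dai fetch above evaluates as
 *	ROTATE(l,8)&0x00FF00FF		== 0x00CC00AA
 *	ROTATE(l&0x00FF00FF,24)		== 0xDD00BB00
 * whose OR is 0xDDCCBBAA, the byte-reversed word.
 */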
/*
 * Make some obvious choices. E.g., HASH_BLOCK_DATA_ORDER_ALIGNED
 * and HASH_BLOCK_HOST_ORDER ought to be the same if input data
 * and host are of the same "endianness". It's possible to mask
 * this with a blank #define HASH_BLOCK_DATA_ORDER though...
 *
 *				<appro@fy.chalmers.se>
 */
#if defined(B_ENDIAN)
# if defined(DATA_ORDER_IS_BIG_ENDIAN)
#  if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
#   define HASH_BLOCK_DATA_ORDER_ALIGNED	HASH_BLOCK_HOST_ORDER
#  endif
# endif
#elif defined(L_ENDIAN)
# if defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#  if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
#   define HASH_BLOCK_DATA_ORDER_ALIGNED	HASH_BLOCK_HOST_ORDER
#  endif
# endif
#endif

#if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
#endif
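/*
 * For instance, MD5 (little-endian data order) built on a little-endian
 * host with L_ENDIAN defined: data order and host order coincide, so the
 * block above quietly defines HASH_BLOCK_DATA_ORDER_ALIGNED as
 * md5_block_host_order, letting aligned input skip the byte-by-byte
 * conversion path in HASH_UPDATE below.
 */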
#if defined(DATA_ORDER_IS_BIG_ENDIAN)

#ifndef PEDANTIC
# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
#  if ((defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)) || \
      (defined(__x86_64) || defined(__x86_64__))
    /*
     * This gives ~30-40% performance improvement in SHA-256 compiled
     * with gcc [on P4]. Well, first macro to be frank. We can pull
     * this trick on x86* platforms only, because these CPUs can fetch
     * unaligned data without raising an exception.
     */
#   define HOST_c2l(c,l)	({ unsigned int r=*((const unsigned int *)(c));	\
				   asm ("bswapl %0":"=r"(r):"0"(r));	\
				   (c)+=4; (l)=r;			})
#   define HOST_l2c(l,c)	({ unsigned int r=(l);			\
				   asm ("bswapl %0":"=r"(r):"0"(r));	\
				   *((unsigned int *)(c))=r; (c)+=4; r;	})
#  endif
# endif
#endif

#ifndef HOST_c2l
#define HOST_c2l(c,l)	(l =(((unsigned long)(*((c)++)))<<24),		\
			 l|=(((unsigned long)(*((c)++)))<<16),		\
			 l|=(((unsigned long)(*((c)++)))<< 8),		\
			 l|=(((unsigned long)(*((c)++)))    ),		\
			 l)
#endif
/* deliberate switch fall-through: start filling at byte n of the word */
#define HOST_p_c2l(c,l,n)	{					\
			switch (n) {					\
			case 0: l =((unsigned long)(*((c)++)))<<24;	\
			case 1: l|=((unsigned long)(*((c)++)))<<16;	\
			case 2: l|=((unsigned long)(*((c)++)))<< 8;	\
			case 3: l|=((unsigned long)(*((c)++)));		\
				} }
#define HOST_p_c2l_p(c,l,sc,len) {					\
			switch (sc) {					\
			case 0: l =((unsigned long)(*((c)++)))<<24;	\
				if (--len == 0) break;			\
			case 1: l|=((unsigned long)(*((c)++)))<<16;	\
				if (--len == 0) break;			\
			case 2: l|=((unsigned long)(*((c)++)))<< 8;	\
				} }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)	{					\
			l=0; (c)+=n;					\
			switch (n) {					\
			case 3: l =((unsigned long)(*(--(c))))<< 8;	\
			case 2: l|=((unsigned long)(*(--(c))))<<16;	\
			case 1: l|=((unsigned long)(*(--(c))))<<24;	\
				} }
#ifndef HOST_l2c
#define HOST_l2c(l,c)	(*((c)++)=(unsigned char)(((l)>>24)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>16)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>> 8)&0xff),	\
			 *((c)++)=(unsigned char)(((l)    )&0xff),	\
			 l)
#endif
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)

#if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
# ifndef B_ENDIAN
   /* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */
#  define HOST_c2l(c,l)	((l)=*((const unsigned int *)(c)), (c)+=4, l)
#  define HOST_l2c(l,c)	(*((unsigned int *)(c))=(l), (c)+=4, l)
# endif
#endif

#ifndef HOST_c2l
#define HOST_c2l(c,l)	(l =(((unsigned long)(*((c)++)))    ),		\
			 l|=(((unsigned long)(*((c)++)))<< 8),		\
			 l|=(((unsigned long)(*((c)++)))<<16),		\
			 l|=(((unsigned long)(*((c)++)))<<24),		\
			 l)
#endif
/* deliberate switch fall-through: start filling at byte n of the word */
#define HOST_p_c2l(c,l,n)	{					\
			switch (n) {					\
			case 0: l =((unsigned long)(*((c)++)));		\
			case 1: l|=((unsigned long)(*((c)++)))<< 8;	\
			case 2: l|=((unsigned long)(*((c)++)))<<16;	\
			case 3: l|=((unsigned long)(*((c)++)))<<24;	\
				} }
#define HOST_p_c2l_p(c,l,sc,len) {					\
			switch (sc) {					\
			case 0: l =((unsigned long)(*((c)++)));		\
				if (--len == 0) break;			\
			case 1: l|=((unsigned long)(*((c)++)))<< 8;	\
				if (--len == 0) break;			\
			case 2: l|=((unsigned long)(*((c)++)))<<16;	\
				} }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)	{					\
			l=0; (c)+=n;					\
			switch (n) {					\
			case 3: l =((unsigned long)(*(--(c))))<<16;	\
			case 2: l|=((unsigned long)(*(--(c))))<< 8;	\
			case 1: l|=((unsigned long)(*(--(c))));		\
				} }
#ifndef HOST_l2c
#define HOST_l2c(l,c)	(*((c)++)=(unsigned char)(((l)    )&0xff),	\
			 *((c)++)=(unsigned char)(((l)>> 8)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>16)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>24)&0xff),	\
			 l)
#endif

#endif
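/*
 * A round-trip illustration of the conversion macros, assuming the
 * little-endian data order; buf/out/p/q/l are illustrative names:
 *
 *	const unsigned char buf[4] = {0x01, 0x02, 0x03, 0x04};
 *	unsigned char out[4];
 *	const unsigned char *p = buf;
 *	unsigned char *q = out;
 *	HASH_LONG l;
 *
 *	HOST_c2l(p, l);	 now l == 0x04030201 and p == buf+4
 *	HOST_l2c(l, q);	 out holds {0x01, 0x02, 0x03, 0x04}, q == out+4
 */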
/*
 * Time for some action:-)
 */

int HASH_UPDATE (HASH_CTX *c, const void *data_, size_t len)
{
    const unsigned char *data = (const unsigned char *)data_;
    register HASH_LONG *p;
    register HASH_LONG l;
    size_t sw, sc, ew, ec;

    if (len == 0)
        return 1;

    l = (c->Nl + (((HASH_LONG)len) << 3)) & 0xffffffffUL;
    /* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
     * Wei Dai <weidai@eskimo.com> for pointing it out. */
    if (l < c->Nl) { /* overflow */
        c->Nh++;
    }
    c->Nh += (len >> 29); /* might cause compiler warning on 16-bit */
    c->Nl = l;

    if (c->num != 0) {
        p = c->data;
        sw = c->num >> 2;
        sc = c->num & 0x03;

        if ((c->num + len) >= HASH_CBLOCK) {
            /* enough input to complete the buffered block: top it up
             * and hash it in host order */
            l = p[sw];
            HOST_p_c2l(data, l, sc);
            p[sw++] = l;
            for (; sw < HASH_LBLOCK; sw++) {
                HOST_c2l(data, l);
                p[sw] = l;
            }
            HASH_BLOCK_HOST_ORDER (c, p, 1);
            len -= (HASH_CBLOCK - c->num);
            c->num = 0;
            /* drop through and do the rest */
        } else {
            c->num += (unsigned int)len;
            if ((sc + len) < 4) { /* ugly, add char's to a word */
                l = p[sw];
                HOST_p_c2l_p(data, l, sc, len);
                p[sw] = l;
            } else {
                ew = (c->num >> 2);
                ec = (c->num & 0x03);
                if (sc)
                    l = p[sw];
                HOST_p_c2l(data, l, sc);
                p[sw++] = l;
                for (; sw < ew; sw++) {
                    HOST_c2l(data, l);
                    p[sw] = l;
                }
                if (ec) {
                    HOST_c2l_p(data, l, ec);
                    p[sw] = l;
                }
            }
            return 1;
        }
    }
    sw = len / HASH_CBLOCK;
    if (sw > 0) {
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
        /*
         * Note that HASH_BLOCK_DATA_ORDER_ALIGNED gets defined
         * only if sizeof(HASH_LONG)==4.
         */
        if ((((size_t)data) % 4) == 0) {
            /* data is properly aligned so that we can cast it: */
            HASH_BLOCK_DATA_ORDER_ALIGNED (c, (const HASH_LONG *)data, sw);
            sw *= HASH_CBLOCK;
            data += sw;
            len -= sw;
        } else
#if !defined(HASH_BLOCK_DATA_ORDER)
            while (sw--) {
                memcpy (p = c->data, data, HASH_CBLOCK);
                HASH_BLOCK_DATA_ORDER_ALIGNED(c, p, 1);
                data += HASH_CBLOCK;
                len -= HASH_CBLOCK;
            }
#endif
#endif
#if defined(HASH_BLOCK_DATA_ORDER)
        /* either the else-branch of the alignment test above, or
         * (without the aligned path) an unconditional block */
        {
            HASH_BLOCK_DATA_ORDER(c, data, sw);
            sw *= HASH_CBLOCK;
            data += sw;
            len -= sw;
        }
#endif
    }

    if (len != 0) {
        p = c->data;
        c->num = (unsigned int)len;
        ew = len >> 2; /* words to copy */
        ec = len & 0x03;
        for (; ew; ew--, p++) {
            HOST_c2l(data, l);
            *p = l;
        }
        HOST_c2l_p(data, l, ec);
        *p = l;
    }
    return 1;
}
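/*
 * A behavioral note, with illustrative values: the collector buffers
 * partial blocks in c->data/c->num, so splitting the input does not
 * change the digest. With the MD5 instantiation,
 *
 *	MD5_Update(&ctx, "ab", 2);
 *	MD5_Update(&ctx, "c", 1);
 *
 * leaves c->num == 3 and is equivalent to MD5_Update(&ctx, "abc", 3);
 * the block function only runs once a full HASH_CBLOCK (64 bytes for
 * MD5) has accumulated.
 */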
void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data)
{
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
    if ((((size_t)data) % 4) == 0)
        /* data is properly aligned so that we can cast it: */
        HASH_BLOCK_DATA_ORDER_ALIGNED (c, (const HASH_LONG *)data, 1);
    else
#if !defined(HASH_BLOCK_DATA_ORDER)
    {
        memcpy (c->data, data, HASH_CBLOCK);
        HASH_BLOCK_DATA_ORDER_ALIGNED (c, c->data, 1);
    }
#endif
#endif
#if defined(HASH_BLOCK_DATA_ORDER)
    HASH_BLOCK_DATA_ORDER (c, data, 1);
#endif
}
int HASH_FINAL (unsigned char *md, HASH_CTX *c)
{
    register HASH_LONG *p;
    register unsigned long l;
    register int i, j;
    static const unsigned char end[4] = {0x80, 0x00, 0x00, 0x00};
    const unsigned char *cp = end;

    /* c->num should definitely have room for at least one more byte. */
    p = c->data;
    i = c->num >> 2;
    j = c->num & 0x03;

#if 0
    /* purify often complains about the following line as an
     * Uninitialized Memory Read. While this can be true, the
     * following p_c2l macro will reset l when that case is true.
     * This is because j&0x03 contains the number of 'valid' bytes
     * already in p[i]. If and only if j&0x03 == 0, the UMR will
     * occur but this is also the only time p_c2l will do
     * l= *(cp++) instead of l|= *(cp++)
     * Many thanks to Alex Tang <altitude@cic.net> for picking up this
     * 'potential bug' */
#ifdef PURIFY
    p[i] = 0; /* Yeah, but that's not the way to fix it:-) */
#endif
    l = p[i];
#else
    l = (j == 0) ? 0 : p[i];
#endif
    HOST_p_c2l(cp, l, j);
    p[i++] = l; /* i is the next 'undefined word' */

    if (i > (HASH_LBLOCK - 2)) { /* save room for Nl and Nh */
        if (i < HASH_LBLOCK) {
            p[i] = 0;
        }
        HASH_BLOCK_HOST_ORDER (c, p, 1);
        i = 0;
    }
    for (; i < (HASH_LBLOCK - 2); i++) {
        p[i] = 0;
    }

#if   defined(DATA_ORDER_IS_BIG_ENDIAN)
    p[HASH_LBLOCK - 2] = c->Nh;
    p[HASH_LBLOCK - 1] = c->Nl;
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
    p[HASH_LBLOCK - 2] = c->Nl;
    p[HASH_LBLOCK - 1] = c->Nh;
#endif
    HASH_BLOCK_HOST_ORDER (c, p, 1);

#ifndef HASH_MAKE_STRING
#error "HASH_MAKE_STRING must be defined!"
#else
    HASH_MAKE_STRING(c, md);
#endif

    c->num = 0;
    /* clear stuff, HASH_BLOCK may be leaving some stuff on the stack
     * but I'm not worried :-)
     * OPENSSL_cleanse((void *)c, sizeof(HASH_CTX));
     */
    return 1;
}
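/*
 * The finalization above is standard MD-strengthening; a worked layout,
 * assuming HASH_CBLOCK == 64 and the 3-byte message "abc" (24 bits),
 * has the final block
 *
 *	'a' 'b' 'c' 0x80 0x00 ... 0x00 <64-bit bit count = 24>
 *
 * with the count stored in p[HASH_LBLOCK-2] and p[HASH_LBLOCK-1]:
 * Nh then Nl for big-endian data order (SHA), Nl then Nh for
 * little-endian data order (MD5).
 */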
#ifndef MD32_REG_T
#define MD32_REG_T int
/*
 * This comment was originally written for MD5, which is why it
 * discusses A-D. But it basically applies to all 32-bit digests,
 * which is why it was moved to a common header file.
 *
 * In case you wonder why A-D are declared as long and not
 * as MD5_LONG. Doing so results in a slight performance
 * boost on LP64 architectures. The catch is we don't
 * really care if 32 MSBs of a 64-bit register get polluted
 * with eventual overflows as we *save* only 32 LSBs in
 * *either* case. Now declaring 'em long excuses the compiler
 * from keeping 32 MSBs zeroed, resulting in 13% performance
 * improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
 * Well, to be honest it should say that this *prevents*
 * performance degradation.
 *				<appro@fy.chalmers.se>
 * Apparently there're LP64 compilers that generate better
 * code if A-D are declared int. Most notably GCC-x86_64
 * generates better code.
 *				<appro@fy.chalmers.se>
 */
#endif

#endif /* _MD32_COMMON__H */