/* Included in OSSEC */
/* crypto/md32_common.h */
/* ====================================================================
 * Copyright (c) 1999-2002 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    licensing@OpenSSL.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com).  This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com).
 */
/*
 * This is a generic 32 bit "collector" for message digest algorithms.
 * Whenever needed it collects the input character stream into chunks of
 * 32 bit values and invokes a block function that performs the actual
 * hash calculations.
 *
 * Porting guide.
 *
 * Obligatory macros:
 *
 * DATA_ORDER_IS_BIG_ENDIAN or DATA_ORDER_IS_LITTLE_ENDIAN
 *	this macro defines byte order of input stream.
 * HASH_CBLOCK
 *	size of a unit chunk HASH_BLOCK operates on.
 * HASH_LONG
 *	has to be at least 32 bit wide; if it's wider, then
 *	HASH_LONG_LOG2 *has to* be defined along with it.
 * HASH_CTX
 *	context structure that at least contains the following
 *	members:
 *		typedef struct {
 *			...
 *			HASH_LONG	Nl,Nh;
 *			HASH_LONG	data[HASH_LBLOCK];
 *			unsigned int	num;
 *			...
 *			} HASH_CTX;
 * HASH_UPDATE
 *	name of "Update" function, implemented here.
 * HASH_TRANSFORM
 *	name of "Transform" function, implemented here.
 * HASH_FINAL
 *	name of "Final" function, implemented here.
 * HASH_BLOCK_HOST_ORDER
 *	name of "block" function treating *aligned* input message
 *	in host byte order, implemented externally.
 * HASH_BLOCK_DATA_ORDER
 *	name of "block" function treating *unaligned* input message
 *	in original (data) byte order, implemented externally (it
 *	actually is optional if data and host are of the same
 *	"endianness").
 * HASH_MAKE_STRING
 *	macro converting context variables to an ASCII hash string.
 *
 * Optional macros:
 *
 * B_ENDIAN or L_ENDIAN
 *	defines host byte-order.
 * HASH_LONG_LOG2
 *	defaults to 2 if not stated otherwise.
 * HASH_LBLOCK
 *	assumed to be HASH_CBLOCK/4 if not stated otherwise.
 * HASH_BLOCK_DATA_ORDER_ALIGNED
 *	alternative "block" function capable of treating
 *	aligned input message in original (data) order,
 *	implemented externally.
 *
 * MD5 example:
 *
 *	#define DATA_ORDER_IS_LITTLE_ENDIAN
 *
 *	#define HASH_LONG		MD5_LONG
 *	#define HASH_LONG_LOG2		MD5_LONG_LOG2
 *	#define HASH_CTX		MD5_CTX
 *	#define HASH_CBLOCK		MD5_CBLOCK
 *	#define HASH_LBLOCK		MD5_LBLOCK
 *	#define HASH_UPDATE		MD5_Update
 *	#define HASH_TRANSFORM		MD5_Transform
 *	#define HASH_FINAL		MD5_Final
 *	#define HASH_BLOCK_HOST_ORDER	md5_block_host_order
 *	#define HASH_BLOCK_DATA_ORDER	md5_block_data_order
 *
 *					<appro@fy.chalmers.se>
 */
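/*
 * Illustration (editorial addition, not in the original header): with
 * the MD5 defines above, the two "block" functions referenced by this
 * file would be supplied externally with prototypes along these lines.
 * The names follow the example defines; treat this as a sketch of the
 * expected interface, not a normative declaration:
 *
 *	void md5_block_host_order (MD5_CTX *c, const void *p, size_t num);
 *	void md5_block_data_order (MD5_CTX *c, const void *p, size_t num);
 *
 * where 'num' is the number of HASH_CBLOCK-byte chunks found at 'p'.
 */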
#ifndef _MD32_COMMON__H
#define _MD32_COMMON__H
#if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#error "DATA_ORDER must be defined!"
#endif

#ifndef HASH_CBLOCK
#error "HASH_CBLOCK must be defined!"
#endif
#ifndef HASH_LONG
#error "HASH_LONG must be defined!"
#endif
#ifndef HASH_CTX
#error "HASH_CTX must be defined!"
#endif

#ifndef HASH_UPDATE
#error "HASH_UPDATE must be defined!"
#endif
#ifndef HASH_TRANSFORM
#error "HASH_TRANSFORM must be defined!"
#endif
#ifndef HASH_FINAL
#error "HASH_FINAL must be defined!"
#endif

#ifndef HASH_BLOCK_HOST_ORDER
#error "HASH_BLOCK_HOST_ORDER must be defined!"
#endif
/*
 * Moved below as it's required only if HASH_BLOCK_DATA_ORDER_ALIGNED
 * isn't defined.
 *
#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
 */
#ifndef HASH_LBLOCK
#define HASH_LBLOCK	(HASH_CBLOCK/4)
#endif

#ifndef HASH_LONG_LOG2
#define HASH_LONG_LOG2	2
#endif
/*
 * Engage compiler specific rotate intrinsic function if available.
 */
#undef ROTATE
#ifndef PEDANTIC
# if defined(_MSC_VER) || defined(__ICC)
#  define ROTATE(a,n)	_lrotl(a,n)
# elif defined(__MWERKS__)
#  if defined(__POWERPC__)
#   define ROTATE(a,n)	__rlwinm(a,n,0,31)
#  elif defined(__MC68K__)
    /* Motorola specific tweak. <appro@fy.chalmers.se> */
#   define ROTATE(a,n)	( n<24 ? __rol(a,n) : __ror(a,32-n) )
#  else
#   define ROTATE(a,n)	__rol(a,n)
#  endif
# elif defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
  /*
   * Some GNU C inline assembler templates. Note that these are
   * rotates by *constant* number of bits! But that's exactly
   * what we need here...
   *					<appro@fy.chalmers.se>
   */
#  if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
#   define ROTATE(a,n)	({ register unsigned int ret;	\
				asm (			\
				"roll %1,%0"		\
				: "=r"(ret)		\
				: "I"(n), "0"(a)	\
				: "cc");		\
			   ret;				\
			})
#  elif defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__)
#   define ROTATE(a,n)	({ register unsigned int ret;	\
				asm (			\
				"rlwinm %0,%1,%2,0,31"	\
				: "=r"(ret)		\
				: "r"(a), "I"(n));	\
			   ret;				\
			})
#  endif
# endif
#endif /* PEDANTIC */
#if HASH_LONG_LOG2==2	/* Engage only if sizeof(HASH_LONG)==4 */
/* A nice byte order reversal from Wei Dai <weidai@eskimo.com> */
#ifdef ROTATE
/* 5 instructions with rotate instruction, else 9 */
#define REVERSE_FETCH32(a,l)	(					\
		l=*(const HASH_LONG *)(a),				\
		((ROTATE(l,8)&0x00FF00FF)|(ROTATE((l&0x00FF00FF),24)))	\
				)
#else
/* 6 instructions with rotate instruction, else 8 */
#define REVERSE_FETCH32(a,l)	(				\
		l=*(const HASH_LONG *)(a),			\
		l=(((l>>8)&0x00FF00FF)|((l&0x00FF00FF)<<8)),	\
		ROTATE(l,16)					\
				)
/*
 * Originally the middle line started with l=(((l&0xFF00FF00)>>8)|...
 * It's rewritten as above for two reasons:
 *	- RISCs aren't good at long constants and have to explicitly
 *	  compose 'em with several (well, usually 2) instructions in a
 *	  register before performing the actual operation and (as you
 *	  already realized:-) having the same constant should inspire the
 *	  compiler to permanently allocate the only register for it;
 *	- most modern CPUs have two ALUs, but usually only one has
 *	  circuitry for shifts:-( this minor tweak inspires the compiler
 *	  to schedule shift instructions in a better way...
 *
 *				<appro@fy.chalmers.se>
 */
#endif
#endif /* HASH_LONG_LOG2==2 */
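/*
 * Worked example (editorial addition): for l==0x01020304 the second
 * variant computes
 *	(((l>>8)&0x00FF00FF)|((l&0x00FF00FF)<<8))  ->  0x02010403
 *	ROTATE(0x02010403,16)                      ->  0x04030201
 * i.e. a full byte reversal built from the single constant 0x00FF00FF.
 */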
#ifndef ROTATE
#define ROTATE(a,n)	(((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
#endif
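/*
 * Illustration (editorial addition): ROTATE is a left rotation within
 * 32 bits, e.g. ROTATE(0x80000001,1) leaves 0x00000003 in the low 32
 * bits.  The (a)&0xffffffff mask in the portable fallback keeps the
 * right-shifted half free of garbage when HASH_LONG is wider than 32
 * bits (e.g. a 64-bit 'unsigned long').
 */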
/*
 * Make some obvious choices. E.g., HASH_BLOCK_DATA_ORDER_ALIGNED
 * and HASH_BLOCK_HOST_ORDER ought to be the same if input data
 * and host are of the same "endianness". It's possible to mask
 * this with a blank #define HASH_BLOCK_DATA_ORDER though...
 *
 *				<appro@fy.chalmers.se>
 */
#if defined(B_ENDIAN)
#  if defined(DATA_ORDER_IS_BIG_ENDIAN)
#    if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
#      define	HASH_BLOCK_DATA_ORDER_ALIGNED	HASH_BLOCK_HOST_ORDER
#    endif
#  endif
#elif defined(L_ENDIAN)
#  if defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#    if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
#      define	HASH_BLOCK_DATA_ORDER_ALIGNED	HASH_BLOCK_HOST_ORDER
#    endif
#  endif
#endif
#if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
#endif
#if defined(DATA_ORDER_IS_BIG_ENDIAN)

#ifndef PEDANTIC
# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
#  if ((defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)) || \
      (defined(__x86_64) || defined(__x86_64__))
      /*
       * This gives ~30-40% performance improvement in SHA-256 compiled
       * with gcc [on P4]. Well, first macro to be frank. We can pull
       * this trick on x86* platforms only, because these CPUs can fetch
       * unaligned data without raising an exception.
       */
#   define HOST_c2l(c,l)	({ unsigned int r=*((const unsigned int *)(c));	\
				   asm ("bswapl %0":"=r"(r):"0"(r));	\
				   (c)+=4; (l)=r;			})
#   define HOST_l2c(l,c)	({ unsigned int r=(l);			\
				   asm ("bswapl %0":"=r"(r):"0"(r));	\
				   *((unsigned int *)(c))=r; (c)+=4; r;	})
#  endif
# endif
#endif

#ifndef HOST_c2l
#define HOST_c2l(c,l)	(l =(((unsigned long)(*((c)++)))<<24),		\
			 l|=(((unsigned long)(*((c)++)))<<16),		\
			 l|=(((unsigned long)(*((c)++)))<< 8),		\
			 l|=(((unsigned long)(*((c)++)))    ),		\
			 l)
#endif
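/*
 * Illustration (editorial addition): with big-endian data order,
 * HOST_c2l reads four message bytes MSB-first, so input bytes
 * {0x01,0x02,0x03,0x04} yield l==0x01020304 and advance 'c' by 4;
 * HOST_l2c below is the exact inverse, storing a word back MSB-first.
 */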
#define HOST_p_c2l(c,l,n)	{					\
			switch (n) {					\
			case 0: l =((unsigned long)(*((c)++)))<<24;	\
			case 1: l|=((unsigned long)(*((c)++)))<<16;	\
			case 2: l|=((unsigned long)(*((c)++)))<< 8;	\
			case 3: l|=((unsigned long)(*((c)++)));		\
				} }
#define HOST_p_c2l_p(c,l,sc,len) {					\
			switch (sc) {					\
			case 0: l =((unsigned long)(*((c)++)))<<24;	\
				if (--len == 0) break;			\
			case 1: l|=((unsigned long)(*((c)++)))<<16;	\
				if (--len == 0) break;			\
			case 2: l|=((unsigned long)(*((c)++)))<< 8;	\
				} }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)	{					\
			l=0; (c)+=n;					\
			switch (n) {					\
			case 3: l =((unsigned long)(*(--(c))))<< 8;	\
			case 2: l|=((unsigned long)(*(--(c))))<<16;	\
			case 1: l|=((unsigned long)(*(--(c))))<<24;	\
				} }
#ifndef HOST_l2c
#define HOST_l2c(l,c)	(*((c)++)=(unsigned char)(((l)>>24)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>16)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>> 8)&0xff),	\
			 *((c)++)=(unsigned char)(((l)    )&0xff),	\
			 l)
#endif
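/*
 * Illustration (editorial addition): the partial-word helpers complete
 * a word across calls.  The 'n' argument counts bytes already placed,
 * so in this big-endian variant HOST_p_c2l(c,l,2) enters the switch at
 * 'case 2' and ORs two more message bytes into the low half of 'l'.
 * HOST_c2l_p(c,l,3) assembles 'l' from only three available bytes and,
 * as noted above, leaves 'c' pointing where it started.
 */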
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)

#if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
  /* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */
# define HOST_c2l(c,l)	((l)=*((const unsigned int *)(c)), (c)+=4, l)
# define HOST_l2c(l,c)	(*((unsigned int *)(c))=(l), (c)+=4, l)
#endif

#ifndef HOST_c2l
#define HOST_c2l(c,l)	(l =(((unsigned long)(*((c)++)))    ),		\
			 l|=(((unsigned long)(*((c)++)))<< 8),		\
			 l|=(((unsigned long)(*((c)++)))<<16),		\
			 l|=(((unsigned long)(*((c)++)))<<24),		\
			 l)
#endif
#define HOST_p_c2l(c,l,n)	{					\
			switch (n) {					\
			case 0: l =((unsigned long)(*((c)++)));		\
			case 1: l|=((unsigned long)(*((c)++)))<< 8;	\
			case 2: l|=((unsigned long)(*((c)++)))<<16;	\
			case 3: l|=((unsigned long)(*((c)++)))<<24;	\
				} }
#define HOST_p_c2l_p(c,l,sc,len) {					\
			switch (sc) {					\
			case 0: l =((unsigned long)(*((c)++)));		\
				if (--len == 0) break;			\
			case 1: l|=((unsigned long)(*((c)++)))<< 8;	\
				if (--len == 0) break;			\
			case 2: l|=((unsigned long)(*((c)++)))<<16;	\
				} }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)	{					\
			l=0; (c)+=n;					\
			switch (n) {					\
			case 3: l =((unsigned long)(*(--(c))))<<16;	\
			case 2: l|=((unsigned long)(*(--(c))))<< 8;	\
			case 1: l|=((unsigned long)(*(--(c))));		\
				} }
#ifndef HOST_l2c
#define HOST_l2c(l,c)	(*((c)++)=(unsigned char)(((l)    )&0xff),	\
			 *((c)++)=(unsigned char)(((l)>> 8)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>16)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>24)&0xff),	\
			 l)
#endif

#endif
/*
 * Time for some action:-)
 */

int HASH_UPDATE (HASH_CTX *c, const void *data_, size_t len)
	{
	const unsigned char *data=data_;
	register HASH_LONG * p;
	register HASH_LONG l;
	size_t sw,sc,ew,ec;

	if (len==0) return 1;

	l=(c->Nl+(((HASH_LONG)len)<<3))&0xffffffffUL;
	/* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
	 * Wei Dai <weidai@eskimo.com> for pointing it out. */
	if (l < c->Nl) /* overflow */
		c->Nh++;
	c->Nh+=(len>>29);	/* might cause compiler warning on 16-bit */
	c->Nl=l;

	if (c->num != 0)
		{
		p=c->data;
		sw=c->num>>2;
		sc=c->num&0x03;

		if ((c->num+len) >= HASH_CBLOCK)
			{
			l=p[sw]; HOST_p_c2l(data,l,sc); p[sw++]=l;
			for (; sw<HASH_LBLOCK; sw++)
				{ HOST_c2l(data,l); p[sw]=l; }
			HASH_BLOCK_HOST_ORDER (c,p,1);
			len-=(HASH_CBLOCK-c->num);
			c->num=0;
			/* drop through and do the rest */
			}
		else
			{
			c->num+=(unsigned int)len;
			if ((sc+len) < 4) /* ugly, add char's to a word */
				{ l=p[sw]; HOST_p_c2l_p(data,l,sc,len); p[sw]=l; }
			else
				{
				ew=(c->num>>2);
				ec=(c->num&0x03);
				if (sc)
					l=p[sw];
				HOST_p_c2l(data,l,sc);
				p[sw++]=l;
				for (; sw < ew; sw++)
					{ HOST_c2l(data,l); p[sw]=l; }
				if (ec)
					{ HOST_c2l_p(data,l,ec); p[sw]=l; }
				}
			return 1;
			}
		}

	sw=len/HASH_CBLOCK;
	if (sw > 0)
		{
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
		/*
		 * Note that HASH_BLOCK_DATA_ORDER_ALIGNED gets defined
		 * only if sizeof(HASH_LONG)==4.
		 */
		if ((((size_t)data)%4) == 0)
			{
			/* data is properly aligned so that we can cast it: */
			HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,sw);
			sw*=HASH_CBLOCK;
			data+=sw;
			len-=sw;
			}
		else
#if !defined(HASH_BLOCK_DATA_ORDER)
			while (sw--)
				{
				memcpy (p=c->data,data,HASH_CBLOCK);
				HASH_BLOCK_DATA_ORDER_ALIGNED(c,p,1);
				data+=HASH_CBLOCK;
				len-=HASH_CBLOCK;
				}
#endif
#endif
#if defined(HASH_BLOCK_DATA_ORDER)
			{
			HASH_BLOCK_DATA_ORDER(c,data,sw);
			sw*=HASH_CBLOCK;
			data+=sw;
			len-=sw;
			}
#endif
		}

	if (len!=0)
		{
		p = c->data;
		c->num = len;
		ew=len>>2;	/* words to copy */
		ec=len&0x03;
		for (; ew; ew--,p++)
			{ HOST_c2l(data,l); *p=l; }
		HOST_c2l_p(data,l,ec);
		*p=l;
		}
	return 1;
	}
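/*
 * Usage sketch (editorial addition, with the MD5 names from the example
 * above; MD5_Init comes from the digest's own source, not this header).
 * Update may be called any number of times with arbitrary fragment
 * sizes without changing the resulting digest:
 *
 *	MD5_CTX ctx;
 *	unsigned char md[16];
 *	MD5_Init(&ctx);
 *	MD5_Update(&ctx, "hello ", 6);
 *	MD5_Update(&ctx, "world", 5);
 *	MD5_Final(md, &ctx);	-- same md as a single 11-byte Update
 */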
void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data)
	{
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
	if ((((size_t)data)%4) == 0)
		/* data is properly aligned so that we can cast it: */
		HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,1);
	else
#if !defined(HASH_BLOCK_DATA_ORDER)
		{
		memcpy (c->data,data,HASH_CBLOCK);
		HASH_BLOCK_DATA_ORDER_ALIGNED (c,c->data,1);
		}
#endif
#endif
#if defined(HASH_BLOCK_DATA_ORDER)
	HASH_BLOCK_DATA_ORDER (c,data,1);
#endif
	}
int HASH_FINAL (unsigned char *md, HASH_CTX *c)
	{
	register HASH_LONG *p;
	register unsigned long l;
	register int i,j;
	static const unsigned char end[4]={0x80,0x00,0x00,0x00};
	const unsigned char *cp=end;

	/* c->num should definitely have room for at least one more byte. */
	p=c->data;
	i=c->num>>2;
	j=c->num&0x03;

#if 0
	/* purify often complains about the following line as an
	 * Uninitialized Memory Read. While this can be true, the
	 * following p_c2l macro will reset l when that case is true.
	 * This is because j&0x03 contains the number of 'valid' bytes
	 * already in p[i]. If and only if j&0x03 == 0, the UMR will
	 * occur but this is also the only time p_c2l will do
	 * l= *(cp++) instead of l|= *(cp++)
	 * Many thanks to Alex Tang <altitude@cic.net> for picking up this
	 * 'potential bug' */
#ifdef PURIFY
	if (j==0) p[i]=0; /* Yeah, but that's not the way to fix it:-) */
#endif
	l=p[i];
#else
	l = (j==0) ? 0 : p[i];
#endif
	HOST_p_c2l(cp,l,j); p[i++]=l; /* i is the next 'undefined word' */

	if (i>(HASH_LBLOCK-2)) /* save room for Nl and Nh */
		{
		if (i<HASH_LBLOCK) p[i]=0;
		HASH_BLOCK_HOST_ORDER (c,p,1);
		i=0;
		}
	for (; i<(HASH_LBLOCK-2); i++)
		p[i]=0;

#if   defined(DATA_ORDER_IS_BIG_ENDIAN)
	p[HASH_LBLOCK-2]=c->Nh;
	p[HASH_LBLOCK-1]=c->Nl;
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
	p[HASH_LBLOCK-2]=c->Nl;
	p[HASH_LBLOCK-1]=c->Nh;
#endif
	HASH_BLOCK_HOST_ORDER (c,p,1);

#ifndef HASH_MAKE_STRING
#error "HASH_MAKE_STRING must be defined!"
#else
	HASH_MAKE_STRING(c,md);
#endif

	c->num=0;
	/* clear stuff, HASH_BLOCK may be leaving some stuff on the stack
	 * but I'm not worried :-)
	OPENSSL_cleanse((void *)c,sizeof(HASH_CTX));
	 */
	return 1;
	}
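/*
 * Padding layout (editorial addition): HASH_FINAL appends the 0x80
 * byte from end[], zero-fills up to the last two words and stores the
 * 64-bit message bit count (Nh:Nl) there.  E.g. for MD5 (little-endian
 * data order) the 3-byte message "abc" is hashed as the 64-byte block
 *
 *	61 62 63 80 00 00 ... 00 18 00 00 00 00 00 00 00
 *
 * where 0x18 == 24 is the message length in bits.
 */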
#ifndef MD32_REG_T
#define MD32_REG_T long
/*
 * This comment was originally written for MD5, which is why it
 * discusses A-D. But it basically applies to all 32-bit digests,
 * which is why it was moved to a common header file.
 *
 * In case you wonder why A-D are declared as long and not
 * as MD5_LONG: doing so results in a slight performance
 * boost on LP64 architectures. The catch is we don't
 * really care if 32 MSBs of a 64-bit register get polluted
 * with eventual overflows as we *save* only 32 LSBs in
 * *either* case. Now declaring 'em long excuses the compiler
 * from keeping 32 MSBs zeroed, resulting in 13% performance
 * improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
 * Well, to be honest it should say that this *prevents*
 * performance degradation.
 *				<appro@fy.chalmers.se>
 * Apparently there are LP64 compilers that generate better
 * code if A-D are declared int. Most notably GCC-x86_64
 * generates better code.
 *				<appro@fy.chalmers.se>
 */
#endif
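/*
 * Illustration (editorial addition, hypothetical round step): a block
 * function would declare its working variables with MD32_REG_T, e.g.
 *
 *	MD32_REG_T A,B,C,D;
 *	...
 *	A += X[0]; A = ROTATE(A,7) + B;	-- upper 32 bits may be garbage
 *	c->A += A;			-- only the 32 LSBs survive the store
 *
 * the truncation back to HASH_LONG on the final store is what makes
 * the polluted upper half harmless.
 */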
#endif /* _MD32_COMMON__H */