X-Git-Url: http://git.onelab.eu/?p=linux-2.6.git;a=blobdiff_plain;f=include%2Flinux%2Fbyteorder%2Fgeneric.h;fp=include%2Flinux%2Fbyteorder%2Fgeneric.h;h=e86e4a938373e15a3a8edcbb07802a5c9274bb02;hp=5fde6f4d6c1e3692b0abd52945e48721f428410d;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hpb=cee37fe97739d85991964371c1f3a745c00dd236

diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h
index 5fde6f4d6..e86e4a938 100644
--- a/include/linux/byteorder/generic.h
+++ b/include/linux/byteorder/generic.h
@@ -5,6 +5,10 @@
  * linux/byteorder_generic.h
  * Generic Byte-reordering support
  *
+ * The "... p" macros, like le64_to_cpup, can be used with pointers
+ * to unaligned data, but there will be a performance penalty on
+ * some architectures. Use get_unaligned for unaligned data.
+ *
  * Francois-Rene Rideau 19970707
  *    gathered all the good ideas from all asm-foo/byteorder.h into one file,
  *    cleaned them up.
@@ -152,7 +156,7 @@ extern __be32 htonl(__u32);
 extern __u16 ntohs(__be16);
 extern __be16 htons(__u16);
 
-#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__)
+#if defined(__GNUC__) && defined(__OPTIMIZE__)
 #define ___htonl(x) __cpu_to_be32(x)
 #define ___htons(x) __cpu_to_be16(x)
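
Note: the following is a minimal usage sketch, not part of the patch above. The struct wire_hdr and the two read_seq_* helpers are hypothetical; le64_to_cpup(), le64_to_cpu() and get_unaligned() are the existing kernel helpers the new comment refers to, pulled in via <asm/byteorder.h> and <asm/unaligned.h>.

    #include <linux/types.h>
    #include <asm/byteorder.h>	/* le64_to_cpu(), le64_to_cpup() */
    #include <asm/unaligned.h>	/* get_unaligned() */

    /* Hypothetical wire header carrying a little-endian 64-bit sequence number. */
    struct wire_hdr {
    	__le64 seq;
    };

    /* Pointer known to be naturally aligned: the "... p" form dereferences
     * it directly and byte-swaps the result. */
    static u64 read_seq_aligned(const struct wire_hdr *hdr)
    {
    	return le64_to_cpup(&hdr->seq);
    }

    /* Data that may be unaligned (e.g. a header at an odd offset inside a
     * packet buffer): fetch the value with get_unaligned() first, then
     * byte-swap it. */
    static u64 read_seq_unaligned(const void *buf)
    {
    	const __le64 *p = buf;

    	return le64_to_cpu(get_unaligned(p));
    }

On architectures that handle unaligned loads in hardware (e.g. x86) the two helpers typically compile to the same load; on strict-alignment architectures the get_unaligned() form is what the new comment recommends whenever alignment is not guaranteed.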