X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-sparc64%2Fchecksum.h;h=70a006da7634ab10c9f5b625977aa02270cd02f5;hb=refs%2Fheads%2Fvserver;hp=a7d31c89688181c2759433630c88da281d09c1eb;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/include/asm-sparc64/checksum.h b/include/asm-sparc64/checksum.h
index a7d31c896..70a006da7 100644
--- a/include/asm-sparc64/checksum.h
+++ b/include/asm-sparc64/checksum.h
@@ -30,7 +30,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+extern __wsum csum_partial(const void * buff, int len, __wsum sum);
 
 /* the same as csum_partial, but copies from user space while it
  * checksums
@@ -38,51 +38,50 @@ extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned i
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-extern unsigned int csum_partial_copy_sparc64(const char *src, char *dst, int len, unsigned int sum);
-
-static __inline__ unsigned int
-csum_partial_copy_nocheck (const char *src, char *dst, int len,
-			   unsigned int sum)
-{
-	int ret;
-	unsigned char cur_ds = get_thread_current_ds();
-	__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "i" (ASI_P));
-	ret = csum_partial_copy_sparc64(src, dst, len, sum);
-	__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" (cur_ds));
-	return ret;
-}
+extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
+					int len, __wsum sum);
+
+extern long __csum_partial_copy_from_user(const void __user *src,
+					  void *dst, int len,
+					  __wsum sum);
 
-static __inline__ unsigned int
-csum_partial_copy_from_user(const char *src, char *dst, int len,
-			    unsigned int sum, int *err)
+static inline __wsum
+csum_partial_copy_from_user(const void __user *src,
+			    void *dst, int len,
+			    __wsum sum, int *err)
 {
-	__asm__ __volatile__ ("stx %0, [%%sp + 0x7ff + 128]"
-			      : : "r" (err));
-	return csum_partial_copy_sparc64(src, dst, len, sum);
+	long ret = __csum_partial_copy_from_user(src, dst, len, sum);
+	if (ret < 0)
+		*err = -EFAULT;
+	return (__force __wsum) ret;
 }
 
 /*
  * Copy and checksum to user
  */
 #define HAVE_CSUM_COPY_USER
-extern unsigned int csum_partial_copy_user_sparc64(const char *src, char *dst, int len, unsigned int sum);
-static __inline__ unsigned int
-csum_and_copy_to_user(const char *src, char *dst, int len,
-		      unsigned int sum, int *err)
+extern long __csum_partial_copy_to_user(const void *src,
+					void __user *dst, int len,
+					__wsum sum);
+
+static inline __wsum
+csum_and_copy_to_user(const void *src,
+		      void __user *dst, int len,
+		      __wsum sum, int *err)
 {
-	__asm__ __volatile__ ("stx %0, [%%sp + 0x7ff + 128]"
-			      : : "r" (err));
-	return csum_partial_copy_user_sparc64(src, dst, len, sum);
+	long ret = __csum_partial_copy_to_user(src, dst, len, sum);
+	if (ret < 0)
+		*err = -EFAULT;
+	return (__force __wsum) ret;
 }
- 
+
 /* ihl is always 5 or greater, almost always is 5, and iph is word aligned
  * the majority of the time.
  */
-extern unsigned short ip_fast_csum(__const__ unsigned char *iph,
-				   unsigned int ihl);
+extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 
 /* Fold a partial checksum without adding pseudo headers.
  */
-static __inline__ unsigned short csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 	unsigned int tmp;
 
@@ -92,16 +91,15 @@ static __inline__ unsigned short csum_fold(unsigned int sum)
 	"	addc		%1, %%g0, %1\n"
 	"	xnor		%%g0, %1, %0\n"
 	: "=&r" (sum), "=r" (tmp)
-	: "0" (sum), "1" (sum<<16)
+	: "0" (sum), "1" ((__force u32)sum<<16)
 	: "cc");
-	return (sum & 0xffff);
+	return (__force __sum16)sum;
 }
 
-static __inline__ unsigned long csum_tcpudp_nofold(unsigned long saddr,
-						   unsigned long daddr,
-						   unsigned int len,
-						   unsigned short proto,
-						   unsigned int sum)
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+					unsigned int len,
+					unsigned short proto,
+					__wsum sum)
 {
 	__asm__ __volatile__(
 	"	addcc		%1, %0, %0\n"
@@ -109,7 +107,7 @@ static __inline__ unsigned long csum_tcpudp_nofold(unsigned long saddr,
 	"	addccc		%3, %0, %0\n"
 	"	addc		%0, %%g0, %0\n"
 	: "=r" (sum), "=r" (saddr)
-	: "r" (daddr), "r" ((proto<<16)+len), "0" (sum), "1" (saddr)
+	: "r" (daddr), "r" (proto + len), "0" (sum), "1" (saddr)
 	: "cc");
 	return sum;
 }
@@ -118,22 +116,20 @@ static __inline__ unsigned long csum_tcpudp_nofold(unsigned long saddr,
 /*
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						   unsigned long daddr,
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 					unsigned short len,
 					unsigned short proto,
-					unsigned int sum)
+					__wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
-static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
-						     struct in6_addr *daddr,
-						     __u32 len,
-						     unsigned short proto,
-						     unsigned int sum)
+static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+				      const struct in6_addr *daddr,
+				      __u32 len, unsigned short proto,
+				      __wsum sum)
 {
 	__asm__ __volatile__ (
 	"	addcc		%3, %4, %%g7\n"
@@ -164,7 +160,7 @@ static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
 }
 
 /* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
-static __inline__ unsigned short ip_compute_csum(unsigned char * buff, int len)
+static inline __sum16 ip_compute_csum(const void *buff, int len)
 {
 	return csum_fold(csum_partial(buff, len, 0));
 }
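
The substance of this patch is the sparse annotation switch: checksum accumulators become __wsum, folded results __sum16, and IPv4 addresses __be32, with __force marking the few deliberate escapes. To the compiler these are ordinary integers, so code generation is unchanged; only sparse (make C=1) sees the restricted types. A toy userspace illustration of the mechanism, using hypothetical stand-in names wsum_t/sum16_t rather than the kernel's own definitions in linux/types.h:

	#include <stdint.h>

	/* Stand-ins for the kernel's __bitwise machinery; only sparse
	 * (which defines __CHECKER__) gives the attributes any meaning. */
	#ifdef __CHECKER__
	#define __bitwise __attribute__((bitwise))
	#define __force   __attribute__((force))
	#else
	#define __bitwise
	#define __force
	#endif

	typedef uint32_t __bitwise wsum_t;	/* like __wsum */
	typedef uint16_t __bitwise sum16_t;	/* like __sum16 */

	static uint32_t misuse(wsum_t partial)
	{
		return partial + 1;	/* sparse warns: restricted type
					 * degrades to plain integer */
	}

	static uint32_t sanctioned(wsum_t partial)
	{
		return (__force uint32_t)partial + 1;	/* explicit, silent */
	}

This is why the diff sprinkles (__force u32), (__force __wsum) and (__force __sum16) at exactly the points where raw arithmetic on the representation is intended.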
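
Also worth noting is the new fault-reporting convention: the old inline wrappers smuggled the err pointer to the assembly helpers through a fixed stack slot (the "stx %0, [%%sp + 0x7ff + 128]" store) for the exception handler to fill in, whereas __csum_partial_copy_{from,to}_user now return a signed long that is negative on fault, which the wrappers turn into *err = -EFAULT. A minimal caller-side sketch of the resulting interface (hypothetical function and variable names, kernel context assumed; not part of this patch):

	/* Hypothetical caller: copy len bytes from user space into kdst,
	 * producing the running checksum, or -EFAULT on a bad user pointer. */
	static int copy_and_checksum(const void __user *usrc, void *kdst,
				     int len, __wsum *csum_out)
	{
		int err = 0;

		*csum_out = csum_partial_copy_from_user(usrc, kdst, len,
							0, &err);
		return err;	/* wrapper wrote -EFAULT on fault */
	}

Note that err is written only on failure, so the caller must initialize it before the call.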
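
csum_fold() is the densest piece of asm here, so a plain-C restatement may help review. The trick: adding sum to (sum << 16) leaves the sum of the two 16-bit halves in the upper half, with its end-around carry in the condition codes; srl/addc recover it and xnor complements the result. A userspace reference of the same reduction (illustration only, plain integer types in place of __wsum/__sum16):

	#include <stdint.h>

	/* Fold a 32-bit one's-complement accumulator to a final 16-bit
	 * checksum: add the halves, wrap the carry, complement. */
	static uint16_t csum_fold_ref(uint32_t sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);	/* fold halves */
		sum += sum >> 16;			/* end-around carry */
		return (uint16_t)~sum;			/* complement */
	}

For example, csum_fold_ref(0x00012345) computes 0x2345 + 0x0001 = 0x2346 and returns ~0x2346 = 0xdcb9, the same value the addcc/srl/addc/xnor sequence leaves in %0.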
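
Finally, csum_tcpudp_nofold(): the operand change from (proto<<16)+len to proto + len is safe because a one's-complement sum is indifferent to where the 16-bit quantities sit in the 32-bit accumulator once the result is folded. A portable model of the addcc/addccc/addc carry chain, composed into csum_tcpudp_magic() as the header does (userspace sketch reusing csum_fold_ref above; protolen and the _ref names are illustrative, not kernel API):

	#include <stdint.h>

	/* Model of the carry chain: 32-bit accumulation of the pseudo-
	 * header words in which every carry out of bit 31 wraps back
	 * into bit 0 (the final addc in the asm). */
	static uint32_t csum_tcpudp_nofold_ref(uint32_t saddr, uint32_t daddr,
					       uint32_t protolen, uint32_t sum)
	{
		uint64_t acc = (uint64_t)sum + saddr + daddr + protolen;

		acc = (acc & 0xffffffffULL) + (acc >> 32);  /* wrap carries */
		acc = (acc & 0xffffffffULL) + (acc >> 32);  /* possible ripple */
		return (uint32_t)acc;
	}

	/* As in the header: magic = fold(nofold(...)), already complemented. */
	static uint16_t csum_tcpudp_magic_ref(uint32_t saddr, uint32_t daddr,
					      uint16_t len, uint8_t proto,
					      uint32_t sum)
	{
		return csum_fold_ref(csum_tcpudp_nofold_ref(saddr, daddr,
							    (uint32_t)proto + len,
							    sum));
	}

The 64-bit accumulator stands in for the hardware carry flag; for pseudo-header operand ranges the two reductions agree once csum_fold has run.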