/*
 * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */
32 #ifndef __XFS_ARCH_H__
33 #define __XFS_ARCH_H__
36 # error XFS_BIG_INUMS must be defined true or false
41 #include <asm/byteorder.h>
43 #ifdef __LITTLE_ENDIAN
44 # define __BYTE_ORDER __LITTLE_ENDIAN
47 # define __BYTE_ORDER __BIG_ENDIAN
50 #endif /* __KERNEL__ */
/* do we need conversion? */

/*
 * XFS on-disk data is big-endian, so a little-endian host must
 * byte-swap (ARCH_CONVERT == 0 here means "conversion required"
 * when compared against ARCH_NOCONVERT).
 */
#define ARCH_NOCONVERT 1
#if __BYTE_ORDER == __LITTLE_ENDIAN
# define ARCH_CONVERT 0
#else
# define ARCH_CONVERT ARCH_NOCONVERT
#endif
/* generic swapping macros */

#ifndef HAVE_SWABMACROS
/* Byte-swap a value of the given width, preserving the variable's type. */
#define INT_SWAP16(type,var) ((typeof(type))(__swab16((__u16)(var))))
#define INT_SWAP32(type,var) ((typeof(type))(__swab32((__u32)(var))))
#define INT_SWAP64(type,var) ((typeof(type))(__swab64((__u64)(var))))
#endif

/*
 * INT_SWAP - dispatch on sizeof(type) at compile time; widths other
 * than 2, 4 or 8 bytes are passed through unswapped.
 */
#define INT_SWAP(type, var) \
    ((sizeof(type) == 8) ? INT_SWAP64(type,var) : \
    ((sizeof(type) == 4) ? INT_SWAP32(type,var) : \
    ((sizeof(type) == 2) ? INT_SWAP16(type,var) : \
    (var))))
/* Reverse the four bytes at "from" into "to"; regions must not overlap. */
#define INT_SWAP_UNALIGNED_32(from,to) \
    { \
	((__u8*)(to))[0] = ((__u8*)(from))[3]; \
	((__u8*)(to))[1] = ((__u8*)(from))[2]; \
	((__u8*)(to))[2] = ((__u8*)(from))[1]; \
	((__u8*)(to))[3] = ((__u8*)(from))[0]; \
    }

/* Reverse eight bytes by swapping each 32-bit half into the other half. */
#define INT_SWAP_UNALIGNED_64(from,to) \
    { \
	INT_SWAP_UNALIGNED_32( ((__u8*)(from)) + 4, ((__u8*)(to))); \
	INT_SWAP_UNALIGNED_32( ((__u8*)(from)), ((__u8*)(to)) + 4); \
    }
/*
 * get and set integers from potentially unaligned locations
 */

/* Read a 16-bit little-endian value one byte at a time. */
#define INT_GET_UNALIGNED_16_LE(pointer) \
   ((__u16)((((__u8*)(pointer))[0]      ) | (((__u8*)(pointer))[1] << 8 )))
/* Read a 16-bit big-endian value one byte at a time. */
#define INT_GET_UNALIGNED_16_BE(pointer) \
   ((__u16)((((__u8*)(pointer))[0] << 8) | (((__u8*)(pointer))[1])))
/* Store a 16-bit value in little-endian byte order. */
#define INT_SET_UNALIGNED_16_LE(pointer,value) \
    { \
	((__u8*)(pointer))[0] = (((value)     ) & 0xff); \
	((__u8*)(pointer))[1] = (((value) >> 8) & 0xff); \
    }
/* Store a 16-bit value in big-endian byte order. */
#define INT_SET_UNALIGNED_16_BE(pointer,value) \
    { \
	((__u8*)(pointer))[0] = (((value) >> 8) & 0xff); \
	((__u8*)(pointer))[1] = (((value)     ) & 0xff); \
    }
/* Assemble a 32-bit little-endian value byte-by-byte. */
#define INT_GET_UNALIGNED_32_LE(pointer) \
   ((__u32)((((__u8*)(pointer))[0]      ) | (((__u8*)(pointer))[1] << 8 ) \
	   |(((__u8*)(pointer))[2] << 16) | (((__u8*)(pointer))[3] << 24)))
/* Assemble a 32-bit big-endian value byte-by-byte. */
#define INT_GET_UNALIGNED_32_BE(pointer) \
   ((__u32)((((__u8*)(pointer))[0] << 24) | (((__u8*)(pointer))[1] << 16) \
	   |(((__u8*)(pointer))[2] << 8) | (((__u8*)(pointer))[3])))
/* Build a 64-bit little-endian value from its two 32-bit halves. */
#define INT_GET_UNALIGNED_64_LE(pointer) \
   (((__u64)(INT_GET_UNALIGNED_32_LE(((__u8*)(pointer))+4)) << 32 ) \
   |((__u64)(INT_GET_UNALIGNED_32_LE(((__u8*)(pointer)) )) ))
/* Build a 64-bit big-endian value from its two 32-bit halves. */
#define INT_GET_UNALIGNED_64_BE(pointer) \
   (((__u64)(INT_GET_UNALIGNED_32_BE(((__u8*)(pointer)) )) << 32 ) \
   |((__u64)(INT_GET_UNALIGNED_32_BE(((__u8*)(pointer))+4)) ))
/*
 * now pick the right ones for our MACHINE ARCHITECTURE
 */

#if __BYTE_ORDER == __LITTLE_ENDIAN
#define INT_GET_UNALIGNED_16(pointer) INT_GET_UNALIGNED_16_LE(pointer)
#define INT_SET_UNALIGNED_16(pointer,value) INT_SET_UNALIGNED_16_LE(pointer,value)
#define INT_GET_UNALIGNED_32(pointer) INT_GET_UNALIGNED_32_LE(pointer)
#define INT_GET_UNALIGNED_64(pointer) INT_GET_UNALIGNED_64_LE(pointer)
#else
#define INT_GET_UNALIGNED_16(pointer) INT_GET_UNALIGNED_16_BE(pointer)
#define INT_SET_UNALIGNED_16(pointer,value) INT_SET_UNALIGNED_16_BE(pointer,value)
#define INT_GET_UNALIGNED_32(pointer) INT_GET_UNALIGNED_32_BE(pointer)
#define INT_GET_UNALIGNED_64(pointer) INT_GET_UNALIGNED_64_BE(pointer)
#endif
/* define generic INT_ macros */

/*
 * INT_GET - read a field, byte-swapping unless "arch" says the data is
 * already in native order; evaluates to the host-order value.
 */
#define INT_GET(reference,arch) \
    (((arch) == ARCH_NOCONVERT) \
	? \
	    (reference) \
	: \
	    INT_SWAP((reference),(reference)) \
    )
/* does not return a value */
/*
 * INT_SET - store a host-order value into a field, swapping into the
 * target order when "arch" requires it.  The __builtin_constant_p branch
 * lets constant stores fold at compile time; the non-constant branch
 * assigns first and swaps in place so "valueref" is evaluated only once.
 */
#define INT_SET(reference,arch,valueref) \
    (__builtin_constant_p(valueref) ? \
	(void)( (reference) = ( ((arch) != ARCH_NOCONVERT) ? (INT_SWAP((reference),(valueref))) : (valueref)) ) : \
	(void)( \
	    ((reference) = (valueref)), \
	    ( ((arch) != ARCH_NOCONVERT) ? (reference) = INT_SWAP((reference),(reference)) : 0 ) \
	) \
    )
/* does not return a value */
/*
 * INT_MOD_EXPR - apply an arbitrary C expression fragment ("code", e.g.
 * "+= 4") to a field, converting to native order first and back to the
 * target order afterwards when "arch" requires it.
 */
#define INT_MOD_EXPR(reference,arch,code) \
    (((arch) == ARCH_NOCONVERT) \
	? \
	    (void)((reference) code) \
	: \
	    (void)( \
		(reference) = INT_GET((reference),arch) , \
		((reference) code), \
		INT_SET(reference, arch, reference) \
	    ) \
    )
/* does not return a value */
/* INT_MOD - add "delta" to a field, honouring "arch" conversion. */
#define INT_MOD(reference,arch,delta) \
    (void) ( \
	INT_MOD_EXPR(reference,arch,+=(delta)) \
    )
/*
 * INT_COPY - copy a value between two locations with the
 *	      _same architecture_ but _potentially different sizes_
 *
 *	    if the types of the two parameters are equal or they are
 *		in native architecture, a simple copy is done
 *
 *	    otherwise, architecture conversions are done
 *
 */

/* does not return a value */
#define INT_COPY(dst,src,arch) \
    ( \
	((sizeof(dst) == sizeof(src)) || ((arch) == ARCH_NOCONVERT)) \
	? \
	    (void)((dst) = (src)) \
	: \
	    INT_SET(dst, arch, INT_GET(src, arch)) \
    )
/*
 * INT_XLATE - copy a value in either direction between two locations
 *	       with different architectures
 *
 *		    dir < 0	- copy from memory to buffer (native to arch)
 *		    dir > 0	- copy from buffer to memory (arch to native)
 */

/* does not return a value */
#define INT_XLATE(buf,mem,dir,arch) {\
    ASSERT(dir); \
    if (dir>0) { \
	(mem)=INT_GET(buf, arch); \
    } else { \
	INT_SET(buf, arch, mem); \
    } \
}
/*
 * INT_ISZERO - test a field for zero; zero is byte-order invariant,
 * so "arch" needs no conversion here.
 */
#define INT_ISZERO(reference,arch) \
    ((reference) == 0)
/*
 * INT_ZERO - clear a field; zero is byte-order invariant, so "arch"
 * needs no conversion here.
 */
#define INT_ZERO(reference,arch) \
    ((reference) = 0)
/* Fetch an unaligned 16-bit field, byte-swapping as "arch" requires. */
#define INT_GET_UNALIGNED_16_ARCH(pointer,arch) \
    ( ((arch) == ARCH_NOCONVERT) \
	? \
	    (INT_GET_UNALIGNED_16(pointer)) \
	: \
	    (INT_GET_UNALIGNED_16_BE(pointer)) \
    )
/* Store an unaligned 16-bit field, byte-swapping as "arch" requires. */
#define INT_SET_UNALIGNED_16_ARCH(pointer,value,arch) \
    if ((arch) == ARCH_NOCONVERT) { \
	INT_SET_UNALIGNED_16(pointer,value); \
    } else { \
	INT_SET_UNALIGNED_16_BE(pointer,value); \
    }
/* Fetch a 32-bit (short-form) directory inode number, honouring "arch". */
#define DIRINO4_GET_ARCH(pointer,arch) \
    ( ((arch) == ARCH_NOCONVERT) \
	? \
	    (INT_GET_UNALIGNED_32(pointer)) \
	: \
	    (INT_GET_UNALIGNED_32_BE(pointer)) \
    )
#if XFS_BIG_INUMS
/* 64-bit inode numbers: fetch the full unaligned 64-bit value. */
#define DIRINO_GET_ARCH(pointer,arch) \
    ( ((arch) == ARCH_NOCONVERT) \
	? \
	    (INT_GET_UNALIGNED_64(pointer)) \
	: \
	    (INT_GET_UNALIGNED_64_BE(pointer)) \
    )
#else

/* MACHINE ARCHITECTURE dependent */
/*
 * 32-bit inode numbers: take the significant half of the stored 64-bit
 * field.  NOTE(review): the offset-+4 choice on little-endian hosts
 * presumes native-order buffers keep the low word at offset 4 -- confirm
 * against the on-disk layout callers pass in.
 */
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define DIRINO_GET_ARCH(pointer,arch) \
    DIRINO4_GET_ARCH((((__u8*)pointer)+4),arch)
#else
#define DIRINO_GET_ARCH(pointer,arch) \
    DIRINO4_GET_ARCH(pointer,arch)
#endif
#endif
263 #define DIRINO_COPY_ARCH(from,to,arch) \
264 if ((arch) == ARCH_NOCONVERT) { \
265 memcpy(to,from,sizeof(xfs_ino_t)); \
267 INT_SWAP_UNALIGNED_64(from,to); \
269 #define DIRINO4_COPY_ARCH(from,to,arch) \
270 if ((arch) == ARCH_NOCONVERT) { \
271 memcpy(to,(((__u8*)from+4)),sizeof(xfs_dir2_ino4_t)); \
273 INT_SWAP_UNALIGNED_32(from,to); \
276 #endif /* __XFS_ARCH_H__ */