#ifndef __LINUX_SKBUFF_WRAPPER_H
#define __LINUX_SKBUFF_WRAPPER_H 1

#include_next <linux/skbuff.h>

#include <linux/version.h>

#ifndef HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET
static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						     const int offset, void *to,
						     const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						   const int offset,
						   const void *from,
						   const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}
#endif	/* !HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET */

#ifndef HAVE_SKB_RESET_TAIL_POINTER
static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}
#endif
/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 16 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 16 bytes of
 * headroom, you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	16
#endif

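/*
 * Illustration (added for clarity, not from the original sources): with the
 * default NET_SKB_PAD of 16, a headroom deficit of, say, 10 bytes in
 * __skb_cow() below is rounded up to a 16-byte expansion by
 * ALIGN(delta, NET_SKB_PAD), so the reallocated buffer keeps the padding
 * alignment described above.
 */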
#ifndef HAVE_SKB_COW_HEAD
static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom < NET_SKB_PAD)
		headroom = NET_SKB_PAD;
	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}
#endif	/* !HAVE_SKB_COW_HEAD */

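/*
 * Usage sketch (illustrative only, not part of the original header), with
 * ENCAP_HLEN standing in for whatever header length the caller needs:
 *
 *	if (skb_cow_head(skb, ENCAP_HLEN))
 *		goto err_free;
 *	hdr = skb_push(skb, ENCAP_HLEN);
 *
 * This ensures the headroom is private and large enough before the new
 * header is written, which is exactly what skb_cow_head() provides.
 */
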
#ifndef HAVE_SKB_DST_ACCESSOR_FUNCS
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	return (struct dst_entry *)skb->dst;
}

static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->dst = dst;
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb->dst;
}
#endif

#ifndef CHECKSUM_PARTIAL
#define CHECKSUM_PARTIAL CHECKSUM_HW
#endif
#ifndef CHECKSUM_COMPLETE
#define CHECKSUM_COMPLETE CHECKSUM_HW
#endif

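/*
 * Note (added for clarity): HAVE_MAC_RAW is assumed to be set by the build
 * system's configure checks on kernels where struct sk_buff still exposes
 * the old mac.raw/nh.raw/h.raw union members; the defines below let code
 * written against the newer field names compile there.
 */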
#ifdef HAVE_MAC_RAW
#define mac_header mac.raw
#define network_header nh.raw
#define transport_header h.raw
#endif

#ifndef HAVE_SKBUFF_HEADER_HELPERS
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->h.raw;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->h.raw = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->h.raw = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->nh.raw;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->nh.raw = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->nh.raw = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac.raw;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac.raw = skb->data + offset;
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}
#endif	/* !HAVE_SKBUFF_HEADER_HELPERS */

#ifndef HAVE_SKB_WARN_LRO
#ifndef NETIF_F_LRO
static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	return false;
}
#else
extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}
#endif /* NETIF_F_LRO */
#endif /* HAVE_SKB_WARN_LRO */

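/*
 * Typical use (a sketch, not mandated by this header): forwarding paths
 * call skb_warn_if_lro() and drop the packet when it returns true, since
 * an LRO-merged skb cannot safely be forwarded or re-segmented.
 */
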
#ifndef HAVE_CONSUME_SKB
#define consume_skb kfree_skb
#endif

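/*
 * Note (added for clarity): on newer kernels consume_skb() frees an skb
 * that was delivered successfully, so drop-monitoring tools do not count
 * it as a drop; older kernels lack that distinction, hence the plain
 * fallback to kfree_skb() above.
 */
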
#ifndef HAVE_SKB_FRAG_PAGE
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->page;
}
#endif

#ifndef HAVE_SKB_RESET_MAC_LEN
static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}
#endif

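/*
 * Note (added for clarity): skb_unclone() below gives the caller a private
 * copy of the skb's header data when the skb is cloned, so that subsequent
 * header modifications cannot corrupt data shared with the clone; for an
 * unshared skb it is a no-op that returns 0.
 */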
#ifndef HAVE_SKB_UNCLONE
static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);

	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
extern u32 __skb_get_rxhash(struct sk_buff *skb);
static inline __u32 skb_get_rxhash(struct sk_buff *skb)
{
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,34)
	if (!skb->rxhash)
#endif
	return __skb_get_rxhash(skb);