1 #ifndef __LINUX_SKBUFF_WRAPPER_H
2 #define __LINUX_SKBUFF_WRAPPER_H 1
4 #include_next <linux/skbuff.h>
6 #include <linux/version.h>
8 #ifndef HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET
9 static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
10 const int offset, void *to,
11 const unsigned int len)
13 memcpy(to, skb->data + offset, len);
16 static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
19 const unsigned int len)
21 memcpy(skb->data + offset, from, len);
24 #endif /* !HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET */
26 #ifndef HAVE_SKB_RESET_TAIL_POINTER
27 static inline void skb_reset_tail_pointer(struct sk_buff *skb)
29 skb->tail = skb->data;
#ifndef NET_SKB_PAD
/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 16 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 16 bytes of
 * headroom, you should not reduce this.
 */
#define NET_SKB_PAD	16
#endif	/* !NET_SKB_PAD */
51 #ifndef HAVE_SKB_COW_HEAD
52 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
57 if (headroom < NET_SKB_PAD)
58 headroom = NET_SKB_PAD;
59 if (headroom > skb_headroom(skb))
60 delta = headroom - skb_headroom(skb);
63 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
68 static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
70 return __skb_cow(skb, headroom, skb_header_cloned(skb));
72 #endif /* !HAVE_SKB_COW_HEAD */
74 #ifndef HAVE_SKB_DST_ACCESSOR_FUNCS
75 static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
77 return (struct dst_entry *)skb->dst;
80 static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
85 static inline struct rtable *skb_rtable(const struct sk_buff *skb)
87 return (struct rtable *)skb->dst;
/* Older kernels use the single CHECKSUM_HW value where newer ones
 * distinguish CHECKSUM_PARTIAL (tx) from CHECKSUM_COMPLETE (rx). */
#ifndef CHECKSUM_PARTIAL
#define CHECKSUM_PARTIAL CHECKSUM_HW
#endif
#ifndef CHECKSUM_COMPLETE
#define CHECKSUM_COMPLETE CHECKSUM_HW
#endif
98 #ifndef HAVE_SKBUFF_HEADER_HELPERS
99 static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
104 static inline void skb_reset_transport_header(struct sk_buff *skb)
106 skb->h.raw = skb->data;
109 static inline void skb_set_transport_header(struct sk_buff *skb,
112 skb->h.raw = skb->data + offset;
115 static inline unsigned char *skb_network_header(const struct sk_buff *skb)
120 static inline void skb_reset_network_header(struct sk_buff *skb)
122 skb->nh.raw = skb->data;
125 static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
127 skb->nh.raw = skb->data + offset;
130 static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
135 static inline void skb_reset_mac_header(struct sk_buff *skb)
137 skb->mac_header = skb->data;
140 static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
142 skb->mac.raw = skb->data + offset;
145 static inline int skb_transport_offset(const struct sk_buff *skb)
147 return skb_transport_header(skb) - skb->data;
150 static inline int skb_network_offset(const struct sk_buff *skb)
152 return skb_network_header(skb) - skb->data;
155 static inline void skb_copy_to_linear_data(struct sk_buff *skb,
157 const unsigned int len)
159 memcpy(skb->data, from, len);
161 #endif /* !HAVE_SKBUFF_HEADER_HELPERS */
#ifndef HAVE_SKB_WARN_LRO
#ifndef NETIF_F_LRO
/* Kernel has no LRO support at all, so an LRO skb can never appear. */
static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	return false;
}
#else
extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);

/* Returns true (after logging a warning) if 'skb' looks like an LRO
 * aggregate, which must not be forwarded. */
static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}
#endif /* NETIF_F_LRO */
#endif /* HAVE_SKB_WARN_LRO */
#ifndef HAVE_CONSUME_SKB
/* consume_skb() (free a successfully transmitted skb without the
 * drop-monitor notification) did not exist on old kernels; kfree_skb()
 * is functionally equivalent there. */
#define consume_skb kfree_skb
#endif	/* !HAVE_CONSUME_SKB */
190 #ifndef HAVE_SKB_FRAG_PAGE
191 #include <linux/mm.h>
193 static inline struct page *skb_frag_page(const skb_frag_t *frag)
198 static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
202 static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
206 static inline void __skb_frag_ref(skb_frag_t *frag)
208 get_page(skb_frag_page(frag));
210 static inline void __skb_frag_unref(skb_frag_t *frag)
212 put_page(skb_frag_page(frag));
215 static inline void skb_frag_ref(struct sk_buff *skb, int f)
217 __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
220 static inline void skb_frag_unref(struct sk_buff *skb, int f)
222 __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
227 #ifndef HAVE_SKB_RESET_MAC_LEN
228 static inline void skb_reset_mac_len(struct sk_buff *skb)
230 skb->mac_len = skb->network_header - skb->mac_header;
234 #ifndef HAVE_SKB_UNCLONE
235 static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
237 might_sleep_if(pri & __GFP_WAIT);
240 return pskb_expand_head(skb, 0, 0, pri);
246 #ifndef HAVE_SKB_ORPHAN_FRAGS
247 static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
253 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0)
254 #define __skb_get_rxhash rpl__skb_get_rxhash
255 #define skb_get_rxhash rpl_skb_get_rxhash
257 extern u32 __skb_get_rxhash(struct sk_buff *skb);
258 static inline __u32 skb_get_rxhash(struct sk_buff *skb)
264 return __skb_get_rxhash(skb);
267 static inline void skb_tx_error(struct sk_buff *skb)
271 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) */
273 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
274 unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
275 int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len,
#ifndef HAVE_SKB_HAS_FRAG_LIST
/* skb_has_frags() was renamed to skb_has_frag_list() in 2.6.37. */
#define skb_has_frag_list skb_has_frags
#endif	/* !HAVE_SKB_HAS_FRAG_LIST */
284 #ifndef HAVE___SKB_FILL_PAGE_DESC
285 static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
286 struct page *page, int off, int size)
288 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
290 __skb_frag_set_page(frag, page);
291 frag->page_offset = off;
292 skb_frag_size_set(frag, size);