X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Flinux%2Fskbuff.h;h=724d6841d0ae7820cb3b21d34e052f6cf9590e84;hb=9bf4aaab3e101692164d49b7ca357651eb691cb6;hp=7bf6501a9024fb71169fec1f813996ebafd851f7;hpb=db216c3d5e4c040e557a50f8f5d35d5c415e8c1c;p=linux-2.6.git

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 7bf6501a9..724d6841d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -27,6 +27,7 @@
 #include <linux/highmem.h>
 #include <linux/poll.h>
 #include <linux/net.h>
+#include <net/checksum.h>
 
 #define HAVE_ALLOC_SKB		/* For the drivers to know */
 #define HAVE_ALIGNABLE_SKB	/* Ditto 8) */
@@ -155,6 +156,7 @@ struct skb_shared_info {
  *	@sk: Socket we are owned by
  *	@stamp: Time we arrived
  *	@dev: Device we arrived on/are leaving by
+ *	@input_dev: Device we arrived on
  *	@real_dev: The real device we are using
  *	@h: Transport layer header
  *	@nh: Network layer header
@@ -197,6 +199,7 @@ struct sk_buff {
 	struct sock		*sk;
 	struct timeval		stamp;
 	struct net_device	*dev;
+	struct net_device	*input_dev;
 	struct net_device	*real_dev;
 
 	union {
@@ -230,7 +233,7 @@ struct sk_buff {
 	 * want to keep them across layers you have to do a skb_clone()
 	 * first. This is owned by whoever has the skb queued ATM.
 	 */
-	char			cb[48];
+	char			cb[40];
 
 	unsigned int		len,
 				data_len,
@@ -262,9 +265,15 @@ struct sk_buff {
 	} private;
 #endif
 #ifdef CONFIG_NET_SCHED
-	__u32			tc_index;	/* traffic control index */
+	__u32			tc_index;	/* traffic control index */
+#ifdef CONFIG_NET_CLS_ACT
+	__u32			tc_verd;	/* traffic control verdict */
+	__u32			tc_classid;	/* traffic control classid */
+
 #endif
+
 #endif
+
 	/* These elements must be at the end, see alloc_skb() for details. */
 	unsigned int		truesize;
 	atomic_t		users;
@@ -663,13 +672,15 @@ static inline int skb_pagelen(const struct sk_buff *skb)
 	return len + skb_headlen(skb);
 }
 
-static inline void skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size)
+static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
+				       struct page *page, int off, int size)
 {
 	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-	frag->page = page;
-	frag->page_offset = off;
-	frag->size = size;
-	skb_shinfo(skb)->nr_frags = i+1;
+
+	frag->page		= page;
+	frag->page_offset	= off;
+	frag->size		= size;
+	skb_shinfo(skb)->nr_frags = i + 1;
 }
 
 #define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
@@ -816,6 +827,30 @@ static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
 	skb->tail += len;
 }
 
+/*
+ * CPUs often take a performance hit when accessing unaligned memory
+ * locations. The actual performance hit varies; it can be small if the
+ * hardware handles it or large if we have to take an exception and fix it
+ * in software.
+ *
+ * Since an ethernet header is 14 bytes, network drivers often end up with
+ * the IP header at an unaligned offset. The IP header can be aligned by
+ * shifting the start of the packet by 2 bytes. Drivers should do this
+ * with:
+ *
+ * skb_reserve(skb, NET_IP_ALIGN);
+ *
+ * The downside to this alignment of the IP header is that the DMA is now
+ * unaligned. On some architectures the cost of an unaligned DMA is high
+ * and this cost outweighs the gains made by aligning the IP header.
+ *
+ * Since this trade-off varies between architectures, we allow NET_IP_ALIGN
+ * to be overridden.
+ */
+#ifndef NET_IP_ALIGN
+#define NET_IP_ALIGN	2
+#endif
+
 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
 
 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
@@ -971,6 +1006,39 @@ static inline struct sk_buff *skb_padto(struct sk_buff *skb, unsigned int len)
 	return skb_pad(skb, len-size);
 }
 
+static inline int skb_add_data(struct sk_buff *skb,
+			       char __user *from, int copy)
+{
+	const int off = skb->len;
+
+	if (skb->ip_summed == CHECKSUM_NONE) {
+		int err = 0;
+		unsigned int csum = csum_and_copy_from_user(from,
+							    skb_put(skb, copy),
+							    copy, 0, &err);
+		if (!err) {
+			skb->csum = csum_block_add(skb->csum, csum, off);
+			return 0;
+		}
+	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
+		return 0;
+
+	__skb_trim(skb, off);
+	return -EFAULT;
+}
+
+static inline int skb_can_coalesce(struct sk_buff *skb, int i,
+				   struct page *page, int off)
+{
+	if (i) {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+
+		return page == frag->page &&
+		       off == frag->page_offset + frag->size;
+	}
+	return 0;
+}
+
 /**
  *	skb_linearize - convert paged skb to linear one
  *	@skb: buffer to linearize
@@ -1034,10 +1102,28 @@ extern unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb,
 					   int offset, u8 *to, int len,
 					   unsigned int csum);
 extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
+extern void skb_split(struct sk_buff *skb,
+		      struct sk_buff *skb1, const u32 len);
 
 extern void skb_init(void);
 extern void skb_add_mtu(int mtu);
 
+struct skb_iter {
+	/* Iteration functions set these */
+	unsigned char	*data;
+	unsigned int	len;
+
+	/* Private to iteration */
+	unsigned int	nextfrag;
+	struct sk_buff	*fraglist;
+};
+
+/* Keep iterating until skb_iter_next returns false. */
+extern void skb_iter_first(const struct sk_buff *skb, struct skb_iter *i);
+extern int skb_iter_next(const struct sk_buff *skb, struct skb_iter *i);
+/* Call this if aborting loop before !skb_iter_next */
+extern void skb_iter_abort(const struct sk_buff *skb, struct skb_iter *i);
+
 #ifdef CONFIG_NETFILTER
 static inline void nf_conntrack_put(struct nf_ct_info *nfct)
 {
@@ -1049,6 +1135,14 @@ static inline void nf_conntrack_get(struct nf_ct_info *nfct)
 	if (nfct)
 		atomic_inc(&nfct->master->use);
 }
+static inline void nf_reset(struct sk_buff *skb)
+{
+	nf_conntrack_put(skb->nfct);
+	skb->nfct = NULL;
+#ifdef CONFIG_NETFILTER_DEBUG
+	skb->nf_debug = 0;
+#endif
+}
 
 #ifdef CONFIG_BRIDGE_NETFILTER
 static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
@@ -1061,9 +1155,10 @@ static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
 	if (nf_bridge)
 		atomic_inc(&nf_bridge->use);
 }
-#endif
-
-#endif
+#endif /* CONFIG_BRIDGE_NETFILTER */
+#else /* CONFIG_NETFILTER */
+static inline void nf_reset(struct sk_buff *skb) {}
+#endif /* CONFIG_NETFILTER */
 
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SKBUFF_H */
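
The NET_IP_ALIGN comment in the patch gives the recipe but not the surrounding driver code. A minimal sketch of a receive path that applies it; the example_rx() function and its dev/buf/len parameters are hypothetical, and dev_alloc_skb() contributes its usual 16 bytes of headroom on top of the 2 reserved here:

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>
	#include <linux/skbuff.h>

	/*
	 * Hypothetical receive handler: copy a received frame into a fresh
	 * skb, reserving NET_IP_ALIGN bytes so the IP header behind the
	 * 14-byte Ethernet header lands on a 4-byte boundary.
	 */
	static void example_rx(struct net_device *dev, const unsigned char *buf,
			       unsigned int len)
	{
		struct sk_buff *skb = dev_alloc_skb(len + NET_IP_ALIGN);

		if (!skb)
			return;				/* drop on allocation failure */

		skb_reserve(skb, NET_IP_ALIGN);		/* shift IP header into alignment */
		memcpy(skb_put(skb, len), buf, len);	/* copy frame, advance tail */
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);				/* hand off to the stack */
	}

The cost trade-off the comment describes is visible here: the memcpy/DMA into skb->data now starts 2 bytes off alignment, which is why an architecture may override NET_IP_ALIGN to 0.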
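
skb_can_coalesce() is the natural companion to the reworked skb_fill_page_desc(): a zero-copy send path first checks whether a new chunk merely extends the last page fragment before consuming a fragment slot. A sketch of that pattern, assuming a hypothetical caller that already holds a reference on page; truesize and socket memory accounting are deliberately elided:

	/*
	 * Append [off, off + size) of page to skb, coalescing with the last
	 * fragment when the new chunk is physically contiguous with it.
	 */
	static int example_append_page(struct sk_buff *skb, struct page *page,
				       int off, int size)
	{
		int i = skb_shinfo(skb)->nr_frags;

		if (skb_can_coalesce(skb, i, page, off)) {
			/* Contiguous with frags[i - 1]: just grow that fragment. */
			skb_shinfo(skb)->frags[i - 1].size += size;
		} else {
			if (i == MAX_SKB_FRAGS)
				return -EMSGSIZE;	/* fragment table is full */
			get_page(page);			/* fragment keeps its own reference */
			skb_fill_page_desc(skb, i, page, off, size);
		}

		skb->len += size;
		skb->data_len += size;
		return 0;
	}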
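
The comments on the skb_iter declarations imply a definite loop shape: skb_iter_first() exposes the linear header as the first {data, len} chunk, each skb_iter_next() advances through the page fragments and the frag list, and skb_iter_abort() is only needed when a walk stops before skb_iter_next() has returned false. A hypothetical full walk under those assumptions:

	/* Count the zero bytes in an skb by visiting every chunk in order. */
	static unsigned int example_count_zeros(const struct sk_buff *skb)
	{
		struct skb_iter i;
		unsigned int n, zeros = 0;

		skb_iter_first(skb, &i);
		do {
			for (n = 0; n < i.len; n++)
				if (!i.data[n])
					zeros++;
		} while (skb_iter_next(skb, &i));

		/* Loop ran to completion, so no skb_iter_abort() is needed. */
		return zeros;
	}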