+/*
+ * Shrink the send buffer when under memory pressure: clamp sk_sndbuf
+ * to half of the currently queued write memory, but never below
+ * SOCK_MIN_SNDBUF, unless the user has locked the buffer size
+ * (SOCK_SNDBUF_LOCK).
+ */
+static inline void sk_stream_moderate_sndbuf(struct sock *sk)
+{
+	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
+		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
+		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
+	}
+}
+
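+/*
+ * Allocate an skb for stream output with headroom for the protocol's
+ * maximum header.  @mem extra bytes are charged to the skb's truesize;
+ * the full charge must either fit within the socket's forward
+ * allocation or be granted by sk_stream_mem_schedule(), otherwise the
+ * skb is freed and NULL is returned.  If the allocation itself fails,
+ * the protocol is put into memory pressure and the send buffer is
+ * moderated.
+ */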
+static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
+						    int size, int mem, int gfp)
+{
+	struct sk_buff *skb = alloc_skb(size + sk->sk_prot->max_header, gfp);
+
+	if (skb) {
+		skb->truesize += mem;
+		if (sk->sk_forward_alloc >= (int)skb->truesize ||
+		    sk_stream_mem_schedule(sk, skb->truesize, 0)) {
+			skb_reserve(skb, sk->sk_prot->max_header);
+			return skb;
+		}
+		__kfree_skb(skb);
+	} else {
+		sk->sk_prot->enter_memory_pressure();
+		sk_stream_moderate_sndbuf(sk);
+	}
+	return NULL;
+}
+
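+/* Allocate a stream skb without charging any extra memory to it. */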
+static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
+						   int size, int gfp)
+{
+	return sk_stream_alloc_pskb(sk, size, 0, gfp);
+}
+
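+/*
+ * Allocate a page for outgoing stream data, provided PAGE_SIZE bytes
+ * can be charged to the socket's send memory.  If the charge cannot
+ * be made, the protocol is put into memory pressure and the send
+ * buffer is moderated.
+ */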
+static inline struct page *sk_stream_alloc_page(struct sock *sk)
+{
+	struct page *page = NULL;
+
+	if (sk->sk_forward_alloc >= (int)PAGE_SIZE ||
+	    sk_stream_mem_schedule(sk, PAGE_SIZE, 0))
+		page = alloc_pages(sk->sk_allocation, 0);
+	else {
+		sk->sk_prot->enter_memory_pressure();
+		sk_stream_moderate_sndbuf(sk);
+	}
+	return page;
+}
+
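+/*
+ * Iterate over the retransmit portion of the write queue: every skb
+ * that has already been transmitted, i.e. everything up to but not
+ * including sk_send_head.
+ */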
+#define sk_stream_for_retrans_queue(skb, sk)				\
+	for (skb = (sk)->sk_write_queue.next;				\
+	     (skb != (sk)->sk_send_head) &&				\
+	     (skb != (struct sk_buff *)&(sk)->sk_write_queue);		\
+	     skb = skb->next)
+