net: Optimize memory usage when splicing from sockets.
author Jarek Poplawski <jarkao2@gmail.com>
Sun, 1 Feb 2009 08:41:42 +0000 (00:41 -0800)
committer David S. Miller <davem@davemloft.net>
Sun, 1 Feb 2009 08:41:42 +0000 (00:41 -0800)
The recent fix for data corruption when splicing from sockets uses
memory very inefficiently: it allocates a new page for every chunk
copied out of an skb's linear area. This patch reuses the same page
until it is (almost) full, caching it in the sk_sndmsg_page field.
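
To illustrate the idea outside the kernel, here is a minimal userspace
sketch of the same page-caching pattern. The names struct chunk_cache and
chunk_to_page() are made up for the example; the kernel keeps this state in
sk->sk_sndmsg_page / sk->sk_sndmsg_off and manages the page's lifetime with
get_page()/put_page() rather than malloc()/free().

/*
 * Standalone sketch (plain C, not the kernel code): coalesce many small
 * copies into one cached 4 KiB "page", allocating a replacement only when
 * fewer than 64 bytes remain and the next chunk does not fit.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SZ 4096u

struct chunk_cache {
	unsigned char *page;	/* currently cached page, or NULL */
	unsigned int off;	/* next free byte in that page    */
};

/* Copy up to *len bytes of src into the cached page; on return *len is
 * the amount actually copied and *off its offset in the returned page. */
static unsigned char *chunk_to_page(struct chunk_cache *c,
				    const unsigned char *src,
				    unsigned int *len, unsigned int *off)
{
	if (!c->page) {
new_page:
		c->page = malloc(PAGE_SZ);
		if (!c->page)
			return NULL;
		c->off = 0;
	} else {
		unsigned int room = PAGE_SZ - c->off;

		/* same heuristic as the patch: a nearly full page that
		 * cannot take the whole chunk is dropped for a fresh one */
		if (room < 64 && room < *len) {
			free(c->page);	/* kernel: put_page(); consumers
					 * still hold their own references */
			goto new_page;
		}
		if (*len > room)	/* kernel: min_t(unsigned int, ...) */
			*len = room;
	}

	memcpy(c->page + c->off, src, *len);
	*off = c->off;
	c->off += *len;
	return c->page;
}

int main(void)
{
	struct chunk_cache c = { NULL, 0 };
	unsigned char chunk[100];
	unsigned int i, len, off;

	memset(chunk, 'x', sizeof(chunk));

	/* ten 100-byte chunks share one page instead of taking ten pages */
	for (i = 0; i < 10; i++) {
		len = sizeof(chunk);
		if (chunk_to_page(&c, chunk, &len, &off))
			printf("chunk %u: %u bytes at offset %u\n", i, len, off);
	}
	free(c.page);
	return 0;
}

In the patch itself the returned page additionally gets a reference taken
with get_page() for the splice pipe, which is what allows the socket to keep
reusing its cached copy while earlier chunks are still in flight.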

With changes from David S. Miller <davem@davemloft.net>

Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/core/skbuff.c

index f20e758fe46b84388085e6e2dd4d0782a457003b..e55d1ef5690dc4d024454bc2142b53a1c357a148 100644
@@ -1333,14 +1333,39 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
        put_page(spd->pages[i]);
 }
 
-static inline struct page *linear_to_page(struct page *page, unsigned int len,
-                                         unsigned int offset)
-{
-       struct page *p = alloc_pages(GFP_KERNEL, 0);
+static inline struct page *linear_to_page(struct page *page, unsigned int *len,
+                                         unsigned int *offset,
+                                         struct sk_buff *skb)
+{
+       struct sock *sk = skb->sk;
+       struct page *p = sk->sk_sndmsg_page;
+       unsigned int off;
+
+       if (!p) {
+new_page:
+               p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
+               if (!p)
+                       return NULL;
 
-       if (!p)
-               return NULL;
-       memcpy(page_address(p) + offset, page_address(page) + offset, len);
+               off = sk->sk_sndmsg_off = 0;
+               /* hold one ref to this page until it's full */
+       } else {
+               unsigned int mlen;
+
+               off = sk->sk_sndmsg_off;
+               mlen = PAGE_SIZE - off;
+               if (mlen < 64 && mlen < *len) {
+                       put_page(p);
+                       goto new_page;
+               }
+
+               *len = min_t(unsigned int, *len, mlen);
+       }
+
+       memcpy(page_address(p) + off, page_address(page) + *offset, *len);
+       sk->sk_sndmsg_off += *len;
+       *offset = off;
+       get_page(p);
 
        return p;
 }
@@ -1349,21 +1374,21 @@ static inline struct page *linear_to_page(struct page *page, unsigned int len,
  * Fill page/offset/length into spd, if it can hold more pages.
  */
 static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
-                               unsigned int len, unsigned int offset,
+                               unsigned int *len, unsigned int offset,
                                struct sk_buff *skb, int linear)
 {
        if (unlikely(spd->nr_pages == PIPE_BUFFERS))
                return 1;
 
        if (linear) {
-               page = linear_to_page(page, len, offset);
+               page = linear_to_page(page, len, &offset, skb);
                if (!page)
                        return 1;
        } else
                get_page(page);
 
        spd->pages[spd->nr_pages] = page;
-       spd->partial[spd->nr_pages].len = len;
+       spd->partial[spd->nr_pages].len = *len;
        spd->partial[spd->nr_pages].offset = offset;
        spd->nr_pages++;
 
@@ -1405,7 +1430,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
                /* the linear region may spread across several pages  */
                flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
 
-               if (spd_fill_page(spd, page, flen, poff, skb, linear))
+               if (spd_fill_page(spd, page, &flen, poff, skb, linear))
                        return 1;
 
                __segment_seek(&page, &poff, &plen, flen);