/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/net.h>
#include <net/sock.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
u32 *
xdr_encode_netobj(u32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = htonl(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}

u32 *
xdr_decode_netobj_fixed(u32 *p, void *obj, unsigned int len)
{
	if (ntohl(*p++) != len)
		return NULL;
	memcpy(obj, p, len);
	return p + XDR_QUADLEN(len);
}

u32 *
xdr_decode_netobj(u32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len  = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p - pointer to current position in XDR buffer.
 * @ptr - pointer to data to encode (or NULL)
 * @nbytes - size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 *
 */
u32 *xdr_encode_opaque_fixed(u32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL(xdr_encode_opaque_fixed);

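/*
 * Example (illustrative sketch, not part of the original file): a caller
 * encoding a 10-byte fixed-size verifier directly into an RPC buffer
 * might do:
 *
 *	u8 verf[10];
 *
 *	p = xdr_encode_opaque_fixed(p, verf, sizeof(verf));
 *
 * The data occupies XDR_QUADLEN(10) == 3 words; the last two bytes of
 * the third word are zeroed so the stream stays 32-bit aligned.
 */
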
/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p - pointer to current position in XDR buffer.
 * @ptr - pointer to data to encode (or NULL)
 * @nbytes - size of data.
 *
 * Returns the updated current XDR buffer position
 */
u32 *xdr_encode_opaque(u32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = htonl(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL(xdr_encode_opaque);

u32 *
xdr_encode_string(u32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}

u32 *
xdr_decode_string(u32 *p, char **sp, int *lenp, int maxlen)
{
	unsigned int	len;
	char		*string;

	if ((len = ntohl(*p++)) > maxlen)
		return NULL;
	if (lenp)
		*lenp = len;
	/* If the length is a multiple of four, there is no padding to
	 * hold the terminating NUL, so shift the string down one word. */
	if ((len % 4) != 0) {
		string = (char *) p;
	} else {
		string = (char *) (p - 1);
		memmove(string, p, len);
	}
	string[len] = '\0';
	*sp = string;
	return p + XDR_QUADLEN(len);
}

u32 *
xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}

void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct iovec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct iovec *head = xdr->head;
	struct iovec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}

/*
 * Realign the iovec if the server missed out some reply elements
 * (such as post-op attributes,...)
 * Note: This is a simple implementation that assumes that
 *            len <= iov->iov_len !!!
 * The RPC header (assumed to be the 1st element in the iov array)
 * is not shifted.
 */
void xdr_shift_iovec(struct iovec *iov, int nr, size_t len)
{
	struct iovec *pvec;

	for (pvec = iov + nr - 1; nr > 1; nr--, pvec--) {
		struct iovec *svec = pvec - 1;

		if (len > pvec->iov_len) {
			printk(KERN_DEBUG "RPC: Urk! Large shift of short iovec.\n");
			return;
		}
		memmove((char *)pvec->iov_base + len, pvec->iov_base,
			pvec->iov_len - len);

		if (len > svec->iov_len) {
			printk(KERN_DEBUG "RPC: Urk! Large shift of short iovec.\n");
			return;
		}
		memcpy(pvec->iov_base,
		       (char *)svec->iov_base + svec->iov_len - len, len);
	}
}

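/*
 * Illustrative sketch (hypothetical reply layout, not from this file):
 * if a reply was expected as [header][attrs][data] with one iovec per
 * element, but the server omitted the 8-byte attrs element, calling
 *
 *	xdr_shift_iovec(iov, 3, 8);
 *
 * shifts everything after the header right by 8 bytes: each iovec's
 * contents move up by 8, and the last 8 bytes of the preceding iovec
 * are pulled in at its front, so 'data' again starts at iov[2].iov_base.
 */
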
/*
 * Map a struct xdr_buf into an iovec array.
 */
int xdr_kmap(struct iovec *iov_base, struct xdr_buf *xdr, size_t base)
{
	struct iovec	*iov = iov_base;
	struct page	**ppage = xdr->pages;
	unsigned int	len, pglen = xdr->page_len;

	len = xdr->head[0].iov_len;
	if (base < len) {
		iov->iov_len = len - base;
		iov->iov_base = (char *)xdr->head[0].iov_base + base;
		iov++;
		base = 0;
	} else
		base -= len;

	if (pglen == 0)
		goto map_tail;
	if (base >= pglen) {
		base -= pglen;
		goto map_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}
	do {
		len = PAGE_CACHE_SIZE;
		iov->iov_base = kmap(*ppage);
		if (base) {
			iov->iov_base += base;
			len -= base;
			base = 0;
		}
		if (pglen < len)
			len = pglen;
		iov->iov_len = len;
		iov++;
		ppage++;
	} while ((pglen -= len) != 0);
map_tail:
	if (xdr->tail[0].iov_len) {
		iov->iov_len = xdr->tail[0].iov_len - base;
		iov->iov_base = (char *)xdr->tail[0].iov_base + base;
		iov++;
	}
	return (iov - iov_base);
}

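/*
 * Usage sketch (hypothetical caller, not from this file): xdr_kmap()
 * leaves every page in xdr->pages mapped with kmap(), so it must be
 * paired with xdr_kunmap() at the same base offset once the iovec
 * array is no longer needed:
 *
 *	struct iovec iov[18];	// head + pages + tail; bound is illustrative
 *	int nr;
 *
 *	nr = xdr_kmap(iov, xdr, 0);
 *	... pass iov[0..nr-1] to the transport ...
 *	xdr_kunmap(xdr, 0);
 */
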
void xdr_kunmap(struct xdr_buf *xdr, size_t base)
{
	struct page	**ppage = xdr->pages;
	unsigned int	pglen = xdr->page_len;

	if (!pglen)
		return;
	if (base > xdr->head[0].iov_len)
		base -= xdr->head[0].iov_len;
	else
		base = 0;

	if (base >= pglen)
		return;
	if (base || xdr->page_base) {
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		/* Note: The offset means that the length of the first
		 * page is really (PAGE_CACHE_SIZE - (base & ~PAGE_CACHE_MASK)).
		 * In order to avoid an extra test inside the loop,
		 * we bump pglen here, and just subtract PAGE_CACHE_SIZE... */
		pglen += base & ~PAGE_CACHE_MASK;
	}
	for (;;) {
		flush_dcache_page(*ppage);
		kunmap(*ppage);
		if (pglen <= PAGE_CACHE_SIZE)
			break;
		pglen -= PAGE_CACHE_SIZE;
		ppage++;
	}
}

void
xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
			  skb_reader_t *desc,
			  skb_read_actor_t copy_actor)
{
	struct page	**ppage = xdr->pages;
	unsigned int	len, pglen = xdr->page_len;
	int		ret;

	len = xdr->head[0].iov_len;
	if (base < len) {
		len -= base;
		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
		if (ret != len || !desc->count)
			return;
		base = 0;
	} else
		base -= len;

	if (pglen == 0)
		goto copy_tail;
	if (base >= pglen) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}
	do {
		char *kaddr;

		len = PAGE_CACHE_SIZE;
		kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
		if (base) {
			len -= base;
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr + base, len);
			base = 0;
		} else {
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr, len);
		}
		flush_dcache_page(*ppage);
		kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
		if (ret != len || !desc->count)
			return;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len)
		copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
}

int
xdr_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
		struct xdr_buf *xdr, unsigned int base, int msgflags)
{
	struct page **ppage = xdr->pages;
	unsigned int len, pglen = xdr->page_len;
	int err, ret = 0;
	ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
	mm_segment_t oldfs;

	len = xdr->head[0].iov_len;
	if (base < len || (addr != NULL && base == 0)) {
		struct iovec iov = {
			.iov_base = xdr->head[0].iov_base + base,
			.iov_len = len - base,
		};
		struct msghdr msg = {
			.msg_name = addr,
			.msg_namelen = addrlen,
			.msg_flags = msgflags,
		};

		if (iov.iov_len != 0) {
			msg.msg_iov = &iov;
			msg.msg_iovlen = 1;
		}
		if (xdr->len > len)
			msg.msg_flags |= MSG_MORE;
		oldfs = get_fs(); set_fs(get_ds());
		err = sock_sendmsg(sock, &msg, iov.iov_len);
		set_fs(oldfs);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
		if (err != iov.iov_len)
			goto out;
		base = 0;
	} else
		base -= len;

	if (pglen == 0)
		goto copy_tail;
	if (base >= pglen) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}

	sendpage = sock->ops->sendpage ? : sock_no_sendpage;
	do {
		int flags = msgflags;

		len = PAGE_CACHE_SIZE;
		if (base)
			len -= base;
		if (pglen < len)
			len = pglen;

		if (pglen != len || xdr->tail[0].iov_len != 0)
			flags |= MSG_MORE;

		/* Hmm... We might be dealing with highmem pages */
		if (PageHighMem(*ppage))
			sendpage = sock_no_sendpage;
		err = sendpage(sock, *ppage, base, len, flags);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
		if (err != len)
			goto out;
		base = 0;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len) {
		struct iovec iov = {
			.iov_base = xdr->tail[0].iov_base + base,
			.iov_len = len - base,
		};
		struct msghdr msg = {
			.msg_iov = &iov,
			.msg_iovlen = 1,
			.msg_flags = msgflags,
		};
		oldfs = get_fs(); set_fs(get_ds());
		err = sock_sendmsg(sock, &msg, iov.iov_len);
		set_fs(oldfs);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
	}
out:
	return ret;
}

/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto, KM_USER0);
		vfrom = kmap_atomic(*pgfrom, KM_USER1);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		kunmap_atomic(vfrom, KM_USER1);
		kunmap_atomic(vto, KM_USER0);

	} while ((len -= copy) != 0);
}

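/*
 * Example of the address convention above (illustrative only): byte 100
 * of pages[2] has page vector address (2 << PAGE_CACHE_SHIFT) + 100.
 * With 4K pages, shifting 300 bytes that start at address 4000 (the
 * tail of pages[0]) so that they instead start at address 4200 is:
 *
 *	_shift_data_right_pages(pages, 4200, 4000, 300);
 *
 * Both ranges straddle the pages[0]/pages[1] boundary and overlap;
 * because the loop copies from the high addresses downwards, the
 * overlap is handled the way memmove() would handle it.
 */
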
/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto, KM_USER0);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgto++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}

/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom, KM_USER0);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}

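/*
 * Taken together (illustrative sketch, not from the original file),
 * these helpers act like memcpy() into and out of the discontiguous
 * page array. Staging a 1000-byte object at page vector address 512
 * and reading it back is simply:
 *
 *	_copy_to_pages(buf->pages, 512, obj, 1000);
 *	_copy_from_pages(obj, buf->pages, 512, 1000);
 *
 * Each iteration maps exactly one page with kmap_atomic(), so highmem
 * pages are handled without requiring a permanent mapping.
 */
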
/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header iovec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct iovec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON (len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

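/*
 * Worked example (illustrative numbers): with head->iov_len = 128, a
 * 4096-byte page section and a 16-byte tail, xdr_shrink_bufhead(buf, 4)
 * shifts the existing tail right by 4, copies the last 4 bytes of page
 * data into the front of the tail, shifts the page data right by 4,
 * copies the last 4 head bytes into the front of the pages, and leaves
 * head->iov_len at 124. No bytes are lost; everything slides toward
 * the tail.
 */
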
/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct iovec *tail;
	size_t copy;
	char *p;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	BUG_ON (len > pglen);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		p = (char *)tail->iov_base + len;
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove(p, tail->iov_base, copy);
		}

		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > tail->iov_len)
			copy = tail->iov_len;
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header iovec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the iovec
 *	 length for us.
 */
749 struct iovec *iov = buf->head;
753 xdr->end = (uint32_t *)((char *)iov->iov_base + iov->iov_len);
754 buf->len = iov->iov_len = (char *)p - (char *)iov->iov_base;
757 EXPORT_SYMBOL(xdr_init_encode);
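/*
 * Usage sketch (hypothetical encoder, not from this file): an encode
 * routine typically initializes the stream with 'p' pointing just past
 * the already-encoded RPC header:
 *
 *	struct xdr_stream xdr;
 *
 *	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
 *
 * so that buf->len and head->iov_len immediately account for the bytes
 * written so far. ('req->rq_snd_buf' here stands in for whatever
 * xdr_buf the caller owns.)
 */
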
/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current iovec.
 */
uint32_t * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;
	uint32_t *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL(xdr_reserve_space);

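/*
 * Example (illustrative): encoding an 11-byte name as a variable-length
 * opaque needs 4 bytes of length plus the rounded-up data:
 *
 *	p = xdr_reserve_space(&xdr, 4 + 11);
 *	if (p == NULL)
 *		goto out_overflow;	// hypothetical error path
 *	p = xdr_encode_opaque(p, name, 11);
 *
 * The reservation is rounded up to 16 bytes internally, so the pad
 * byte written by xdr_encode_opaque() is already accounted for in
 * iov_len and buf->len.
 */
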
/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct iovec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL(xdr_write_pages);

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
	struct iovec *iov = buf->head;
	unsigned int len = iov->iov_len;

	if (len > buf->len)
		len = buf->len;
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = p;
	xdr->end = (uint32_t *)((char *)iov->iov_base + len);
}
EXPORT_SYMBOL(xdr_init_decode);

/**
 * xdr_inline_decode - Retrieve non-page XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
uint32_t * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;
	uint32_t *q = p + XDR_QUADLEN(nbytes);

	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	return p;
}
EXPORT_SYMBOL(xdr_inline_decode);

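/*
 * Decode-side sketch (hypothetical caller, not from this file): after
 * xdr_init_decode(), fixed-size fields are pulled out of the head:
 *
 *	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
 *	p = xdr_inline_decode(&xdr, 8);
 *	if (p == NULL)
 *		return -EIO;		// reply too short
 *	status = ntohl(*p++);
 *	count  = ntohl(*p);
 *
 * A NULL return means fewer than 8 bytes remain before xdr->end.
 */
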
/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the XDR tail.
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct iovec *iov;
	ssize_t shift;
	unsigned int end;
	int padding;

	/* Realign pages to current pointer position */
	iov = buf->head;
	shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
	if (shift > 0)
		xdr_shrink_bufhead(buf, shift);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length. */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (uint32_t *)((char *)iov->iov_base + padding);
	xdr->end = (uint32_t *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL(xdr_read_pages);

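/*
 * Sketch of a READ-style reply decoder (illustrative): once the
 * on-the-wire byte count is known, align the page data and continue
 * decoding any trailing fields from the tail:
 *
 *	count = ntohl(*p);
 *	xdr_read_pages(&xdr, count);
 *	p = xdr_inline_decode(&xdr, 4);	// e.g. a trailing eof word
 *
 * After xdr_read_pages() the first 'count' bytes beyond the old
 * position live in buf->pages, and xdr->p points at the quad-aligned
 * start of the tail.
 */
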
static struct iovec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct iovec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}

/* Sets subiov to the intersection of iov with the buffer of length len
 * starting base bytes after iov.  Indicates empty intersection by setting
 * length of subiov to zero.  Decrements len by length of subiov, sets base
 * to zero (or decrements it by length of iov if subiov is empty). */
static void
iov_subsegment(struct iovec *iov, struct iovec *subiov, int *base, int *len)
{
	if (*base > iov->iov_len) {
		subiov->iov_base = NULL;
		subiov->iov_len = 0;
		*base -= iov->iov_len;
	} else {
		subiov->iov_base = iov->iov_base + *base;
		subiov->iov_len = min(*len, (int)iov->iov_len - *base);
		*base = 0;
	}
	*len -= subiov->iov_len;
}

/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			int base, int len)
{
	int i;

	subbuf->buflen = subbuf->len = len;
	iov_subsegment(buf->head, subbuf->head, &base, &len);

	if (base < buf->page_len) {
		i = (base + buf->page_base) >> PAGE_CACHE_SHIFT;
		subbuf->pages = &buf->pages[i];
		subbuf->page_base = (base + buf->page_base) & ~PAGE_CACHE_MASK;
		subbuf->page_len = min((int)buf->page_len - base, len);
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	iov_subsegment(buf->tail, subbuf->tail, &base, &len);
	if (base || len)
		return -1;
	return 0;
}

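/*
 * Example (illustrative): to operate on bytes 100..149 of a buffer,
 * carve out a 50-byte view without copying anything:
 *
 *	struct xdr_buf subbuf;
 *
 *	if (xdr_buf_subsegment(buf, &subbuf, 100, 50))
 *		return -EINVAL;	// range falls outside buf
 *
 * subbuf then shares buf's memory: its head, pages and tail describe
 * exactly those 50 bytes wherever they happen to live.
 */
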
/* obj is assumed to point to allocated memory of size at least len: */
static int
read_bytes_from_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
{
	struct xdr_buf subbuf;
	int this_len;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status)
		goto out;
	this_len = min(len, (int)subbuf.head[0].iov_len);
	memcpy(obj, subbuf.head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf.pages, subbuf.page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.tail[0].iov_len);
	memcpy(obj, subbuf.tail[0].iov_base, this_len);
out:
	return status;
}

static int
read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj)
{
	u32	raw;
	int	status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = ntohl(raw);
	return 0;
}

/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set obj to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int
xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, int offset)
{
	u32	tail_offset = buf->head[0].iov_len + buf->page_len;
	u32	obj_end_offset;

	if (read_u32_from_xdr_buf(buf, offset, &obj->len))
		goto out;
	obj_end_offset = offset + 4 + obj->len;

	if (obj_end_offset <= buf->head[0].iov_len) {
		/* The obj is contained entirely in the head: */
		obj->data = buf->head[0].iov_base + offset + 4;
	} else if (offset + 4 >= tail_offset) {
		if (obj_end_offset - tail_offset
				> buf->tail[0].iov_len)
			goto out;
		/* The obj is contained entirely in the tail: */
		obj->data = buf->tail[0].iov_base
			+ offset - tail_offset + 4;
	} else {
		/* use end of tail as storage for obj:
		 * (We don't copy to the beginning because then we'd have
		 * to worry about doing a potentially overlapping copy.
		 * This assumes the object is at most half the length of the
		 * tail.) */
		if (obj->len > buf->tail[0].iov_len)
			goto out;
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len -
				obj->len;
		if (read_bytes_from_xdr_buf(buf, offset + 4,
					obj->data, obj->len))
			goto out;
	}
	return 0;
out:
	return -1;
}
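
/*
 * Caller's-eye view (hypothetical, e.g. a security layer verifying a
 * checksum carried as a netobj near the end of a reply):
 *
 *	struct xdr_netobj mic;
 *
 *	if (xdr_buf_read_netobj(buf, &mic, offset))
 *		return -EIO;
 *
 * On success mic.data points either directly into the head or tail, or
 * at a copy staged at the end of the tail, and mic.len holds the
 * object's length.
 */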