2 * linux/net/sunrpc/xdr.c
6 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
9 #include <linux/types.h>
10 #include <linux/socket.h>
11 #include <linux/string.h>
12 #include <linux/kernel.h>
13 #include <linux/pagemap.h>
14 #include <linux/errno.h>
16 #include <linux/sunrpc/xdr.h>
17 #include <linux/sunrpc/msg_prot.h>
20 * XDR functions for basic NFS types
23 xdr_encode_netobj(u32 *p, const struct xdr_netobj *obj)
/*
 * Encode a variable-length opaque (netobj) at p: a 32-bit big-endian
 * length word followed by the data, padded out to a 4-byte (quad)
 * boundary.  Returns a pointer just past the encoded object.
 */
25 unsigned int quadlen = XDR_QUADLEN(obj->len);
/* Zero the final quad first so pad bytes beyond obj->len read as 0. */
27 p[quadlen] = 0; /* zero trailing bytes */
28 *p++ = htonl(obj->len);
29 memcpy(p, obj->data, obj->len);
30 return p + XDR_QUADLEN(obj->len);
34 xdr_decode_netobj_fixed(u32 *p, void *obj, unsigned int len)
/*
 * Decode an opaque whose on-wire length must equal the expected 'len'.
 * NOTE(review): the copy into obj and the mismatch error return are on
 * lines elided from this view — confirm against the full source.
 * On success returns a pointer past the quad-aligned data.
 */
36 if (ntohl(*p++) != len)
39 return p + XDR_QUADLEN(len);
43 xdr_decode_netobj(u32 *p, struct xdr_netobj *obj)
/*
 * Decode a variable-length opaque into obj.  The on-wire length is
 * rejected if it exceeds XDR_MAX_NETOBJ.
 * NOTE(review): the stores into obj->len/obj->data and the error
 * return are on elided lines — verify against the full source.
 */
47 if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
51 return p + XDR_QUADLEN(len);
55 xdr_encode_array(u32 *p, const char *array, unsigned int len)
/*
 * Encode a counted byte array (length word + data, quad-padded).
 * NOTE(review): only the data copy is visible here; the length store
 * and trailing-pad zeroing are on elided lines.
 */
57 int quadlen = XDR_QUADLEN(len);
61 memcpy(p, array, len);
66 xdr_encode_string(u32 *p, const char *string)
/* Encode a NUL-terminated C string as an XDR counted array; the
 * terminating NUL itself is not part of the encoding. */
68 return xdr_encode_array(p, string, strlen(string));
72 xdr_decode_string(u32 *p, char **sp, int *lenp, int maxlen)
/*
 * Decode a counted string in place.  The string bytes are shifted
 * down one word, onto the storage that held the length word, which
 * frees a byte so a NUL terminator can be appended without growing
 * the buffer.  Strings longer than maxlen are rejected.
 * NOTE(review): the NUL store and the *sp/*lenp assignments are on
 * elided lines — confirm against the full source.
 */
77 if ((len = ntohl(*p++)) > maxlen)
/* Reuse the length word's slot as the start of the string. */
84 string = (char *) (p - 1);
/* memmove: source and destination overlap by exactly one word. */
85 memmove(string, p, len);
89 return p + XDR_QUADLEN(len);
93 xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen)
/*
 * Like xdr_decode_string() but leaves the data where it lies — no
 * shifting, no NUL termination; the caller receives pointer + length.
 * Rejects strings longer than maxlen.
 * NOTE(review): the *sp/*lenp stores are on elided lines.
 */
97 if ((len = ntohl(*p++)) > maxlen)
101 return p + XDR_QUADLEN(len);
106 xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
/*
 * Attach a page vector as the body of an xdr_buf.  When the page data
 * length is not a multiple of 4, the tail iovec is pointed at a static
 * run of zero bytes so the XDR stream stays quad-aligned.
 */
110 xdr->page_base = base;
114 struct iovec *iov = xdr->tail;
/* NOTE(review): pad evaluates to 4 when (len & 3) == 0 — presumably
 * this path is guarded by an "if (len & 3)" test on an elided line;
 * verify against the full source. */
115 unsigned int pad = 4 - (len & 3);
/* Static zero padding; the tail must never be written through. */
117 iov->iov_base = (void *) "\0\0\0";
125 xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
126 struct page **pages, unsigned int base, unsigned int len)
/*
 * Split the head buffer at 'offset' and splice the page vector in
 * between: head keeps bytes [0, offset), the pages carry the bulk
 * data, and the remainder of the original head allocation becomes
 * the tail.
 */
128 struct iovec *head = xdr->head;
129 struct iovec *tail = xdr->tail;
130 char *buf = (char *)head->iov_base;
131 unsigned int buflen = head->iov_len;
/* Truncate head to the portion preceding the page data. */
133 head->iov_len = offset;
136 xdr->page_base = base;
/* Tail reuses the unused trailing part of the old head buffer. */
139 tail->iov_base = buf + offset;
140 tail->iov_len = buflen - offset;
146 * Realign the iovec if the server missed out some reply elements
147 * (such as post-op attributes,...)
148 * Note: This is a simple implementation that assumes that
149 * len <= iov->iov_len !!!
150 * The RPC header (assumed to be the 1st element in the iov array)
153 void xdr_shift_iovec(struct iovec *iov, int nr, size_t len)
/*
 * Shift the data in iov[1..nr-1] right by 'len' bytes, refilling the
 * hole at the front of each element with the last 'len' bytes of the
 * preceding element.  Walks the array back-to-front so overlapping
 * data is not clobbered; iov[0] (the RPC header) is left untouched.
 */
157 for (pvec = iov + nr - 1; nr > 1; nr--, pvec--) {
158 struct iovec *svec = pvec - 1;
/* A shift larger than the element itself is unsupported. */
160 if (len > pvec->iov_len) {
161 printk(KERN_DEBUG "RPC: Urk! Large shift of short iovec.\n");
/* Make room at the front of this element (regions overlap). */
164 memmove((char *)pvec->iov_base + len, pvec->iov_base,
165 pvec->iov_len - len);
167 if (len > svec->iov_len) {
168 printk(KERN_DEBUG "RPC: Urk! Large shift of short iovec.\n");
/* Pull the tail bytes of the previous element into the hole. */
171 memcpy(pvec->iov_base,
172 (char *)svec->iov_base + svec->iov_len - len, len);
177 * Map a struct xdr_buf into an iovec array.
179 int xdr_kmap(struct iovec *iov_base, struct xdr_buf *xdr, unsigned int base)
/*
 * Build an iovec array covering an xdr_buf starting at byte offset
 * 'base': the head remainder, then each body page mapped via kmap(),
 * then the tail remainder.  Returns the number of iovecs filled.
 * NOTE(review): the error-path branches and the kunmap() call in the
 * unwind loop at the bottom are on elided lines — verify.
 */
181 struct iovec *iov = iov_base;
182 struct page **ppage = xdr->pages;
183 struct page **first_kmap = NULL;
184 unsigned int len, pglen = xdr->page_len;
186 len = xdr->head[0].iov_len;
188 iov->iov_len = len - base;
189 iov->iov_base = (char *)xdr->head[0].iov_base + base;
/* Skip whole pages consumed by base plus the vector's page_base. */
201 if (base || xdr->page_base) {
203 base += xdr->page_base;
204 ppage += base >> PAGE_CACHE_SHIFT;
/* Keep only the intra-page offset. */
205 base &= ~PAGE_CACHE_MASK;
208 len = PAGE_CACHE_SIZE;
211 iov->iov_base = kmap(*ppage);
/* Non-blocking mapping variant; the path selection is elided. */
213 iov->iov_base = kmap_nonblock(*ppage);
/* The first page may start mid-page. */
218 iov->iov_base += base;
227 } while ((pglen -= len) != 0);
229 if (xdr->tail[0].iov_len) {
230 iov->iov_len = xdr->tail[0].iov_len - base;
231 iov->iov_base = (char *)xdr->tail[0].iov_base + base;
234 return (iov - iov_base);
/* Error unwind: release every page mapped so far. */
236 for (; first_kmap != ppage; first_kmap++)
241 void xdr_kunmap(struct xdr_buf *xdr, unsigned int base, int niov)
/*
 * Undo the page mappings created by xdr_kmap() for the same
 * (xdr, base) pair: recompute which pages were mapped, then flush
 * and unmap each.  NOTE(review): the kunmap() call inside the loop
 * is on an elided line — verify against the full source.
 */
243 struct page **ppage = xdr->pages;
244 unsigned int pglen = xdr->page_len;
/* Consume the head's share of 'base' first. */
248 if (base >= xdr->head[0].iov_len)
249 base -= xdr->head[0].iov_len;
257 if (base || xdr->page_base) {
259 base += xdr->page_base;
260 ppage += base >> PAGE_CACHE_SHIFT;
261 /* Note: The offset means that the length of the first
262 * page is really (PAGE_CACHE_SIZE - (base & ~PAGE_CACHE_MASK)).
263 * In order to avoid an extra test inside the loop,
264 * we bump pglen here, and just subtract PAGE_CACHE_SIZE... */
265 pglen += base & ~PAGE_CACHE_MASK;
268 * In case we could only do a partial xdr_kmap, all remaining iovecs
269 * refer to pages. Otherwise we detect the end through pglen.
271 for (; niov; niov--) {
/* Keep the data cache coherent before the page goes away. */
272 flush_dcache_page(*ppage);
274 if (pglen <= PAGE_CACHE_SIZE)
276 pglen -= PAGE_CACHE_SIZE;
282 xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
284 skb_read_actor_t copy_actor)
/*
 * Copy data out of a socket buffer into an xdr_buf starting at byte
 * offset 'base', via the supplied copy_actor callback.  Fills the
 * head remainder, then each body page (mapped with kmap_atomic, so
 * this is usable from atomic/softirq context), then the tail.
 * Stops early when the actor copies short or desc->count runs out.
 */
286 struct page **ppage = xdr->pages;
287 unsigned int len, pglen = xdr->page_len;
290 len = xdr->head[0].iov_len;
293 ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
/* Short copy or no skb bytes left: nothing more to do. */
294 if (ret != len || !desc->count)
/* Skip whole pages consumed by base plus page_base. */
306 if (base || xdr->page_base) {
308 base += xdr->page_base;
309 ppage += base >> PAGE_CACHE_SHIFT;
310 base &= ~PAGE_CACHE_MASK;
315 len = PAGE_CACHE_SIZE;
316 kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
/* First page: honour the intra-page starting offset. */
321 ret = copy_actor(desc, kaddr + base, len);
326 ret = copy_actor(desc, kaddr, len);
328 kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
329 if (ret != len || !desc->count)
332 } while ((pglen -= len) != 0);
334 len = xdr->tail[0].iov_len;
336 copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len);
340 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
342 * _shift_data_right_pages
343 * @pages: vector of pages containing both the source and dest memory area.
344 * @pgto_base: page vector address of destination
345 * @pgfrom_base: page vector address of source
346 * @len: number of bytes to copy
348 * Note: the addresses pgto_base and pgfrom_base are both calculated in
350 * if a memory area starts at byte 'base' in page 'pages[i]',
351 * then its address is given as (i << PAGE_CACHE_SHIFT) + base
352 * Also note: pgfrom_base must be < pgto_base, but the memory areas
353 * they point to may overlap.
356 _shift_data_right_pages(struct page **pages, size_t pgto_base,
357 size_t pgfrom_base, size_t len)
/*
 * Move 'len' bytes within a page vector from pgfrom_base up to
 * pgto_base (byte offsets into the vector; destination strictly above
 * source, per the BUG_ON).  Copies back-to-front in chunks so that
 * overlapping ranges are safe; each chunk stays within one source
 * page and one destination page.
 */
359 struct page **pgfrom, **pgto;
363 BUG_ON(pgto_base <= pgfrom_base);
/* Locate the pages holding the end of each range. */
368 pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
369 pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);
371 pgto_base &= ~PAGE_CACHE_MASK;
372 pgfrom_base &= ~PAGE_CACHE_MASK;
375 /* Are any pointers crossing a page boundary? */
/* An in-page offset of 0 means "end of the previous page". */
376 if (pgto_base == 0) {
377 pgto_base = PAGE_CACHE_SIZE;
380 if (pgfrom_base == 0) {
381 pgfrom_base = PAGE_CACHE_SIZE;
/* Chunk size is capped by both in-page offsets and remaining len. */
386 if (copy > pgto_base)
388 if (copy > pgfrom_base)
/* memmove: source and destination may land on the same page. */
393 vto = kmap_atomic(*pgto, KM_USER0);
394 vfrom = kmap_atomic(*pgfrom, KM_USER1);
395 memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
396 kunmap_atomic(vfrom, KM_USER1);
397 kunmap_atomic(vto, KM_USER0);
399 } while ((len -= copy) != 0);
404 * @pages: array of pages
405 * @pgbase: page vector address of destination
406 * @p: pointer to source data
409 * Copies data from an arbitrary memory location into an array of pages
410 * The copy is assumed to be non-overlapping.
413 _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
/*
 * Copy 'len' bytes from the linear buffer p into the page vector at
 * byte offset pgbase, one page-bounded chunk at a time.  Source and
 * destination are assumed not to overlap.
 */
419 pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
420 pgbase &= ~PAGE_CACHE_MASK;
/* Never let a single memcpy cross a destination page boundary. */
423 copy = PAGE_CACHE_SIZE - pgbase;
427 vto = kmap_atomic(*pgto, KM_USER0);
428 memcpy(vto + pgbase, p, copy);
429 kunmap_atomic(vto, KM_USER0);
/* Page filled: wrap the offset and advance to the next page. */
432 if (pgbase == PAGE_CACHE_SIZE) {
438 } while ((len -= copy) != 0);
443 * @p: pointer to destination
444 * @pages: array of pages
445 * @pgbase: offset of source data
448 * Copies data into an arbitrary memory location from an array of pages
449 * The copy is assumed to be non-overlapping.
452 _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
/*
 * Copy 'len' bytes from the page vector, starting at byte offset
 * pgbase, into the linear buffer p — the mirror of _copy_to_pages().
 * Source and destination are assumed not to overlap.
 */
454 struct page **pgfrom;
458 pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
459 pgbase &= ~PAGE_CACHE_MASK;
/* Never let a single memcpy cross a source page boundary. */
462 copy = PAGE_CACHE_SIZE - pgbase;
466 vfrom = kmap_atomic(*pgfrom, KM_USER0);
467 memcpy(p, vfrom + pgbase, copy);
468 kunmap_atomic(vfrom, KM_USER0);
/* Page exhausted: wrap the offset and advance to the next page. */
471 if (pgbase == PAGE_CACHE_SIZE) {
477 } while ((len -= copy) != 0);
483 * @len: bytes to remove from buf->head[0]
485 * Shrinks XDR buffer's header iovec buf->head[0] by
486 * 'len' bytes. The extra data is not lost, but is instead
487 * moved into the inlined pages and/or the tail.
490 xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
/*
 * Shrink buf->head[0] by 'len' bytes without losing data: displaced
 * bytes cascade rightward, head -> pages -> tail.  The tail is moved
 * first and the work proceeds back-to-front so overlapping moves are
 * safe; head->iov_len is truncated last.
 */
492 struct iovec *head, *tail;
494 unsigned int pglen = buf->page_len;
498 BUG_ON (len > head->iov_len);
500 /* Shift the tail first */
501 if (tail->iov_len != 0) {
502 if (tail->iov_len > len) {
503 copy = tail->iov_len - len;
/* Slide surviving tail data right to open a hole (overlap ok). */
504 memmove((char *)tail->iov_base + len,
505 tail->iov_base, copy);
507 /* Copy from the inlined pages into the tail */
512 if (offs >= tail->iov_len)
514 else if (copy > tail->iov_len - offs)
515 copy = tail->iov_len - offs;
517 _copy_from_pages((char *)tail->iov_base + offs,
519 buf->page_base + pglen + offs - len,
521 /* Do we also need to copy data from the head into the tail ? */
523 offs = copy = len - pglen;
524 if (copy > tail->iov_len)
525 copy = tail->iov_len;
526 memcpy(tail->iov_base,
527 (char *)head->iov_base +
528 head->iov_len - offs,
532 /* Now handle pages */
/* Shift page data right, then pull the head overflow into the pages. */
535 _shift_data_right_pages(buf->pages,
536 buf->page_base + len,
542 _copy_to_pages(buf->pages, buf->page_base,
543 (char *)head->iov_base + head->iov_len - len,
546 head->iov_len -= len;
551 xdr_shift_buf(struct xdr_buf *buf, size_t len)
/* Public wrapper: move 'len' bytes of data out of the buffer head
 * (see xdr_shrink_bufhead for the cascade semantics). */
553 xdr_shrink_bufhead(buf, len);