/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support routines.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
9 #include <linux/types.h>
10 #include <linux/socket.h>
11 #include <linux/string.h>
12 #include <linux/kernel.h>
13 #include <linux/pagemap.h>
14 #include <linux/errno.h>
16 #include <linux/sunrpc/xdr.h>
17 #include <linux/sunrpc/msg_prot.h>
/*
 * XDR functions for basic NFS types
 */
23 xdr_encode_netobj(u32 *p, const struct xdr_netobj *obj)
25 unsigned int quadlen = XDR_QUADLEN(obj->len);
27 p[quadlen] = 0; /* zero trailing bytes */
28 *p++ = htonl(obj->len);
29 memcpy(p, obj->data, obj->len);
30 return p + XDR_QUADLEN(obj->len);
34 xdr_decode_netobj_fixed(u32 *p, void *obj, unsigned int len)
36 if (ntohl(*p++) != len)
39 return p + XDR_QUADLEN(len);
43 xdr_decode_netobj(u32 *p, struct xdr_netobj *obj)
47 if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
51 return p + XDR_QUADLEN(len);
55 xdr_encode_array(u32 *p, const char *array, unsigned int len)
57 int quadlen = XDR_QUADLEN(len);
61 memcpy(p, array, len);
66 xdr_encode_string(u32 *p, const char *string)
68 return xdr_encode_array(p, string, strlen(string));
72 xdr_decode_string(u32 *p, char **sp, int *lenp, int maxlen)
77 if ((len = ntohl(*p++)) > maxlen)
84 string = (char *) (p - 1);
85 memmove(string, p, len);
89 return p + XDR_QUADLEN(len);
93 xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen)
97 if ((len = ntohl(*p++)) > maxlen)
101 return p + XDR_QUADLEN(len);
106 xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
110 xdr->page_base = base;
114 struct iovec *iov = xdr->tail;
115 unsigned int pad = 4 - (len & 3);
117 iov->iov_base = (void *) "\0\0\0";
125 xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
126 struct page **pages, unsigned int base, unsigned int len)
128 struct iovec *head = xdr->head;
129 struct iovec *tail = xdr->tail;
130 char *buf = (char *)head->iov_base;
131 unsigned int buflen = head->iov_len;
133 head->iov_len = offset;
136 xdr->page_base = base;
139 tail->iov_base = buf + offset;
140 tail->iov_len = buflen - offset;
146 * Realign the iovec if the server missed out some reply elements
147 * (such as post-op attributes,...)
148 * Note: This is a simple implementation that assumes that
149 * len <= iov->iov_len !!!
150 * The RPC header (assumed to be the 1st element in the iov array)
153 void xdr_shift_iovec(struct iovec *iov, int nr, size_t len)
157 for (pvec = iov + nr - 1; nr > 1; nr--, pvec--) {
158 struct iovec *svec = pvec - 1;
160 if (len > pvec->iov_len) {
161 printk(KERN_DEBUG "RPC: Urk! Large shift of short iovec.\n");
164 memmove((char *)pvec->iov_base + len, pvec->iov_base,
165 pvec->iov_len - len);
167 if (len > svec->iov_len) {
168 printk(KERN_DEBUG "RPC: Urk! Large shift of short iovec.\n");
171 memcpy(pvec->iov_base,
172 (char *)svec->iov_base + svec->iov_len - len, len);
177 * Map a struct xdr_buf into an iovec array.
179 int xdr_kmap(struct iovec *iov_base, struct xdr_buf *xdr, unsigned int base)
181 struct iovec *iov = iov_base;
182 struct page **ppage = xdr->pages;
183 unsigned int len, pglen = xdr->page_len;
185 len = xdr->head[0].iov_len;
187 iov->iov_len = len - base;
188 iov->iov_base = (char *)xdr->head[0].iov_base + base;
200 if (base || xdr->page_base) {
202 base += xdr->page_base;
203 ppage += base >> PAGE_CACHE_SHIFT;
204 base &= ~PAGE_CACHE_MASK;
207 len = PAGE_CACHE_SIZE;
208 iov->iov_base = kmap(*ppage);
210 iov->iov_base += base;
219 } while ((pglen -= len) != 0);
221 if (xdr->tail[0].iov_len) {
222 iov->iov_len = xdr->tail[0].iov_len - base;
223 iov->iov_base = (char *)xdr->tail[0].iov_base + base;
226 return (iov - iov_base);
229 void xdr_kunmap(struct xdr_buf *xdr, unsigned int base)
231 struct page **ppage = xdr->pages;
232 unsigned int pglen = xdr->page_len;
236 if (base > xdr->head[0].iov_len)
237 base -= xdr->head[0].iov_len;
243 if (base || xdr->page_base) {
245 base += xdr->page_base;
246 ppage += base >> PAGE_CACHE_SHIFT;
247 /* Note: The offset means that the length of the first
248 * page is really (PAGE_CACHE_SIZE - (base & ~PAGE_CACHE_MASK)).
249 * In order to avoid an extra test inside the loop,
250 * we bump pglen here, and just subtract PAGE_CACHE_SIZE... */
251 pglen += base & ~PAGE_CACHE_MASK;
254 flush_dcache_page(*ppage);
256 if (pglen <= PAGE_CACHE_SIZE)
258 pglen -= PAGE_CACHE_SIZE;
264 xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
266 skb_read_actor_t copy_actor)
268 struct page **ppage = xdr->pages;
269 unsigned int len, pglen = xdr->page_len;
272 len = xdr->head[0].iov_len;
275 ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
276 if (ret != len || !desc->count)
288 if (base || xdr->page_base) {
290 base += xdr->page_base;
291 ppage += base >> PAGE_CACHE_SHIFT;
292 base &= ~PAGE_CACHE_MASK;
297 len = PAGE_CACHE_SIZE;
298 kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
303 ret = copy_actor(desc, kaddr + base, len);
308 ret = copy_actor(desc, kaddr, len);
310 kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
311 if (ret != len || !desc->count)
314 } while ((pglen -= len) != 0);
316 len = xdr->tail[0].iov_len;
318 copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len);
322 xdr_shift_buf(struct xdr_buf *xdr, size_t len)
324 struct iovec iov[MAX_IOVEC];
327 nr = xdr_kmap(iov, xdr, 0);
328 xdr_shift_iovec(iov, nr, len);