/*
 * linux/fs/nfsd/nfscache.c
 *
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/cache.h>

/* Size of reply cache. Common values are:
 * 4.3BSD:	128
 * 4.4BSD:	256
 * Solaris2:	1024
 * DEC Unix:	512-4096
 */
#define CACHESIZE		1024
#define HASHSIZE		64
#define REQHASH(xid)	((((xid) >> 24) ^ (xid)) & (HASHSIZE-1))
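/* REQHASH xors the top byte of the XID into the low bits before
 * masking, so clients that generate XIDs by counting in either the
 * high or the low end of the word still spread over all HASHSIZE
 * buckets rather than piling into one chain.
 */
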
struct nfscache_head {
	struct svc_cacherep *	next;
	struct svc_cacherep *	prev;
};

static struct nfscache_head *	hash_list;
static struct svc_cacherep *	lru_head;
static struct svc_cacherep *	lru_tail;
static struct svc_cacherep *	nfscache;
static int			cache_disabled = 1;

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct svc_buf *data);
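
/* Layout note: every svc_cacherep sits on two intrusive doubly-linked
 * lists at once, the global NULL-terminated LRU list (lru_head/lru_tail)
 * and one of the HASHSIZE circular hash chains rooted in hash_list.
 * Casting a bucket head to (struct svc_cacherep *) below assumes that
 * next/prev in nfscache_head line up with c_hash_next/c_hash_prev at
 * the start of svc_cacherep, as declared in <linux/nfsd/cache.h>.
 */
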
void
nfsd_cache_init(void)
{
	struct svc_cacherep	*rp;
	struct nfscache_head	*rh;
	size_t			i;
	unsigned long		order;

	/* Round the cache allocation up to a power-of-two number of pages */
	i = CACHESIZE * sizeof (struct svc_cacherep);
	for (order = 0; (PAGE_SIZE << order) < i; order++)
		;
	nfscache = (struct svc_cacherep *)
			__get_free_pages(GFP_KERNEL, order);
	if (!nfscache) {
		printk(KERN_ERR "nfsd: cannot allocate %Zd bytes for reply cache\n", i);
		return;
	}
	memset(nfscache, 0, i);

	i = HASHSIZE * sizeof (struct nfscache_head);
	hash_list = kmalloc(i, GFP_KERNEL);
	if (!hash_list) {
		free_pages((unsigned long)nfscache, order);
		nfscache = NULL;
		printk(KERN_ERR "nfsd: cannot allocate %Zd bytes for hash list\n", i);
		return;
	}

	/* Each hash bucket starts out as an empty circular list */
	for (i = 0, rh = hash_list; i < HASHSIZE; i++, rh++)
		rh->next = rh->prev = (struct svc_cacherep *) rh;

	/* String all entries together on the LRU list */
	for (i = 0, rp = nfscache; i < CACHESIZE; i++, rp++) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		rp->c_hash_next =
		rp->c_hash_prev = rp;
		rp->c_lru_next = rp + 1;
		rp->c_lru_prev = rp - 1;
	}
	lru_head = nfscache;
	lru_tail = nfscache + CACHESIZE - 1;
	lru_head->c_lru_prev = NULL;
	lru_tail->c_lru_next = NULL;

	cache_disabled = 0;
}

void
nfsd_cache_shutdown(void)
{
	struct svc_cacherep	*rp;
	size_t			i;
	unsigned long		order;

	/* Free any cached reply buffers before releasing the entries */
	for (rp = lru_head; rp; rp = rp->c_lru_next) {
		if (rp->c_state == RC_DONE && rp->c_type == RC_REPLBUFF)
			kfree(rp->c_replbuf.buf);
	}

	cache_disabled = 1;

	i = CACHESIZE * sizeof (struct svc_cacherep);
	for (order = 0; (PAGE_SIZE << order) < i; order++)
		;
	free_pages((unsigned long)nfscache, order);
	nfscache = NULL;
	kfree(hash_list);
	hash_list = NULL;
}

/*
 * Move cache entry to front of LRU list
 */
static void
lru_put_front(struct svc_cacherep *rp)
{
	struct svc_cacherep	*prev = rp->c_lru_prev,
				*next = rp->c_lru_next;

	/* Unlink from the current position, fixing up head/tail */
	if (prev)
		prev->c_lru_next = next;
	else
		lru_head = next;
	if (next)
		next->c_lru_prev = prev;
	else
		lru_tail = prev;

	/* Reinsert at the head */
	rp->c_lru_next = lru_head;
	rp->c_lru_prev = NULL;
	if (lru_head)
		lru_head->c_lru_prev = rp;
	lru_head = rp;
}

/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
	struct svc_cacherep	*prev = rp->c_hash_prev,
				*next = rp->c_hash_next;
	struct nfscache_head	*head = hash_list + REQHASH(rp->c_xid);

	/* Unlink from the old chain; entries always carry valid hash
	 * pointers, so no NULL checks are needed here */
	prev->c_hash_next = next;
	next->c_hash_prev = prev;

	/* Insert right behind the new bucket head */
	rp->c_hash_next = head->next;
	rp->c_hash_prev = (struct svc_cacherep *) head;
	head->next->c_hash_prev = rp;
	head->next = rp;
}
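
/* The (struct svc_cacherep *) cast above treats the bucket head itself
 * as a list node, so the chain needs no special empty case.  This only
 * works while next/prev stay the first two members of both structures;
 * see the layout note near the top of this file.
 */
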
/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we grab the oldest unlocked entry off the LRU list.
 * Note that no operation within the loop may sleep.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp, int type)
{
	struct svc_cacherep	*rh, *rp;
	u32			xid = rqstp->rq_xid,
				proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	unsigned long		age;
	int			safe = 0;

	rqstp->rq_cacherep = NULL;
	if (cache_disabled || type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return RC_DOIT;
	}

	/* Walk the hash chain; the bucket head doubles as the sentinel */
	rp = rh = (struct svc_cacherep *) &hash_list[REQHASH(xid)];
	while ((rp = rp->c_hash_next) != rh) {
		if (rp->c_state != RC_UNUSED &&
		    xid == rp->c_xid && proc == rp->c_proc &&
		    proto == rp->c_prot && vers == rp->c_vers &&
		    time_before(jiffies, rp->c_timestamp + 120*HZ) &&
		    memcmp((char*)&rqstp->rq_addr, (char*)&rp->c_addr, sizeof(rp->c_addr))==0) {
			nfsdstats.rchits++;
			goto found_entry;
		}
	}
	nfsdstats.rcmisses++;

	/* This loop shouldn't take more than a few iterations normally */
	for (rp = lru_tail; rp; rp = rp->c_lru_prev) {
		if (rp->c_state != RC_INPROG)
			break;
		if (safe++ > CACHESIZE) {
			printk("nfsd: loop in repcache LRU list\n");
			cache_disabled = 1;
			return RC_DOIT;
		}
	}

	/* This should not happen */
	if (rp == NULL) {
		static int	complaints;

		printk(KERN_WARNING "nfsd: all repcache entries locked!\n");
		if (++complaints > 5) {
			printk(KERN_WARNING "nfsd: disabling repcache.\n");
			cache_disabled = 1;
		}
		return RC_DOIT;
	}

	/* Recycle the oldest unlocked entry for the new request */
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	rp->c_addr = rqstp->rq_addr;
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_timestamp = jiffies;

	hash_refile(rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		kfree(rp->c_replbuf.buf);
		rp->c_replbuf.buf = NULL;
	}
	rp->c_type = RC_NOCACHE;

	return RC_DOIT;

found_entry:
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	rp->c_timestamp = jiffies;
	lru_put_front(rp);

	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		return RC_DROPIT;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	if (!rqstp->rq_secure && rp->c_secure)
		return RC_DOIT;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		return RC_DOIT;
	case RC_REPLSTAT:
		svc_putlong(&rqstp->rq_resbuf, rp->c_replstat);
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replbuf))
			return RC_DOIT;	/* should not happen */
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		rp->c_state = RC_UNUSED;
		return RC_DOIT;
	}

	return RC_REPLY;
}
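
/* A rough sketch of how the return value is meant to be consumed by the
 * dispatcher (illustrative only; names are modeled on nfsd_dispatch, not
 * quoted from it):
 *
 *	switch (nfsd_cache_lookup(rqstp, proc->pc_cachetype)) {
 *	case RC_DROPIT:
 *		return 0;	// say nothing, client will retransmit
 *	case RC_REPLY:
 *		return 1;	// cached reply already placed in rq_resbuf
 *	case RC_DOIT:
 *		break;		// execute the procedure normally
 *	}
 */
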
/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_resbuf.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, u32 *statp)
{
	struct svc_cacherep *rp;
	struct svc_buf	*resp = &rqstp->rq_resbuf, *cachp;
	int		len;

	if (!(rp = rqstp->rq_cacherep) || cache_disabled)
		return;

	/* Length of the reply in words, from the status word onward */
	len = resp->len - (statp - resp->base);

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		rp->c_state = RC_UNUSED;
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachp = &rp->c_replbuf;
		cachp->buf = (u32 *) kmalloc(len << 2, GFP_KERNEL);
		if (!cachp->buf) {
			rp->c_state = RC_UNUSED;
			return;
		}
		cachp->len = len;
		memcpy(cachp->buf, statp, len << 2);
		break;
	}

	lru_put_front(rp);
	rp->c_secure = rqstp->rq_secure;
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	rp->c_timestamp = jiffies;
}
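
/* Sketch of the expected call site (again illustrative, modeled on
 * nfsd_dispatch rather than quoted from it):
 *
 *	nfsd_cache_update(rqstp, proc->pc_cachetype, statp + 1);
 *
 * where statp is the RPC status word the dispatcher wrote, so the
 * cached region starts right behind it.
 */
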
/*
 * Copy cached reply to current reply buffer. Should always fit.
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct svc_buf *data)
{
	struct svc_buf	*resp = &rqstp->rq_resbuf;

	if (resp->len + data->len > resp->buflen) {
		printk(KERN_WARNING "nfsd: cached reply too large (%d).\n",
				data->len);
		return 0;
	}
	/* Lengths are in words, hence the << 2 for the byte count */
	memcpy(resp->buf, data->buf, data->len << 2);
	resp->buf += data->len;
	resp->len += data->len;
	return 1;
}