Revert "f2fs: reuse nids more aggressively"
fs/f2fs/node.c
1 /*
2  * fs/f2fs/node.c
3  *
4  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5  *             http://www.samsung.com/
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11 #include <linux/fs.h>
12 #include <linux/f2fs_fs.h>
13 #include <linux/mpage.h>
14 #include <linux/backing-dev.h>
15 #include <linux/blkdev.h>
16 #include <linux/pagevec.h>
17 #include <linux/swap.h>
18
19 #include "f2fs.h"
20 #include "node.h"
21 #include "segment.h"
22 #include "xattr.h"
23 #include "trace.h"
24 #include <trace/events/f2fs.h>
25
26 #define on_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)
27
28 static struct kmem_cache *nat_entry_slab;
29 static struct kmem_cache *free_nid_slab;
30 static struct kmem_cache *nat_entry_set_slab;
31
32 bool available_free_memory(struct f2fs_sb_info *sbi, int type)
33 {
34         struct f2fs_nm_info *nm_i = NM_I(sbi);
35         struct sysinfo val;
36         unsigned long avail_ram;
37         unsigned long mem_size = 0;
38         bool res = false;
39
40         si_meminfo(&val);
41
42         /* only uses low memory */
43         avail_ram = val.totalram - val.totalhigh;
44
45         /*
46          * give 25%, 25%, 50%, 50%, 50% memory for each component respectively
47          */
48         if (type == FREE_NIDS) {
49                 mem_size = (nm_i->nid_cnt[FREE_NID_LIST] *
50                                 sizeof(struct free_nid)) >> PAGE_SHIFT;
51                 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
52         } else if (type == NAT_ENTRIES) {
53                 mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
54                                                         PAGE_SHIFT;
55                 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
56                 if (excess_cached_nats(sbi))
57                         res = false;
58         } else if (type == DIRTY_DENTS) {
59                 if (sbi->sb->s_bdi->wb.dirty_exceeded)
60                         return false;
61                 mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
62                 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
63         } else if (type == INO_ENTRIES) {
64                 int i;
65
66                 for (i = 0; i <= UPDATE_INO; i++)
67                         mem_size += sbi->im[i].ino_num *
68                                                 sizeof(struct ino_entry);
69                 mem_size >>= PAGE_SHIFT;
70                 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
71         } else if (type == EXTENT_CACHE) {
72                 mem_size = (atomic_read(&sbi->total_ext_tree) *
73                                 sizeof(struct extent_tree) +
74                                 atomic_read(&sbi->total_ext_node) *
75                                 sizeof(struct extent_node)) >> PAGE_SHIFT;
76                 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
77         } else {
78                 if (!sbi->sb->s_bdi->wb.dirty_exceeded)
79                         return true;
80         }
81         return res;
82 }
83
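/*
 * Drop the dirty state of a node page that is being released: clear the
 * radix-tree dirty tag, the page dirty bit and the dirty node page count.
 */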
84 static void clear_node_page_dirty(struct page *page)
85 {
86         struct address_space *mapping = page->mapping;
87         unsigned long flags;
88
89         if (PageDirty(page)) {
90                 spin_lock_irqsave(&mapping->tree_lock, flags);
91                 radix_tree_tag_clear(&mapping->page_tree,
92                                 page_index(page),
93                                 PAGECACHE_TAG_DIRTY);
94                 spin_unlock_irqrestore(&mapping->tree_lock, flags);
95
96                 clear_page_dirty_for_io(page);
97                 dec_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
98         }
99         ClearPageUptodate(page);
100 }
101
102 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
103 {
104         pgoff_t index = current_nat_addr(sbi, nid);
105         return get_meta_page(sbi, index);
106 }
107
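/*
 * Copy the valid NAT block of @nid to its alternate address and flip the NAT
 * bitmap, so the next checkpoint writes the updated copy.
 */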
108 static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
109 {
110         struct page *src_page;
111         struct page *dst_page;
112         pgoff_t src_off;
113         pgoff_t dst_off;
114         void *src_addr;
115         void *dst_addr;
116         struct f2fs_nm_info *nm_i = NM_I(sbi);
117
118         src_off = current_nat_addr(sbi, nid);
119         dst_off = next_nat_addr(sbi, src_off);
120
121         /* get current nat block page with lock */
122         src_page = get_meta_page(sbi, src_off);
123         dst_page = grab_meta_page(sbi, dst_off);
124         f2fs_bug_on(sbi, PageDirty(src_page));
125
126         src_addr = page_address(src_page);
127         dst_addr = page_address(dst_page);
128         memcpy(dst_addr, src_addr, PAGE_SIZE);
129         set_page_dirty(dst_page);
130         f2fs_put_page(src_page, 1);
131
132         set_to_next_nat(nm_i, nid);
133
134         return dst_page;
135 }
136
137 static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
138 {
139         return radix_tree_lookup(&nm_i->nat_root, n);
140 }
141
142 static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
143                 nid_t start, unsigned int nr, struct nat_entry **ep)
144 {
145         return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
146 }
147
148 static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
149 {
150         list_del(&e->list);
151         radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
152         nm_i->nat_cnt--;
153         kmem_cache_free(nat_entry_slab, e);
154 }
155
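/*
 * Put @ne on the dirty set of its NAT block so it gets flushed at checkpoint
 * time; entries still pointing to NEW_ADDR are kept off the entry lists.
 */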
156 static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
157                                                 struct nat_entry *ne)
158 {
159         nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
160         struct nat_entry_set *head;
161
162         head = radix_tree_lookup(&nm_i->nat_set_root, set);
163         if (!head) {
164                 head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);
165
166                 INIT_LIST_HEAD(&head->entry_list);
167                 INIT_LIST_HEAD(&head->set_list);
168                 head->set = set;
169                 head->entry_cnt = 0;
170                 f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
171         }
172
173         if (get_nat_flag(ne, IS_DIRTY))
174                 goto refresh_list;
175
176         nm_i->dirty_nat_cnt++;
177         head->entry_cnt++;
178         set_nat_flag(ne, IS_DIRTY, true);
179 refresh_list:
180         if (nat_get_blkaddr(ne) == NEW_ADDR)
181                 list_del_init(&ne->list);
182         else
183                 list_move_tail(&ne->list, &head->entry_list);
184 }
185
186 static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
187                 struct nat_entry_set *set, struct nat_entry *ne)
188 {
189         list_move_tail(&ne->list, &nm_i->nat_entries);
190         set_nat_flag(ne, IS_DIRTY, false);
191         set->entry_cnt--;
192         nm_i->dirty_nat_cnt--;
193 }
194
195 static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
196                 nid_t start, unsigned int nr, struct nat_entry_set **ep)
197 {
198         return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
199                                                         start, nr);
200 }
201
202 int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
203 {
204         struct f2fs_nm_info *nm_i = NM_I(sbi);
205         struct nat_entry *e;
206         bool need = false;
207
208         down_read(&nm_i->nat_tree_lock);
209         e = __lookup_nat_cache(nm_i, nid);
210         if (e) {
211                 if (!get_nat_flag(e, IS_CHECKPOINTED) &&
212                                 !get_nat_flag(e, HAS_FSYNCED_INODE))
213                         need = true;
214         }
215         up_read(&nm_i->nat_tree_lock);
216         return need;
217 }
218
219 bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
220 {
221         struct f2fs_nm_info *nm_i = NM_I(sbi);
222         struct nat_entry *e;
223         bool is_cp = true;
224
225         down_read(&nm_i->nat_tree_lock);
226         e = __lookup_nat_cache(nm_i, nid);
227         if (e && !get_nat_flag(e, IS_CHECKPOINTED))
228                 is_cp = false;
229         up_read(&nm_i->nat_tree_lock);
230         return is_cp;
231 }
232
233 bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
234 {
235         struct f2fs_nm_info *nm_i = NM_I(sbi);
236         struct nat_entry *e;
237         bool need_update = true;
238
239         down_read(&nm_i->nat_tree_lock);
240         e = __lookup_nat_cache(nm_i, ino);
241         if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
242                         (get_nat_flag(e, IS_CHECKPOINTED) ||
243                          get_nat_flag(e, HAS_FSYNCED_INODE)))
244                 need_update = false;
245         up_read(&nm_i->nat_tree_lock);
246         return need_update;
247 }
248
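/*
 * Allocate a nat_entry for @nid and insert it into the NAT cache.  With
 * @no_fail the allocation and radix-tree insert retry until they succeed;
 * otherwise NULL is returned on failure.
 */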
249 static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
250                                                                 bool no_fail)
251 {
252         struct nat_entry *new;
253
254         if (no_fail) {
255                 new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS);
256                 f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
257         } else {
258                 new = kmem_cache_alloc(nat_entry_slab, GFP_NOFS);
259                 if (!new)
260                         return NULL;
261                 if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
262                         kmem_cache_free(nat_entry_slab, new);
263                         return NULL;
264                 }
265         }
266
267         memset(new, 0, sizeof(struct nat_entry));
268         nat_set_nid(new, nid);
269         nat_reset_flag(new);
270         list_add_tail(&new->list, &nm_i->nat_entries);
271         nm_i->nat_cnt++;
272         return new;
273 }
274
275 static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
276                                                 struct f2fs_nat_entry *ne)
277 {
278         struct f2fs_nm_info *nm_i = NM_I(sbi);
279         struct nat_entry *e;
280
281         e = __lookup_nat_cache(nm_i, nid);
282         if (!e) {
283                 e = grab_nat_entry(nm_i, nid, false);
284                 if (e)
285                         node_info_from_raw_nat(&e->ni, ne);
286         } else {
287                 f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
288                                 nat_get_blkaddr(e) !=
289                                         le32_to_cpu(ne->block_addr) ||
290                                 nat_get_version(e) != ne->version);
291         }
292 }
293
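/*
 * Record the new block address of a node in the NAT cache, mark the entry
 * dirty, and refresh the fsync flags on the owning inode's entry.
 */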
294 static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
295                         block_t new_blkaddr, bool fsync_done)
296 {
297         struct f2fs_nm_info *nm_i = NM_I(sbi);
298         struct nat_entry *e;
299
300         down_write(&nm_i->nat_tree_lock);
301         e = __lookup_nat_cache(nm_i, ni->nid);
302         if (!e) {
303                 e = grab_nat_entry(nm_i, ni->nid, true);
304                 copy_node_info(&e->ni, ni);
305                 f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
306         } else if (new_blkaddr == NEW_ADDR) {
307                 /*
308                  * when nid is reallocated,
309                  * previous nat entry can remain in the nat cache.
310                  * So, reinitialize it with new information.
311                  */
312                 copy_node_info(&e->ni, ni);
313                 f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
314         }
315
316         /* sanity check */
317         f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
318         f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
319                         new_blkaddr == NULL_ADDR);
320         f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
321                         new_blkaddr == NEW_ADDR);
322         f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR &&
323                         nat_get_blkaddr(e) != NULL_ADDR &&
324                         new_blkaddr == NEW_ADDR);
325
326         /* increment version no as node is removed */
327         if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
328                 unsigned char version = nat_get_version(e);
329                 nat_set_version(e, inc_node_version(version));
330         }
331
332         /* change address */
333         nat_set_blkaddr(e, new_blkaddr);
334         if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR)
335                 set_nat_flag(e, IS_CHECKPOINTED, false);
336         __set_nat_cache_dirty(nm_i, e);
337
338         /* update fsync_mark if its inode nat entry is still alive */
339         if (ni->nid != ni->ino)
340                 e = __lookup_nat_cache(nm_i, ni->ino);
341         if (e) {
342                 if (fsync_done && ni->nid == ni->ino)
343                         set_nat_flag(e, HAS_FSYNCED_INODE, true);
344                 set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
345         }
346         up_write(&nm_i->nat_tree_lock);
347 }
348
349 int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
350 {
351         struct f2fs_nm_info *nm_i = NM_I(sbi);
352         int nr = nr_shrink;
353
354         if (!down_write_trylock(&nm_i->nat_tree_lock))
355                 return 0;
356
357         while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
358                 struct nat_entry *ne;
359                 ne = list_first_entry(&nm_i->nat_entries,
360                                         struct nat_entry, list);
361                 __del_from_nat_cache(nm_i, ne);
362                 nr_shrink--;
363         }
364         up_write(&nm_i->nat_tree_lock);
365         return nr - nr_shrink;
366 }
367
368 /*
369  * This function always returns success
370  */
371 void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
372 {
373         struct f2fs_nm_info *nm_i = NM_I(sbi);
374         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
375         struct f2fs_journal *journal = curseg->journal;
376         nid_t start_nid = START_NID(nid);
377         struct f2fs_nat_block *nat_blk;
378         struct page *page = NULL;
379         struct f2fs_nat_entry ne;
380         struct nat_entry *e;
381         pgoff_t index;
382         int i;
383
384         ni->nid = nid;
385
386         /* Check nat cache */
387         down_read(&nm_i->nat_tree_lock);
388         e = __lookup_nat_cache(nm_i, nid);
389         if (e) {
390                 ni->ino = nat_get_ino(e);
391                 ni->blk_addr = nat_get_blkaddr(e);
392                 ni->version = nat_get_version(e);
393                 up_read(&nm_i->nat_tree_lock);
394                 return;
395         }
396
397         memset(&ne, 0, sizeof(struct f2fs_nat_entry));
398
399         /* Check current segment summary */
400         down_read(&curseg->journal_rwsem);
401         i = lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
402         if (i >= 0) {
403                 ne = nat_in_journal(journal, i);
404                 node_info_from_raw_nat(ni, &ne);
405         }
406         up_read(&curseg->journal_rwsem);
407         if (i >= 0) {
408                 up_read(&nm_i->nat_tree_lock);
409                 goto cache;
410         }
411
412         /* Fill node_info from nat page */
413         index = current_nat_addr(sbi, nid);
414         up_read(&nm_i->nat_tree_lock);
415
416         page = get_meta_page(sbi, index);
417         nat_blk = (struct f2fs_nat_block *)page_address(page);
418         ne = nat_blk->entries[nid - start_nid];
419         node_info_from_raw_nat(ni, &ne);
420         f2fs_put_page(page, 1);
421 cache:
422         /* cache nat entry */
423         down_write(&nm_i->nat_tree_lock);
424         cache_nat_entry(sbi, nid, &ne);
425         up_write(&nm_i->nat_tree_lock);
426 }
427
428 /*
429  * readahead MAX_RA_NODE number of node pages.
430  */
431 static void ra_node_pages(struct page *parent, int start, int n)
432 {
433         struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
434         struct blk_plug plug;
435         int i, end;
436         nid_t nid;
437
438         blk_start_plug(&plug);
439
440         /* Then, try readahead for siblings of the desired node */
441         end = start + n;
442         end = min(end, NIDS_PER_BLOCK);
443         for (i = start; i < end; i++) {
444                 nid = get_nid(parent, i, false);
445                 ra_node_page(sbi, nid);
446         }
447
448         blk_finish_plug(&plug);
449 }
450
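/*
 * Return the page offset at which the next node subtree of dn->cur_level
 * starts, so that callers can skip over the hole found by a failed lookup.
 */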
451 pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
452 {
453         const long direct_index = ADDRS_PER_INODE(dn->inode);
454         const long direct_blks = ADDRS_PER_BLOCK;
455         const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
456         unsigned int skipped_unit = ADDRS_PER_BLOCK;
457         int cur_level = dn->cur_level;
458         int max_level = dn->max_level;
459         pgoff_t base = 0;
460
461         if (!dn->max_level)
462                 return pgofs + 1;
463
464         while (max_level-- > cur_level)
465                 skipped_unit *= NIDS_PER_BLOCK;
466
467         switch (dn->max_level) {
468         case 3:
469                 base += 2 * indirect_blks;
470         case 2:
471                 base += 2 * direct_blks;
472         case 1:
473                 base += direct_index;
474                 break;
475         default:
476                 f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
477         }
478
479         return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
480 }
481
482 /*
483  * The maximum depth is four.
484  * Offset[0] will have raw inode offset.
485  */
486 static int get_node_path(struct inode *inode, long block,
487                                 int offset[4], unsigned int noffset[4])
488 {
489         const long direct_index = ADDRS_PER_INODE(inode);
490         const long direct_blks = ADDRS_PER_BLOCK;
491         const long dptrs_per_blk = NIDS_PER_BLOCK;
492         const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
493         const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
494         int n = 0;
495         int level = 0;
496
497         noffset[0] = 0;
498
499         if (block < direct_index) {
500                 offset[n] = block;
501                 goto got;
502         }
503         block -= direct_index;
504         if (block < direct_blks) {
505                 offset[n++] = NODE_DIR1_BLOCK;
506                 noffset[n] = 1;
507                 offset[n] = block;
508                 level = 1;
509                 goto got;
510         }
511         block -= direct_blks;
512         if (block < direct_blks) {
513                 offset[n++] = NODE_DIR2_BLOCK;
514                 noffset[n] = 2;
515                 offset[n] = block;
516                 level = 1;
517                 goto got;
518         }
519         block -= direct_blks;
520         if (block < indirect_blks) {
521                 offset[n++] = NODE_IND1_BLOCK;
522                 noffset[n] = 3;
523                 offset[n++] = block / direct_blks;
524                 noffset[n] = 4 + offset[n - 1];
525                 offset[n] = block % direct_blks;
526                 level = 2;
527                 goto got;
528         }
529         block -= indirect_blks;
530         if (block < indirect_blks) {
531                 offset[n++] = NODE_IND2_BLOCK;
532                 noffset[n] = 4 + dptrs_per_blk;
533                 offset[n++] = block / direct_blks;
534                 noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
535                 offset[n] = block % direct_blks;
536                 level = 2;
537                 goto got;
538         }
539         block -= indirect_blks;
540         if (block < dindirect_blks) {
541                 offset[n++] = NODE_DIND_BLOCK;
542                 noffset[n] = 5 + (dptrs_per_blk * 2);
543                 offset[n++] = block / indirect_blks;
544                 noffset[n] = 6 + (dptrs_per_blk * 2) +
545                               offset[n - 1] * (dptrs_per_blk + 1);
546                 offset[n++] = (block / direct_blks) % dptrs_per_blk;
547                 noffset[n] = 7 + (dptrs_per_blk * 2) +
548                               offset[n - 2] * (dptrs_per_blk + 1) +
549                               offset[n - 1];
550                 offset[n] = block % direct_blks;
551                 level = 3;
552                 goto got;
553         } else {
554                 return -E2BIG;
555         }
556 got:
557         return level;
558 }
559
560 /*
561  * Caller should call f2fs_put_dnode(dn).
562  * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
563  * f2fs_unlock_op() unless the mode is set to RDONLY_NODE.
564  * In the case of RDONLY_NODE, we don't need to care about the mutex.
565  */
566 int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
567 {
568         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
569         struct page *npage[4];
570         struct page *parent = NULL;
571         int offset[4];
572         unsigned int noffset[4];
573         nid_t nids[4];
574         int level, i = 0;
575         int err = 0;
576
577         level = get_node_path(dn->inode, index, offset, noffset);
578         if (level < 0)
579                 return level;
580
581         nids[0] = dn->inode->i_ino;
582         npage[0] = dn->inode_page;
583
584         if (!npage[0]) {
585                 npage[0] = get_node_page(sbi, nids[0]);
586                 if (IS_ERR(npage[0]))
587                         return PTR_ERR(npage[0]);
588         }
589
590         /* if inline_data is set, should not report any block indices */
591         if (f2fs_has_inline_data(dn->inode) && index) {
592                 err = -ENOENT;
593                 f2fs_put_page(npage[0], 1);
594                 goto release_out;
595         }
596
597         parent = npage[0];
598         if (level != 0)
599                 nids[1] = get_nid(parent, offset[0], true);
600         dn->inode_page = npage[0];
601         dn->inode_page_locked = true;
602
603         /* get indirect or direct nodes */
604         for (i = 1; i <= level; i++) {
605                 bool done = false;
606
607                 if (!nids[i] && mode == ALLOC_NODE) {
608                         /* alloc new node */
609                         if (!alloc_nid(sbi, &(nids[i]))) {
610                                 err = -ENOSPC;
611                                 goto release_pages;
612                         }
613
614                         dn->nid = nids[i];
615                         npage[i] = new_node_page(dn, noffset[i]);
616                         if (IS_ERR(npage[i])) {
617                                 alloc_nid_failed(sbi, nids[i]);
618                                 err = PTR_ERR(npage[i]);
619                                 goto release_pages;
620                         }
621
622                         set_nid(parent, offset[i - 1], nids[i], i == 1);
623                         alloc_nid_done(sbi, nids[i]);
624                         done = true;
625                 } else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
626                         npage[i] = get_node_page_ra(parent, offset[i - 1]);
627                         if (IS_ERR(npage[i])) {
628                                 err = PTR_ERR(npage[i]);
629                                 goto release_pages;
630                         }
631                         done = true;
632                 }
633                 if (i == 1) {
634                         dn->inode_page_locked = false;
635                         unlock_page(parent);
636                 } else {
637                         f2fs_put_page(parent, 1);
638                 }
639
640                 if (!done) {
641                         npage[i] = get_node_page(sbi, nids[i]);
642                         if (IS_ERR(npage[i])) {
643                                 err = PTR_ERR(npage[i]);
644                                 f2fs_put_page(npage[0], 0);
645                                 goto release_out;
646                         }
647                 }
648                 if (i < level) {
649                         parent = npage[i];
650                         nids[i + 1] = get_nid(parent, offset[i], false);
651                 }
652         }
653         dn->nid = nids[level];
654         dn->ofs_in_node = offset[level];
655         dn->node_page = npage[level];
656         dn->data_blkaddr = datablock_addr(dn->inode,
657                                 dn->node_page, dn->ofs_in_node);
658         return 0;
659
660 release_pages:
661         f2fs_put_page(parent, 1);
662         if (i > 1)
663                 f2fs_put_page(npage[0], 0);
664 release_out:
665         dn->inode_page = NULL;
666         dn->node_page = NULL;
667         if (err == -ENOENT) {
668                 dn->cur_level = i;
669                 dn->max_level = level;
670                 dn->ofs_in_node = offset[level];
671         }
672         return err;
673 }
674
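/*
 * Invalidate the on-disk block of dn->node_page, update the NAT cache and
 * block counts, and drop the page from the node mapping.
 */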
675 static void truncate_node(struct dnode_of_data *dn)
676 {
677         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
678         struct node_info ni;
679
680         get_node_info(sbi, dn->nid, &ni);
681         f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
682
683         /* Deallocate node address */
684         invalidate_blocks(sbi, ni.blk_addr);
685         dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
686         set_node_addr(sbi, &ni, NULL_ADDR, false);
687
688         if (dn->nid == dn->inode->i_ino) {
689                 remove_orphan_inode(sbi, dn->nid);
690                 dec_valid_inode_count(sbi);
691                 f2fs_inode_synced(dn->inode);
692         }
693
694         clear_node_page_dirty(dn->node_page);
695         set_sbi_flag(sbi, SBI_IS_DIRTY);
696
697         f2fs_put_page(dn->node_page, 1);
698
699         invalidate_mapping_pages(NODE_MAPPING(sbi),
700                         dn->node_page->index, dn->node_page->index);
701
702         dn->node_page = NULL;
703         trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
704 }
705
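/*
 * Truncate one direct node: free every data block it addresses and then the
 * node itself.  Returns 1 when the node is gone or was never allocated.
 */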
706 static int truncate_dnode(struct dnode_of_data *dn)
707 {
708         struct page *page;
709
710         if (dn->nid == 0)
711                 return 1;
712
713         /* get direct node */
714         page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
715         if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
716                 return 1;
717         else if (IS_ERR(page))
718                 return PTR_ERR(page);
719
720         /* Make dnode_of_data for parameter */
721         dn->node_page = page;
722         dn->ofs_in_node = 0;
723         truncate_data_blocks(dn);
724         truncate_node(dn);
725         return 1;
726 }
727
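/*
 * Recursively truncate the (double) indirect node dn->nid, freeing children
 * from index @ofs onward.  Returns the number of nodes freed or an error.
 */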
728 static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
729                                                 int ofs, int depth)
730 {
731         struct dnode_of_data rdn = *dn;
732         struct page *page;
733         struct f2fs_node *rn;
734         nid_t child_nid;
735         unsigned int child_nofs;
736         int freed = 0;
737         int i, ret;
738
739         if (dn->nid == 0)
740                 return NIDS_PER_BLOCK + 1;
741
742         trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
743
744         page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
745         if (IS_ERR(page)) {
746                 trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
747                 return PTR_ERR(page);
748         }
749
750         ra_node_pages(page, ofs, NIDS_PER_BLOCK);
751
752         rn = F2FS_NODE(page);
753         if (depth < 3) {
754                 for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
755                         child_nid = le32_to_cpu(rn->in.nid[i]);
756                         if (child_nid == 0)
757                                 continue;
758                         rdn.nid = child_nid;
759                         ret = truncate_dnode(&rdn);
760                         if (ret < 0)
761                                 goto out_err;
762                         if (set_nid(page, i, 0, false))
763                                 dn->node_changed = true;
764                 }
765         } else {
766                 child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
767                 for (i = ofs; i < NIDS_PER_BLOCK; i++) {
768                         child_nid = le32_to_cpu(rn->in.nid[i]);
769                         if (child_nid == 0) {
770                                 child_nofs += NIDS_PER_BLOCK + 1;
771                                 continue;
772                         }
773                         rdn.nid = child_nid;
774                         ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
775                         if (ret == (NIDS_PER_BLOCK + 1)) {
776                                 if (set_nid(page, i, 0, false))
777                                         dn->node_changed = true;
778                                 child_nofs += ret;
779                         } else if (ret < 0 && ret != -ENOENT) {
780                                 goto out_err;
781                         }
782                 }
783                 freed = child_nofs;
784         }
785
786         if (!ofs) {
787                 /* remove current indirect node */
788                 dn->node_page = page;
789                 truncate_node(dn);
790                 freed++;
791         } else {
792                 f2fs_put_page(page, 1);
793         }
794         trace_f2fs_truncate_nodes_exit(dn->inode, freed);
795         return freed;
796
797 out_err:
798         f2fs_put_page(page, 1);
799         trace_f2fs_truncate_nodes_exit(dn->inode, ret);
800         return ret;
801 }
802
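/*
 * Truncate the partially covered indirect path: free direct nodes from
 * offset[idx + 1] onward, and the indirect node itself when the truncation
 * starts at its first slot.
 */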
803 static int truncate_partial_nodes(struct dnode_of_data *dn,
804                         struct f2fs_inode *ri, int *offset, int depth)
805 {
806         struct page *pages[2];
807         nid_t nid[3];
808         nid_t child_nid;
809         int err = 0;
810         int i;
811         int idx = depth - 2;
812
813         nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
814         if (!nid[0])
815                 return 0;
816
817         /* get indirect nodes in the path */
818         for (i = 0; i < idx + 1; i++) {
819                 /* reference count will be increased */
820                 pages[i] = get_node_page(F2FS_I_SB(dn->inode), nid[i]);
821                 if (IS_ERR(pages[i])) {
822                         err = PTR_ERR(pages[i]);
823                         idx = i - 1;
824                         goto fail;
825                 }
826                 nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
827         }
828
829         ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
830
831         /* free direct nodes linked to a partial indirect node */
832         for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
833                 child_nid = get_nid(pages[idx], i, false);
834                 if (!child_nid)
835                         continue;
836                 dn->nid = child_nid;
837                 err = truncate_dnode(dn);
838                 if (err < 0)
839                         goto fail;
840                 if (set_nid(pages[idx], i, 0, false))
841                         dn->node_changed = true;
842         }
843
844         if (offset[idx + 1] == 0) {
845                 dn->node_page = pages[idx];
846                 dn->nid = nid[idx];
847                 truncate_node(dn);
848         } else {
849                 f2fs_put_page(pages[idx], 1);
850         }
851         offset[idx]++;
852         offset[idx + 1] = 0;
853         idx--;
854 fail:
855         for (i = idx; i >= 0; i--)
856                 f2fs_put_page(pages[i], 1);
857
858         trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
859
860         return err;
861 }
862
863 /*
864  * All the block addresses of data and nodes should be nullified.
865  */
866 int truncate_inode_blocks(struct inode *inode, pgoff_t from)
867 {
868         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
869         int err = 0, cont = 1;
870         int level, offset[4], noffset[4];
871         unsigned int nofs = 0;
872         struct f2fs_inode *ri;
873         struct dnode_of_data dn;
874         struct page *page;
875
876         trace_f2fs_truncate_inode_blocks_enter(inode, from);
877
878         level = get_node_path(inode, from, offset, noffset);
879         if (level < 0)
880                 return level;
881
882         page = get_node_page(sbi, inode->i_ino);
883         if (IS_ERR(page)) {
884                 trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
885                 return PTR_ERR(page);
886         }
887
888         set_new_dnode(&dn, inode, page, NULL, 0);
889         unlock_page(page);
890
891         ri = F2FS_INODE(page);
892         switch (level) {
893         case 0:
894         case 1:
895                 nofs = noffset[1];
896                 break;
897         case 2:
898                 nofs = noffset[1];
899                 if (!offset[level - 1])
900                         goto skip_partial;
901                 err = truncate_partial_nodes(&dn, ri, offset, level);
902                 if (err < 0 && err != -ENOENT)
903                         goto fail;
904                 nofs += 1 + NIDS_PER_BLOCK;
905                 break;
906         case 3:
907                 nofs = 5 + 2 * NIDS_PER_BLOCK;
908                 if (!offset[level - 1])
909                         goto skip_partial;
910                 err = truncate_partial_nodes(&dn, ri, offset, level);
911                 if (err < 0 && err != -ENOENT)
912                         goto fail;
913                 break;
914         default:
915                 BUG();
916         }
917
918 skip_partial:
919         while (cont) {
920                 dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
921                 switch (offset[0]) {
922                 case NODE_DIR1_BLOCK:
923                 case NODE_DIR2_BLOCK:
924                         err = truncate_dnode(&dn);
925                         break;
926
927                 case NODE_IND1_BLOCK:
928                 case NODE_IND2_BLOCK:
929                         err = truncate_nodes(&dn, nofs, offset[1], 2);
930                         break;
931
932                 case NODE_DIND_BLOCK:
933                         err = truncate_nodes(&dn, nofs, offset[1], 3);
934                         cont = 0;
935                         break;
936
937                 default:
938                         BUG();
939                 }
940                 if (err < 0 && err != -ENOENT)
941                         goto fail;
942                 if (offset[1] == 0 &&
943                                 ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
944                         lock_page(page);
945                         BUG_ON(page->mapping != NODE_MAPPING(sbi));
946                         f2fs_wait_on_page_writeback(page, NODE, true);
947                         ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
948                         set_page_dirty(page);
949                         unlock_page(page);
950                 }
951                 offset[1] = 0;
952                 offset[0]++;
953                 nofs += err;
954         }
955 fail:
956         f2fs_put_page(page, 0);
957         trace_f2fs_truncate_inode_blocks_exit(inode, err);
958         return err > 0 ? 0 : err;
959 }
960
961 int truncate_xattr_node(struct inode *inode, struct page *page)
962 {
963         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
964         nid_t nid = F2FS_I(inode)->i_xattr_nid;
965         struct dnode_of_data dn;
966         struct page *npage;
967
968         if (!nid)
969                 return 0;
970
971         npage = get_node_page(sbi, nid);
972         if (IS_ERR(npage))
973                 return PTR_ERR(npage);
974
975         f2fs_i_xnid_write(inode, 0);
976
977         set_new_dnode(&dn, inode, page, npage, nid);
978
979         if (page)
980                 dn.inode_page_locked = true;
981         truncate_node(&dn);
982         return 0;
983 }
984
985 /*
986  * Caller should grab and release a rwsem by calling f2fs_lock_op() and
987  * f2fs_unlock_op().
988  */
989 int remove_inode_page(struct inode *inode)
990 {
991         struct dnode_of_data dn;
992         int err;
993
994         set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
995         err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
996         if (err)
997                 return err;
998
999         err = truncate_xattr_node(inode, dn.inode_page);
1000         if (err) {
1001                 f2fs_put_dnode(&dn);
1002                 return err;
1003         }
1004
1005         /* remove potential inline_data blocks */
1006         if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1007                                 S_ISLNK(inode->i_mode))
1008                 truncate_data_blocks_range(&dn, 1);
1009
1010         /* 0 is possible, after f2fs_new_inode() has failed */
1011         f2fs_bug_on(F2FS_I_SB(inode),
1012                         inode->i_blocks != 0 && inode->i_blocks != 8);
1013
1014         /* will put inode & node pages */
1015         truncate_node(&dn);
1016         return 0;
1017 }
1018
1019 struct page *new_inode_page(struct inode *inode)
1020 {
1021         struct dnode_of_data dn;
1022
1023         /* allocate inode page for new inode */
1024         set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
1025
1026         /* caller should f2fs_put_page(page, 1); */
1027         return new_node_page(&dn, 0);
1028 }
1029
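/*
 * Allocate an in-memory node page for dn->nid: reserve a valid node count,
 * set its NAT entry to NEW_ADDR, fill the node footer and return the page
 * locked and dirty.
 */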
1030 struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
1031 {
1032         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1033         struct node_info new_ni;
1034         struct page *page;
1035         int err;
1036
1037         if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1038                 return ERR_PTR(-EPERM);
1039
1040         page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
1041         if (!page)
1042                 return ERR_PTR(-ENOMEM);
1043
1044         if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
1045                 goto fail;
1046
1047 #ifdef CONFIG_F2FS_CHECK_FS
1048         get_node_info(sbi, dn->nid, &new_ni);
1049         f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
1050 #endif
1051         new_ni.nid = dn->nid;
1052         new_ni.ino = dn->inode->i_ino;
1053         new_ni.blk_addr = NULL_ADDR;
1054         new_ni.flag = 0;
1055         new_ni.version = 0;
1056         set_node_addr(sbi, &new_ni, NEW_ADDR, false);
1057
1058         f2fs_wait_on_page_writeback(page, NODE, true);
1059         fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
1060         set_cold_node(dn->inode, page);
1061         if (!PageUptodate(page))
1062                 SetPageUptodate(page);
1063         if (set_page_dirty(page))
1064                 dn->node_changed = true;
1065
1066         if (f2fs_has_xattr_block(ofs))
1067                 f2fs_i_xnid_write(dn->inode, dn->nid);
1068
1069         if (ofs == 0)
1070                 inc_valid_inode_count(sbi);
1071         return page;
1072
1073 fail:
1074         clear_node_page_dirty(page);
1075         f2fs_put_page(page, 1);
1076         return ERR_PTR(err);
1077 }
1078
1079 /*
1080  * Caller should do after getting the following values.
1081  * 0: f2fs_put_page(page, 0)
1082  * LOCKED_PAGE or error: f2fs_put_page(page, 1)
1083  */
1084 static int read_node_page(struct page *page, int op_flags)
1085 {
1086         struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1087         struct node_info ni;
1088         struct f2fs_io_info fio = {
1089                 .sbi = sbi,
1090                 .type = NODE,
1091                 .op = REQ_OP_READ,
1092                 .op_flags = op_flags,
1093                 .page = page,
1094                 .encrypted_page = NULL,
1095         };
1096
1097         if (PageUptodate(page))
1098                 return LOCKED_PAGE;
1099
1100         get_node_info(sbi, page->index, &ni);
1101
1102         if (unlikely(ni.blk_addr == NULL_ADDR)) {
1103                 ClearPageUptodate(page);
1104                 return -ENOENT;
1105         }
1106
1107         fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
1108         return f2fs_submit_page_bio(&fio);
1109 }
1110
1111 /*
1112  * Readahead a node page
1113  */
1114 void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
1115 {
1116         struct page *apage;
1117         int err;
1118
1119         if (!nid)
1120                 return;
1121         f2fs_bug_on(sbi, check_nid_range(sbi, nid));
1122
1123         rcu_read_lock();
1124         apage = radix_tree_lookup(&NODE_MAPPING(sbi)->page_tree, nid);
1125         rcu_read_unlock();
1126         if (apage)
1127                 return;
1128
1129         apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1130         if (!apage)
1131                 return;
1132
1133         err = read_node_page(apage, REQ_RAHEAD);
1134         f2fs_put_page(apage, err ? 1 : 0);
1135 }
1136
1137 static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
1138                                         struct page *parent, int start)
1139 {
1140         struct page *page;
1141         int err;
1142
1143         if (!nid)
1144                 return ERR_PTR(-ENOENT);
1145         f2fs_bug_on(sbi, check_nid_range(sbi, nid));
1146 repeat:
1147         page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1148         if (!page)
1149                 return ERR_PTR(-ENOMEM);
1150
1151         err = read_node_page(page, 0);
1152         if (err < 0) {
1153                 f2fs_put_page(page, 1);
1154                 return ERR_PTR(err);
1155         } else if (err == LOCKED_PAGE) {
1156                 err = 0;
1157                 goto page_hit;
1158         }
1159
1160         if (parent)
1161                 ra_node_pages(parent, start + 1, MAX_RA_NODE);
1162
1163         lock_page(page);
1164
1165         if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1166                 f2fs_put_page(page, 1);
1167                 goto repeat;
1168         }
1169
1170         if (unlikely(!PageUptodate(page))) {
1171                 err = -EIO;
1172                 goto out_err;
1173         }
1174
1175         if (!f2fs_inode_chksum_verify(sbi, page)) {
1176                 err = -EBADMSG;
1177                 goto out_err;
1178         }
1179 page_hit:
1180         if (unlikely(nid != nid_of_node(page))) {
1181                 f2fs_msg(sbi->sb, KERN_WARNING, "inconsistent node block, "
1182                         "nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
1183                         nid, nid_of_node(page), ino_of_node(page),
1184                         ofs_of_node(page), cpver_of_node(page),
1185                         next_blkaddr_of_node(page));
1186                 err = -EINVAL;
1187 out_err:
1188                 ClearPageUptodate(page);
1189                 f2fs_put_page(page, 1);
1190                 return ERR_PTR(err);
1191         }
1192         return page;
1193 }
1194
1195 struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
1196 {
1197         return __get_node_page(sbi, nid, NULL, 0);
1198 }
1199
1200 struct page *get_node_page_ra(struct page *parent, int start)
1201 {
1202         struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
1203         nid_t nid = get_nid(parent, start, false);
1204
1205         return __get_node_page(sbi, nid, parent, start);
1206 }
1207
1208 static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
1209 {
1210         struct inode *inode;
1211         struct page *page;
1212         int ret;
1213
1214         /* should flush inline_data before evict_inode */
1215         inode = ilookup(sbi->sb, ino);
1216         if (!inode)
1217                 return;
1218
1219         page = pagecache_get_page(inode->i_mapping, 0, FGP_LOCK|FGP_NOWAIT, 0);
1220         if (!page)
1221                 goto iput_out;
1222
1223         if (!PageUptodate(page))
1224                 goto page_out;
1225
1226         if (!PageDirty(page))
1227                 goto page_out;
1228
1229         if (!clear_page_dirty_for_io(page))
1230                 goto page_out;
1231
1232         ret = f2fs_write_inline_data(inode, page);
1233         inode_dec_dirty_pages(inode);
1234         remove_dirty_inode(inode);
1235         if (ret)
1236                 set_page_dirty(page);
1237 page_out:
1238         f2fs_put_page(page, 1);
1239 iput_out:
1240         iput(inode);
1241 }
1242
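/*
 * Called by GC to migrate a node page: under FG_GC write it out
 * synchronously, under BG_GC just redirty it so writeback moves it later.
 */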
1243 void move_node_page(struct page *node_page, int gc_type)
1244 {
1245         if (gc_type == FG_GC) {
1246                 struct f2fs_sb_info *sbi = F2FS_P_SB(node_page);
1247                 struct writeback_control wbc = {
1248                         .sync_mode = WB_SYNC_ALL,
1249                         .nr_to_write = 1,
1250                         .for_reclaim = 0,
1251                 };
1252
1253                 set_page_dirty(node_page);
1254                 f2fs_wait_on_page_writeback(node_page, NODE, true);
1255
1256                 f2fs_bug_on(sbi, PageWriteback(node_page));
1257                 if (!clear_page_dirty_for_io(node_page))
1258                         goto out_page;
1259
1260                 if (NODE_MAPPING(sbi)->a_ops->writepage(node_page, &wbc))
1261                         unlock_page(node_page);
1262                 goto release_page;
1263         } else {
1264                 /* set page dirty and write it */
1265                 if (!PageWriteback(node_page))
1266                         set_page_dirty(node_page);
1267         }
1268 out_page:
1269         unlock_page(node_page);
1270 release_page:
1271         f2fs_put_page(node_page, 0);
1272 }
1273
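/*
 * Scan the dirty node pages of @ino and return the last dirty dnode, which
 * fsync will mark as the end of the recovery chain.
 */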
1274 static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
1275 {
1276         pgoff_t index, end;
1277         struct pagevec pvec;
1278         struct page *last_page = NULL;
1279
1280         pagevec_init(&pvec, 0);
1281         index = 0;
1282         end = ULONG_MAX;
1283
1284         while (index <= end) {
1285                 int i, nr_pages;
1286                 nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1287                                 PAGECACHE_TAG_DIRTY,
1288                                 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1289                 if (nr_pages == 0)
1290                         break;
1291
1292                 for (i = 0; i < nr_pages; i++) {
1293                         struct page *page = pvec.pages[i];
1294
1295                         if (unlikely(f2fs_cp_error(sbi))) {
1296                                 f2fs_put_page(last_page, 0);
1297                                 pagevec_release(&pvec);
1298                                 return ERR_PTR(-EIO);
1299                         }
1300
1301                         if (!IS_DNODE(page) || !is_cold_node(page))
1302                                 continue;
1303                         if (ino_of_node(page) != ino)
1304                                 continue;
1305
1306                         lock_page(page);
1307
1308                         if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1309 continue_unlock:
1310                                 unlock_page(page);
1311                                 continue;
1312                         }
1313                         if (ino_of_node(page) != ino)
1314                                 goto continue_unlock;
1315
1316                         if (!PageDirty(page)) {
1317                                 /* someone wrote it for us */
1318                                 goto continue_unlock;
1319                         }
1320
1321                         if (last_page)
1322                                 f2fs_put_page(last_page, 0);
1323
1324                         get_page(page);
1325                         last_page = page;
1326                         unlock_page(page);
1327                 }
1328                 pagevec_release(&pvec);
1329                 cond_resched();
1330         }
1331         return last_page;
1332 }
1333
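/*
 * Write back a single node page: look up its current address, write it to a
 * newly allocated block and update the NAT cache.  The page is redirtied
 * during power-off recovery or after a checkpoint error.
 */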
1334 static int __write_node_page(struct page *page, bool atomic, bool *submitted,
1335                                 struct writeback_control *wbc, bool do_balance,
1336                                 enum iostat_type io_type)
1337 {
1338         struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1339         nid_t nid;
1340         struct node_info ni;
1341         struct f2fs_io_info fio = {
1342                 .sbi = sbi,
1343                 .type = NODE,
1344                 .op = REQ_OP_WRITE,
1345                 .op_flags = wbc_to_write_flags(wbc),
1346                 .page = page,
1347                 .encrypted_page = NULL,
1348                 .submitted = false,
1349                 .io_type = io_type,
1350         };
1351
1352         trace_f2fs_writepage(page, NODE);
1353
1354         if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1355                 goto redirty_out;
1356         if (unlikely(f2fs_cp_error(sbi)))
1357                 goto redirty_out;
1358
1359         /* get old block addr of this node page */
1360         nid = nid_of_node(page);
1361         f2fs_bug_on(sbi, page->index != nid);
1362
1363         if (wbc->for_reclaim) {
1364                 if (!down_read_trylock(&sbi->node_write))
1365                         goto redirty_out;
1366         } else {
1367                 down_read(&sbi->node_write);
1368         }
1369
1370         get_node_info(sbi, nid, &ni);
1371
1372         /* This page is already truncated */
1373         if (unlikely(ni.blk_addr == NULL_ADDR)) {
1374                 ClearPageUptodate(page);
1375                 dec_page_count(sbi, F2FS_DIRTY_NODES);
1376                 up_read(&sbi->node_write);
1377                 unlock_page(page);
1378                 return 0;
1379         }
1380
1381         if (atomic && !test_opt(sbi, NOBARRIER))
1382                 fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
1383
1384         set_page_writeback(page);
1385         fio.old_blkaddr = ni.blk_addr;
1386         write_node_page(nid, &fio);
1387         set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
1388         dec_page_count(sbi, F2FS_DIRTY_NODES);
1389         up_read(&sbi->node_write);
1390
1391         if (wbc->for_reclaim) {
1392                 f2fs_submit_merged_write_cond(sbi, page->mapping->host, 0,
1393                                                 page->index, NODE);
1394                 submitted = NULL;
1395         }
1396
1397         unlock_page(page);
1398
1399         if (unlikely(f2fs_cp_error(sbi))) {
1400                 f2fs_submit_merged_write(sbi, NODE);
1401                 submitted = NULL;
1402         }
1403         if (submitted)
1404                 *submitted = fio.submitted;
1405
1406         if (do_balance)
1407                 f2fs_balance_fs(sbi, false);
1408         return 0;
1409
1410 redirty_out:
1411         redirty_page_for_writepage(wbc, page);
1412         return AOP_WRITEPAGE_ACTIVATE;
1413 }
1414
1415 static int f2fs_write_node_page(struct page *page,
1416                                 struct writeback_control *wbc)
1417 {
1418         return __write_node_page(page, false, NULL, wbc, false, FS_NODE_IO);
1419 }
1420
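/*
 * Write out all dirty dnodes that belong to @inode for fsync.  In atomic
 * mode the last dnode carries the fsync mark, and the writeback is retried
 * until that page has actually been submitted.
 */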
1421 int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
1422                         struct writeback_control *wbc, bool atomic)
1423 {
1424         pgoff_t index, end;
1425         pgoff_t last_idx = ULONG_MAX;
1426         struct pagevec pvec;
1427         int ret = 0;
1428         struct page *last_page = NULL;
1429         bool marked = false;
1430         nid_t ino = inode->i_ino;
1431
1432         if (atomic) {
1433                 last_page = last_fsync_dnode(sbi, ino);
1434                 if (IS_ERR_OR_NULL(last_page))
1435                         return PTR_ERR_OR_ZERO(last_page);
1436         }
1437 retry:
1438         pagevec_init(&pvec, 0);
1439         index = 0;
1440         end = ULONG_MAX;
1441
1442         while (index <= end) {
1443                 int i, nr_pages;
1444                 nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1445                                 PAGECACHE_TAG_DIRTY,
1446                                 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1447                 if (nr_pages == 0)
1448                         break;
1449
1450                 for (i = 0; i < nr_pages; i++) {
1451                         struct page *page = pvec.pages[i];
1452                         bool submitted = false;
1453
1454                         if (unlikely(f2fs_cp_error(sbi))) {
1455                                 f2fs_put_page(last_page, 0);
1456                                 pagevec_release(&pvec);
1457                                 ret = -EIO;
1458                                 goto out;
1459                         }
1460
1461                         if (!IS_DNODE(page) || !is_cold_node(page))
1462                                 continue;
1463                         if (ino_of_node(page) != ino)
1464                                 continue;
1465
1466                         lock_page(page);
1467
1468                         if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1469 continue_unlock:
1470                                 unlock_page(page);
1471                                 continue;
1472                         }
1473                         if (ino_of_node(page) != ino)
1474                                 goto continue_unlock;
1475
1476                         if (!PageDirty(page) && page != last_page) {
1477                                 /* someone wrote it for us */
1478                                 goto continue_unlock;
1479                         }
1480
1481                         f2fs_wait_on_page_writeback(page, NODE, true);
1482                         BUG_ON(PageWriteback(page));
1483
1484                         set_fsync_mark(page, 0);
1485                         set_dentry_mark(page, 0);
1486
1487                         if (!atomic || page == last_page) {
1488                                 set_fsync_mark(page, 1);
1489                                 if (IS_INODE(page)) {
1490                                         if (is_inode_flag_set(inode,
1491                                                                 FI_DIRTY_INODE))
1492                                                 update_inode(inode, page);
1493                                         set_dentry_mark(page,
1494                                                 need_dentry_mark(sbi, ino));
1495                                 }
1496                                 /* may be written by another thread */
1497                                 if (!PageDirty(page))
1498                                         set_page_dirty(page);
1499                         }
1500
1501                         if (!clear_page_dirty_for_io(page))
1502                                 goto continue_unlock;
1503
1504                         ret = __write_node_page(page, atomic &&
1505                                                 page == last_page,
1506                                                 &submitted, wbc, true,
1507                                                 FS_NODE_IO);
1508                         if (ret) {
1509                                 unlock_page(page);
1510                                 f2fs_put_page(last_page, 0);
1511                                 break;
1512                         } else if (submitted) {
1513                                 last_idx = page->index;
1514                         }
1515
1516                         if (page == last_page) {
1517                                 f2fs_put_page(page, 0);
1518                                 marked = true;
1519                                 break;
1520                         }
1521                 }
1522                 pagevec_release(&pvec);
1523                 cond_resched();
1524
1525                 if (ret || marked)
1526                         break;
1527         }
1528         if (!ret && atomic && !marked) {
1529                 f2fs_msg(sbi->sb, KERN_DEBUG,
1530                         "Retry to write fsync mark: ino=%u, idx=%lx",
1531                                         ino, last_page->index);
1532                 lock_page(last_page);
1533                 f2fs_wait_on_page_writeback(last_page, NODE, true);
1534                 set_page_dirty(last_page);
1535                 unlock_page(last_page);
1536                 goto retry;
1537         }
1538 out:
1539         if (last_idx != ULONG_MAX)
1540                 f2fs_submit_merged_write_cond(sbi, NULL, ino, last_idx, NODE);
1541         return ret ? -EIO : 0;
1542 }
1543
1544 int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc,
1545                                 bool do_balance, enum iostat_type io_type)
1546 {
1547         pgoff_t index, end;
1548         struct pagevec pvec;
1549         int step = 0;
1550         int nwritten = 0;
1551         int ret = 0;
1552
1553         pagevec_init(&pvec, 0);
1554
1555 next_step:
1556         index = 0;
1557         end = ULONG_MAX;
1558
1559         while (index <= end) {
1560                 int i, nr_pages;
1561                 nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1562                                 PAGECACHE_TAG_DIRTY,
1563                                 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1564                 if (nr_pages == 0)
1565                         break;
1566
1567                 for (i = 0; i < nr_pages; i++) {
1568                         struct page *page = pvec.pages[i];
1569                         bool submitted = false;
1570
1571                         if (unlikely(f2fs_cp_error(sbi))) {
1572                                 pagevec_release(&pvec);
1573                                 ret = -EIO;
1574                                 goto out;
1575                         }
1576
1577                         /*
1578                          * flushing order, by step:
1579                          * 0. indirect nodes
1580                          * 1. dentry dnodes
1581                          * 2. file dnodes
1582                          */
1583                         if (step == 0 && IS_DNODE(page))
1584                                 continue;
1585                         if (step == 1 && (!IS_DNODE(page) ||
1586                                                 is_cold_node(page)))
1587                                 continue;
1588                         if (step == 2 && (!IS_DNODE(page) ||
1589                                                 !is_cold_node(page)))
1590                                 continue;
1591 lock_node:
1592                         if (!trylock_page(page))
1593                                 continue;
1594
1595                         if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1596 continue_unlock:
1597                                 unlock_page(page);
1598                                 continue;
1599                         }
1600
1601                         if (!PageDirty(page)) {
1602                                 /* someone wrote it for us */
1603                                 goto continue_unlock;
1604                         }
1605
1606                         /* flush inline_data */
1607                         if (is_inline_node(page)) {
1608                                 clear_inline_node(page);
1609                                 unlock_page(page);
1610                                 flush_inline_data(sbi, ino_of_node(page));
1611                                 goto lock_node;
1612                         }
1613
1614                         f2fs_wait_on_page_writeback(page, NODE, true);
1615
1616                         BUG_ON(PageWriteback(page));
1617                         if (!clear_page_dirty_for_io(page))
1618                                 goto continue_unlock;
1619
1620                         set_fsync_mark(page, 0);
1621                         set_dentry_mark(page, 0);
1622
1623                         ret = __write_node_page(page, false, &submitted,
1624                                                 wbc, do_balance, io_type);
1625                         if (ret)
1626                                 unlock_page(page);
1627                         else if (submitted)
1628                                 nwritten++;
1629
1630                         if (--wbc->nr_to_write == 0)
1631                                 break;
1632                 }
1633                 pagevec_release(&pvec);
1634                 cond_resched();
1635
1636                 if (wbc->nr_to_write == 0) {
1637                         step = 2;
1638                         break;
1639                 }
1640         }
1641
1642         if (step < 2) {
1643                 step++;
1644                 goto next_step;
1645         }
1646 out:
1647         if (nwritten)
1648                 f2fs_submit_merged_write(sbi, NODE);
1649         return ret;
1650 }
1651
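/*
 * Wait for writeback of the node pages that belong to @ino and return -EIO
 * if any of them (or the node mapping as a whole) hit a write error.
 */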
1652 int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
1653 {
1654         pgoff_t index = 0, end = ULONG_MAX;
1655         struct pagevec pvec;
1656         int ret2, ret = 0;
1657
1658         pagevec_init(&pvec, 0);
1659
1660         while (index <= end) {
1661                 int i, nr_pages;
1662                 nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1663                                 PAGECACHE_TAG_WRITEBACK,
1664                                 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1665                 if (nr_pages == 0)
1666                         break;
1667
1668                 for (i = 0; i < nr_pages; i++) {
1669                         struct page *page = pvec.pages[i];
1670
1671                         /* until radix tree lookup accepts end_index */
1672                         if (unlikely(page->index > end))
1673                                 continue;
1674
1675                         if (ino && ino_of_node(page) == ino) {
1676                                 f2fs_wait_on_page_writeback(page, NODE, true);
1677                                 if (TestClearPageError(page))
1678                                         ret = -EIO;
1679                         }
1680                 }
1681                 pagevec_release(&pvec);
1682                 cond_resched();
1683         }
1684
1685         ret2 = filemap_check_errors(NODE_MAPPING(sbi));
1686         if (!ret)
1687                 ret = ret2;
1688         return ret;
1689 }
1690
1691 static int f2fs_write_node_pages(struct address_space *mapping,
1692                             struct writeback_control *wbc)
1693 {
1694         struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
1695         struct blk_plug plug;
1696         long diff;
1697
1698         if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1699                 goto skip_write;
1700
1701         /* balancing f2fs's metadata in background */
1702         f2fs_balance_fs_bg(sbi);
1703
1704         /* collect a number of dirty node pages and write them together */
1705         if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
1706                 goto skip_write;
1707
1708         trace_f2fs_writepages(mapping->host, wbc, NODE);
1709
1710         diff = nr_pages_to_write(sbi, NODE, wbc);
1711         wbc->sync_mode = WB_SYNC_NONE;
1712         blk_start_plug(&plug);
1713         sync_node_pages(sbi, wbc, true, FS_NODE_IO);
1714         blk_finish_plug(&plug);
1715         wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
1716         return 0;
1717
1718 skip_write:
1719         wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
1720         trace_f2fs_writepages(mapping->host, wbc, NODE);
1721         return 0;
1722 }
1723
1724 static int f2fs_set_node_page_dirty(struct page *page)
1725 {
1726         trace_f2fs_set_page_dirty(page, NODE);
1727
1728         if (!PageUptodate(page))
1729                 SetPageUptodate(page);
1730         if (!PageDirty(page)) {
1731                 f2fs_set_page_dirty_nobuffers(page);
1732                 inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
1733                 SetPagePrivate(page);
1734                 f2fs_trace_pid(page);
1735                 return 1;
1736         }
1737         return 0;
1738 }
1739
1740 /*
1741  * Structure of the f2fs node operations
1742  */
1743 const struct address_space_operations f2fs_node_aops = {
1744         .writepage      = f2fs_write_node_page,
1745         .writepages     = f2fs_write_node_pages,
1746         .set_page_dirty = f2fs_set_node_page_dirty,
1747         .invalidatepage = f2fs_invalidate_page,
1748         .releasepage    = f2fs_release_page,
1749 #ifdef CONFIG_MIGRATION
1750         .migratepage    = f2fs_migrate_page,
1751 #endif
1752 };
1753
1754 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
1755                                                 nid_t n)
1756 {
1757         return radix_tree_lookup(&nm_i->free_nid_root, n);
1758 }
1759
1760 static int __insert_nid_to_list(struct f2fs_sb_info *sbi,
1761                         struct free_nid *i, enum nid_list list, bool new)
1762 {
1763         struct f2fs_nm_info *nm_i = NM_I(sbi);
1764
1765         if (new) {
1766                 int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
1767                 if (err)
1768                         return err;
1769         }
1770
1771         f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW :
1772                                                 i->state != NID_ALLOC);
1773         nm_i->nid_cnt[list]++;
1774         list_add_tail(&i->list, &nm_i->nid_list[list]);
1775         return 0;
1776 }
1777
1778 static void __remove_nid_from_list(struct f2fs_sb_info *sbi,
1779                         struct free_nid *i, enum nid_list list, bool reuse)
1780 {
1781         struct f2fs_nm_info *nm_i = NM_I(sbi);
1782
1783         f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW :
1784                                                 i->state != NID_ALLOC);
1785         nm_i->nid_cnt[list]--;
1786         list_del(&i->list);
1787         if (!reuse)
1788                 radix_tree_delete(&nm_i->free_nid_root, i->nid);
1789 }
1790
1791 /* return whether the nid is recognized as free */
1792 static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
1793 {
1794         struct f2fs_nm_info *nm_i = NM_I(sbi);
1795         struct free_nid *i, *e;
1796         struct nat_entry *ne;
1797         int err = -EINVAL;
1798         bool ret = false;
1799
1800         /* 0 nid should not be used */
1801         if (unlikely(nid == 0))
1802                 return false;
1803
1804         i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
1805         i->nid = nid;
1806         i->state = NID_NEW;
1807
1808         if (radix_tree_preload(GFP_NOFS))
1809                 goto err;
1810
1811         spin_lock(&nm_i->nid_list_lock);
1812
1813         if (build) {
1814                 /*
1815                  *   Thread A             Thread B
1816                  *  - f2fs_create
1817                  *   - f2fs_new_inode
1818                  *    - alloc_nid
1819                  *     - __insert_nid_to_list(ALLOC_NID_LIST)
1820                  *                     - f2fs_balance_fs_bg
1821                  *                      - build_free_nids
1822                  *                       - __build_free_nids
1823                  *                        - scan_nat_page
1824                  *                         - add_free_nid
1825                  *                          - __lookup_nat_cache
1826                  *  - f2fs_add_link
1827                  *   - init_inode_metadata
1828                  *    - new_inode_page
1829                  *     - new_node_page
1830                  *      - set_node_addr
1831                  *  - alloc_nid_done
1832                  *   - __remove_nid_from_list(ALLOC_NID_LIST)
1833                  *                         - __insert_nid_to_list(FREE_NID_LIST)
1834                  */
1835                 ne = __lookup_nat_cache(nm_i, nid);
1836                 if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
1837                                 nat_get_blkaddr(ne) != NULL_ADDR))
1838                         goto err_out;
1839
1840                 e = __lookup_free_nid_list(nm_i, nid);
1841                 if (e) {
1842                         if (e->state == NID_NEW)
1843                                 ret = true;
1844                         goto err_out;
1845                 }
1846         }
1847         ret = true;
1848         err = __insert_nid_to_list(sbi, i, FREE_NID_LIST, true);
1849 err_out:
1850         spin_unlock(&nm_i->nid_list_lock);
1851         radix_tree_preload_end();
1852 err:
1853         if (err)
1854                 kmem_cache_free(free_nid_slab, i);
1855         return ret;
1856 }
1857
1858 static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
1859 {
1860         struct f2fs_nm_info *nm_i = NM_I(sbi);
1861         struct free_nid *i;
1862         bool need_free = false;
1863
1864         spin_lock(&nm_i->nid_list_lock);
1865         i = __lookup_free_nid_list(nm_i, nid);
1866         if (i && i->state == NID_NEW) {
1867                 __remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
1868                 need_free = true;
1869         }
1870         spin_unlock(&nm_i->nid_list_lock);
1871
1872         if (need_free)
1873                 kmem_cache_free(free_nid_slab, i);
1874 }
1875
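/*
 * A nid maps onto the per-NAT-block free nid bitmaps as the pair
 * (NAT_BLOCK_OFFSET(nid), nid - START_NID(nid)): the first index selects the
 * free_nid_bitmap[] / free_nid_count[] slot, the second the bit inside that
 * slot.  For example, with the default 4KB blocks and 9-byte NAT entries
 * (455 entries per NAT block), nid 1000 lives in NAT block 2 at bit 90.
 */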
1876 static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
1877                                                         bool set, bool build)
1878 {
1879         struct f2fs_nm_info *nm_i = NM_I(sbi);
1880         unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
1881         unsigned int nid_ofs = nid - START_NID(nid);
1882
1883         if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
1884                 return;
1885
1886         if (set)
1887                 __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
1888         else
1889                 __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
1890
1891         if (set)
1892                 nm_i->free_nid_count[nat_ofs]++;
1893         else if (!build)
1894                 nm_i->free_nid_count[nat_ofs]--;
1895 }
1896
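/*
 * Scan one on-disk NAT block: every entry whose block_addr is NULL_ADDR is a
 * free nid, so it is added to the free nid list and reflected in the per-block
 * free nid bitmap; the block itself is marked as scanned in nat_block_bitmap.
 */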
1897 static void scan_nat_page(struct f2fs_sb_info *sbi,
1898                         struct page *nat_page, nid_t start_nid)
1899 {
1900         struct f2fs_nm_info *nm_i = NM_I(sbi);
1901         struct f2fs_nat_block *nat_blk = page_address(nat_page);
1902         block_t blk_addr;
1903         unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
1904         int i;
1905
1906         if (test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
1907                 return;
1908
1909         __set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
1910
1911         i = start_nid % NAT_ENTRY_PER_BLOCK;
1912
1913         for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
1914                 bool freed = false;
1915
1916                 if (unlikely(start_nid >= nm_i->max_nid))
1917                         break;
1918
1919                 blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
1920                 f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
1921                 if (blk_addr == NULL_ADDR)
1922                         freed = add_free_nid(sbi, start_nid, true);
1923                 spin_lock(&NM_I(sbi)->nid_list_lock);
1924                 update_free_nid_bitmap(sbi, start_nid, freed, true);
1925                 spin_unlock(&NM_I(sbi)->nid_list_lock);
1926         }
1927 }
1928
1929 static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
1930 {
1931         struct f2fs_nm_info *nm_i = NM_I(sbi);
1932         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1933         struct f2fs_journal *journal = curseg->journal;
1934         unsigned int i, idx;
1935
1936         down_read(&nm_i->nat_tree_lock);
1937
1938         for (i = 0; i < nm_i->nat_blocks; i++) {
1939                 if (!test_bit_le(i, nm_i->nat_block_bitmap))
1940                         continue;
1941                 if (!nm_i->free_nid_count[i])
1942                         continue;
1943                 for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
1944                         nid_t nid;
1945
1946                         if (!test_bit_le(idx, nm_i->free_nid_bitmap[i]))
1947                                 continue;
1948
1949                         nid = i * NAT_ENTRY_PER_BLOCK + idx;
1950                         add_free_nid(sbi, nid, true);
1951
1952                         if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS)
1953                                 goto out;
1954                 }
1955         }
1956 out:
1957         down_read(&curseg->journal_rwsem);
1958         for (i = 0; i < nats_in_cursum(journal); i++) {
1959                 block_t addr;
1960                 nid_t nid;
1961
1962                 addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
1963                 nid = le32_to_cpu(nid_in_journal(journal, i));
1964                 if (addr == NULL_ADDR)
1965                         add_free_nid(sbi, nid, true);
1966                 else
1967                         remove_free_nid(sbi, nid);
1968         }
1969         up_read(&curseg->journal_rwsem);
1970         up_read(&nm_i->nat_tree_lock);
1971 }
1972
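/*
 * Refill the free nid list: outside of mount the cached free_nid_bitmap is
 * tried first via scan_free_nid_bits(); if that yields nothing (or at mount
 * time), up to FREE_NID_PAGES NAT pages are scanned starting at
 * next_scan_nid, and free nids still sitting in the NAT journal are picked
 * up as well.
 */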
1973 static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
1974 {
1975         struct f2fs_nm_info *nm_i = NM_I(sbi);
1976         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1977         struct f2fs_journal *journal = curseg->journal;
1978         int i = 0;
1979         nid_t nid = nm_i->next_scan_nid;
1980
1981         if (unlikely(nid >= nm_i->max_nid))
1982                 nid = 0;
1983
1984         /* Enough entries */
1985         if (nm_i->nid_cnt[FREE_NID_LIST] >= NAT_ENTRY_PER_BLOCK)
1986                 return;
1987
1988         if (!sync && !available_free_memory(sbi, FREE_NIDS))
1989                 return;
1990
1991         if (!mount) {
1992                 /* try to find free nids in free_nid_bitmap */
1993                 scan_free_nid_bits(sbi);
1994
1995                 if (nm_i->nid_cnt[FREE_NID_LIST])
1996                         return;
1997         }
1998
1999         /* readahead nat pages to be scanned */
2000         ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
2001                                                         META_NAT, true);
2002
2003         down_read(&nm_i->nat_tree_lock);
2004
2005         while (1) {
2006                 struct page *page = get_current_nat_page(sbi, nid);
2007
2008                 scan_nat_page(sbi, page, nid);
2009                 f2fs_put_page(page, 1);
2010
2011                 nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
2012                 if (unlikely(nid >= nm_i->max_nid))
2013                         nid = 0;
2014
2015                 if (++i >= FREE_NID_PAGES)
2016                         break;
2017         }
2018
2019         /* record where the next scan should resume to find more free nids */
2020         nm_i->next_scan_nid = nid;
2021
2022         /* find free nids from current sum_pages */
2023         down_read(&curseg->journal_rwsem);
2024         for (i = 0; i < nats_in_cursum(journal); i++) {
2025                 block_t addr;
2026
2027                 addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
2028                 nid = le32_to_cpu(nid_in_journal(journal, i));
2029                 if (addr == NULL_ADDR)
2030                         add_free_nid(sbi, nid, true);
2031                 else
2032                         remove_free_nid(sbi, nid);
2033         }
2034         up_read(&curseg->journal_rwsem);
2035         up_read(&nm_i->nat_tree_lock);
2036
2037         ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
2038                                         nm_i->ra_nid_pages, META_NAT, false);
2039 }
2040
2041 void build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
2042 {
2043         mutex_lock(&NM_I(sbi)->build_lock);
2044         __build_free_nids(sbi, sync, mount);
2045         mutex_unlock(&NM_I(sbi)->build_lock);
2046 }
2047
2048 /*
2049  * If this function returns success, the caller can obtain a new nid from
2050  * the second parameter.  The returned nid can be used as an ino as well as
2051  * a nid when an inode is created.  (A caller sketch follows alloc_nid_failed().)
2052  */
2053 bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
2054 {
2055         struct f2fs_nm_info *nm_i = NM_I(sbi);
2056         struct free_nid *i = NULL;
2057 retry:
2058 #ifdef CONFIG_F2FS_FAULT_INJECTION
2059         if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
2060                 f2fs_show_injection_info(FAULT_ALLOC_NID);
2061                 return false;
2062         }
2063 #endif
2064         spin_lock(&nm_i->nid_list_lock);
2065
2066         if (unlikely(nm_i->available_nids == 0)) {
2067                 spin_unlock(&nm_i->nid_list_lock);
2068                 return false;
2069         }
2070
2071         /* We should not use stale free nids created by build_free_nids */
2072         if (nm_i->nid_cnt[FREE_NID_LIST] && !on_build_free_nids(nm_i)) {
2073                 f2fs_bug_on(sbi, list_empty(&nm_i->nid_list[FREE_NID_LIST]));
2074                 i = list_first_entry(&nm_i->nid_list[FREE_NID_LIST],
2075                                         struct free_nid, list);
2076                 *nid = i->nid;
2077
2078                 __remove_nid_from_list(sbi, i, FREE_NID_LIST, true);
2079                 i->state = NID_ALLOC;
2080                 __insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false);
2081                 nm_i->available_nids--;
2082
2083                 update_free_nid_bitmap(sbi, *nid, false, false);
2084
2085                 spin_unlock(&nm_i->nid_list_lock);
2086                 return true;
2087         }
2088         spin_unlock(&nm_i->nid_list_lock);
2089
2090         /* Let's scan nat pages and their caches to get free nids */
2091         build_free_nids(sbi, true, false);
2092         goto retry;
2093 }
2094
2095 /*
2096  * alloc_nid() should be called prior to this function.
2097  */
2098 void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
2099 {
2100         struct f2fs_nm_info *nm_i = NM_I(sbi);
2101         struct free_nid *i;
2102
2103         spin_lock(&nm_i->nid_list_lock);
2104         i = __lookup_free_nid_list(nm_i, nid);
2105         f2fs_bug_on(sbi, !i);
2106         __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false);
2107         spin_unlock(&nm_i->nid_list_lock);
2108
2109         kmem_cache_free(free_nid_slab, i);
2110 }
2111
2112 /*
2113  * alloc_nid() should be called prior to this function.
2114  */
2115 void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
2116 {
2117         struct f2fs_nm_info *nm_i = NM_I(sbi);
2118         struct free_nid *i;
2119         bool need_free = false;
2120
2121         if (!nid)
2122                 return;
2123
2124         spin_lock(&nm_i->nid_list_lock);
2125         i = __lookup_free_nid_list(nm_i, nid);
2126         f2fs_bug_on(sbi, !i);
2127
2128         if (!available_free_memory(sbi, FREE_NIDS)) {
2129                 __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false);
2130                 need_free = true;
2131         } else {
2132                 __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, true);
2133                 i->state = NID_NEW;
2134                 __insert_nid_to_list(sbi, i, FREE_NID_LIST, false);
2135         }
2136
2137         nm_i->available_nids++;
2138
2139         update_free_nid_bitmap(sbi, nid, true, false);
2140
2141         spin_unlock(&nm_i->nid_list_lock);
2142
2143         if (need_free)
2144                 kmem_cache_free(free_nid_slab, i);
2145 }
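/*
 * Illustrative sketch only, not part of the original node.c: the expected
 * pairing of alloc_nid() with alloc_nid_done()/alloc_nid_failed().  The
 * function name and the @keep flag are hypothetical; real callers consume
 * the nid in between, e.g. by allocating a node page for it.
 */
static int example_nid_lifecycle(struct f2fs_sb_info *sbi, bool keep)
{
	nid_t nid;

	if (!alloc_nid(sbi, &nid))		/* no free nid available */
		return -ENOSPC;

	if (!keep) {
		/* undo the reservation: the nid goes back to the free list */
		alloc_nid_failed(sbi, nid);
		return 0;
	}

	/* the nid was consumed; drop its tracking entry */
	alloc_nid_done(sbi, nid);
	return 0;
}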
2146
2147 int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
2148 {
2149         struct f2fs_nm_info *nm_i = NM_I(sbi);
2150         struct free_nid *i, *next;
2151         int nr = nr_shrink;
2152
2153         if (nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS)
2154                 return 0;
2155
2156         if (!mutex_trylock(&nm_i->build_lock))
2157                 return 0;
2158
2159         spin_lock(&nm_i->nid_list_lock);
2160         list_for_each_entry_safe(i, next, &nm_i->nid_list[FREE_NID_LIST],
2161                                                                         list) {
2162                 if (nr_shrink <= 0 ||
2163                                 nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS)
2164                         break;
2165
2166                 __remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
2167                 kmem_cache_free(free_nid_slab, i);
2168                 nr_shrink--;
2169         }
2170         spin_unlock(&nm_i->nid_list_lock);
2171         mutex_unlock(&nm_i->build_lock);
2172
2173         return nr - nr_shrink;
2174 }
2175
2176 void recover_inline_xattr(struct inode *inode, struct page *page)
2177 {
2178         void *src_addr, *dst_addr;
2179         size_t inline_size;
2180         struct page *ipage;
2181         struct f2fs_inode *ri;
2182
2183         ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
2184         f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));
2185
2186         ri = F2FS_INODE(page);
2187         if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
2188                 clear_inode_flag(inode, FI_INLINE_XATTR);
2189                 goto update_inode;
2190         }
2191
2192         dst_addr = inline_xattr_addr(ipage);
2193         src_addr = inline_xattr_addr(page);
2194         inline_size = inline_xattr_size(inode);
2195
2196         f2fs_wait_on_page_writeback(ipage, NODE, true);
2197         memcpy(dst_addr, src_addr, inline_size);
2198 update_inode:
2199         update_inode(inode, ipage);
2200         f2fs_put_page(ipage, 1);
2201 }
2202
2203 int recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
2204 {
2205         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2206         nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
2207         nid_t new_xnid;
2208         struct dnode_of_data dn;
2209         struct node_info ni;
2210         struct page *xpage;
2211
2212         if (!prev_xnid)
2213                 goto recover_xnid;
2214
2215         /* 1: invalidate the previous xattr nid */
2216         get_node_info(sbi, prev_xnid, &ni);
2217         f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
2218         invalidate_blocks(sbi, ni.blk_addr);
2219         dec_valid_node_count(sbi, inode, false);
2220         set_node_addr(sbi, &ni, NULL_ADDR, false);
2221
2222 recover_xnid:
2223         /* 2: update xattr nid in inode */
2224         if (!alloc_nid(sbi, &new_xnid))
2225                 return -ENOSPC;
2226
2227         set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
2228         xpage = new_node_page(&dn, XATTR_NODE_OFFSET);
2229         if (IS_ERR(xpage)) {
2230                 alloc_nid_failed(sbi, new_xnid);
2231                 return PTR_ERR(xpage);
2232         }
2233
2234         alloc_nid_done(sbi, new_xnid);
2235         update_inode_page(inode);
2236
2237         /* 3: update and set xattr node page dirty */
2238         memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
2239
2240         set_page_dirty(xpage);
2241         f2fs_put_page(xpage, 1);
2242
2243         return 0;
2244 }
2245
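/*
 * Rebuild an in-memory inode page for the inode carried in @page: copy the
 * static part of the raw inode, reset size/blocks/links, register the node
 * at NEW_ADDR and mark the new page dirty.
 */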
2246 int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
2247 {
2248         struct f2fs_inode *src, *dst;
2249         nid_t ino = ino_of_node(page);
2250         struct node_info old_ni, new_ni;
2251         struct page *ipage;
2252
2253         get_node_info(sbi, ino, &old_ni);
2254
2255         if (unlikely(old_ni.blk_addr != NULL_ADDR))
2256                 return -EINVAL;
2257 retry:
2258         ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
2259         if (!ipage) {
2260                 congestion_wait(BLK_RW_ASYNC, HZ/50);
2261                 goto retry;
2262         }
2263
2264         /* this ino must no longer be handed out from the free nid list */
2265         remove_free_nid(sbi, ino);
2266
2267         if (!PageUptodate(ipage))
2268                 SetPageUptodate(ipage);
2269         fill_node_footer(ipage, ino, ino, 0, true);
2270
2271         src = F2FS_INODE(page);
2272         dst = F2FS_INODE(ipage);
2273
2274         memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
2275         dst->i_size = 0;
2276         dst->i_blocks = cpu_to_le64(1);
2277         dst->i_links = cpu_to_le32(1);
2278         dst->i_xattr_nid = 0;
2279         dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
2280         if (dst->i_inline & F2FS_EXTRA_ATTR) {
2281                 dst->i_extra_isize = src->i_extra_isize;
2282                 if (f2fs_sb_has_project_quota(sbi->sb) &&
2283                         F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2284                                                                 i_projid))
2285                         dst->i_projid = src->i_projid;
2286         }
2287
2288         new_ni = old_ni;
2289         new_ni.ino = ino;
2290
2291         if (unlikely(inc_valid_node_count(sbi, NULL, true)))
2292                 WARN_ON(1);
2293         set_node_addr(sbi, &new_ni, NEW_ADDR, false);
2294         inc_valid_inode_count(sbi);
2295         set_page_dirty(ipage);
2296         f2fs_put_page(ipage, 1);
2297         return 0;
2298 }
2299
2300 int restore_node_summary(struct f2fs_sb_info *sbi,
2301                         unsigned int segno, struct f2fs_summary_block *sum)
2302 {
2303         struct f2fs_node *rn;
2304         struct f2fs_summary *sum_entry;
2305         block_t addr;
2306         int i, idx, last_offset, nrpages;
2307
2308         /* scan the node segment */
2309         last_offset = sbi->blocks_per_seg;
2310         addr = START_BLOCK(sbi, segno);
2311         sum_entry = &sum->entries[0];
2312
2313         for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
2314                 nrpages = min(last_offset - i, BIO_MAX_PAGES);
2315
2316                 /* readahead node pages */
2317                 ra_meta_pages(sbi, addr, nrpages, META_POR, true);
2318
2319                 for (idx = addr; idx < addr + nrpages; idx++) {
2320                         struct page *page = get_tmp_page(sbi, idx);
2321
2322                         rn = F2FS_NODE(page);
2323                         sum_entry->nid = rn->footer.nid;
2324                         sum_entry->version = 0;
2325                         sum_entry->ofs_in_node = 0;
2326                         sum_entry++;
2327                         f2fs_put_page(page, 1);
2328                 }
2329
2330                 invalidate_mapping_pages(META_MAPPING(sbi), addr,
2331                                                         addr + nrpages);
2332         }
2333         return 0;
2334 }
2335
2336 static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
2337 {
2338         struct f2fs_nm_info *nm_i = NM_I(sbi);
2339         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2340         struct f2fs_journal *journal = curseg->journal;
2341         int i;
2342
2343         down_write(&curseg->journal_rwsem);
2344         for (i = 0; i < nats_in_cursum(journal); i++) {
2345                 struct nat_entry *ne;
2346                 struct f2fs_nat_entry raw_ne;
2347                 nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
2348
2349                 raw_ne = nat_in_journal(journal, i);
2350
2351                 ne = __lookup_nat_cache(nm_i, nid);
2352                 if (!ne) {
2353                         ne = grab_nat_entry(nm_i, nid, true);
2354                         node_info_from_raw_nat(&ne->ni, &raw_ne);
2355                 }
2356
2357                 /*
2358                  * if a free nat entry in the journal has not been used since
2359                  * the last checkpoint, remove it from available nids, since
2360                  * it will be added back later.
2361                  */
2362                 if (!get_nat_flag(ne, IS_DIRTY) &&
2363                                 le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
2364                         spin_lock(&nm_i->nid_list_lock);
2365                         nm_i->available_nids--;
2366                         spin_unlock(&nm_i->nid_list_lock);
2367                 }
2368
2369                 __set_nat_cache_dirty(nm_i, ne);
2370         }
2371         update_nats_in_cursum(journal, -i);
2372         up_write(&curseg->journal_rwsem);
2373 }
2374
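/*
 * Insert @nes into @head, keeping the list sorted by entry_cnt in ascending
 * order so that small sets, which may still fit into the journal, are flushed
 * first; sets holding at least @max entries go straight to the tail.
 */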
2375 static void __adjust_nat_entry_set(struct nat_entry_set *nes,
2376                                                 struct list_head *head, int max)
2377 {
2378         struct nat_entry_set *cur;
2379
2380         if (nes->entry_cnt >= max)
2381                 goto add_out;
2382
2383         list_for_each_entry(cur, head, set_list) {
2384                 if (cur->entry_cnt >= nes->entry_cnt) {
2385                         list_add(&nes->set_list, cur->set_list.prev);
2386                         return;
2387                 }
2388         }
2389 add_out:
2390         list_add_tail(&nes->set_list, head);
2391 }
2392
2393 static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
2394                                                 struct page *page)
2395 {
2396         struct f2fs_nm_info *nm_i = NM_I(sbi);
2397         unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
2398         struct f2fs_nat_block *nat_blk = page_address(page);
2399         int valid = 0;
2400         int i;
2401
2402         if (!enabled_nat_bits(sbi, NULL))
2403                 return;
2404
2405         for (i = 0; i < NAT_ENTRY_PER_BLOCK; i++) {
2406                 if (start_nid == 0 && i == 0)
2407                         valid++;
2408                 if (nat_blk->entries[i].block_addr)
2409                         valid++;
2410         }
2411         if (valid == 0) {
2412                 __set_bit_le(nat_index, nm_i->empty_nat_bits);
2413                 __clear_bit_le(nat_index, nm_i->full_nat_bits);
2414                 return;
2415         }
2416
2417         __clear_bit_le(nat_index, nm_i->empty_nat_bits);
2418         if (valid == NAT_ENTRY_PER_BLOCK)
2419                 __set_bit_le(nat_index, nm_i->full_nat_bits);
2420         else
2421                 __clear_bit_le(nat_index, nm_i->full_nat_bits);
2422 }
2423
2424 static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
2425                 struct nat_entry_set *set, struct cp_control *cpc)
2426 {
2427         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2428         struct f2fs_journal *journal = curseg->journal;
2429         nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
2430         bool to_journal = true;
2431         struct f2fs_nat_block *nat_blk;
2432         struct nat_entry *ne, *cur;
2433         struct page *page = NULL;
2434
2435         /*
2436          * dirty nat entries are flushed to one of two places:
2437          * #1, the journal in the current hot data summary block, or
2438          * #2, the nat page itself.
2439          */
2440         if (enabled_nat_bits(sbi, cpc) ||
2441                 !__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
2442                 to_journal = false;
2443
2444         if (to_journal) {
2445                 down_write(&curseg->journal_rwsem);
2446         } else {
2447                 page = get_next_nat_page(sbi, start_nid);
2448                 nat_blk = page_address(page);
2449                 f2fs_bug_on(sbi, !nat_blk);
2450         }
2451
2452         /* flush dirty nats in nat entry set */
2453         list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
2454                 struct f2fs_nat_entry *raw_ne;
2455                 nid_t nid = nat_get_nid(ne);
2456                 int offset;
2457
2458                 f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);
2459
2460                 if (to_journal) {
2461                         offset = lookup_journal_in_cursum(journal,
2462                                                         NAT_JOURNAL, nid, 1);
2463                         f2fs_bug_on(sbi, offset < 0);
2464                         raw_ne = &nat_in_journal(journal, offset);
2465                         nid_in_journal(journal, offset) = cpu_to_le32(nid);
2466                 } else {
2467                         raw_ne = &nat_blk->entries[nid - start_nid];
2468                 }
2469                 raw_nat_from_node_info(raw_ne, &ne->ni);
2470                 nat_reset_flag(ne);
2471                 __clear_nat_cache_dirty(NM_I(sbi), set, ne);
2472                 if (nat_get_blkaddr(ne) == NULL_ADDR) {
2473                         add_free_nid(sbi, nid, false);
2474                         spin_lock(&NM_I(sbi)->nid_list_lock);
2475                         NM_I(sbi)->available_nids++;
2476                         update_free_nid_bitmap(sbi, nid, true, false);
2477                         spin_unlock(&NM_I(sbi)->nid_list_lock);
2478                 } else {
2479                         spin_lock(&NM_I(sbi)->nid_list_lock);
2480                         update_free_nid_bitmap(sbi, nid, false, false);
2481                         spin_unlock(&NM_I(sbi)->nid_list_lock);
2482                 }
2483         }
2484
2485         if (to_journal) {
2486                 up_write(&curseg->journal_rwsem);
2487         } else {
2488                 __update_nat_bits(sbi, start_nid, page);
2489                 f2fs_put_page(page, 1);
2490         }
2491
2492         /* Allow dirty nats by node block allocation in write_begin */
2493         if (!set->entry_cnt) {
2494                 radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
2495                 kmem_cache_free(nat_entry_set_slab, set);
2496         }
2497 }
2498
2499 /*
2500  * This function is called during the checkpointing process.
2501  */
2502 void flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
2503 {
2504         struct f2fs_nm_info *nm_i = NM_I(sbi);
2505         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2506         struct f2fs_journal *journal = curseg->journal;
2507         struct nat_entry_set *setvec[SETVEC_SIZE];
2508         struct nat_entry_set *set, *tmp;
2509         unsigned int found;
2510         nid_t set_idx = 0;
2511         LIST_HEAD(sets);
2512
2513         if (!nm_i->dirty_nat_cnt)
2514                 return;
2515
2516         down_write(&nm_i->nat_tree_lock);
2517
2518         /*
2519          * if there is not enough space in the journal to store dirty nat
2520          * entries, remove all entries from the journal and merge them
2521          * into the nat entry set.
2522          */
2523         if (enabled_nat_bits(sbi, cpc) ||
2524                 !__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
2525                 remove_nats_in_journal(sbi);
2526
2527         while ((found = __gang_lookup_nat_set(nm_i,
2528                                         set_idx, SETVEC_SIZE, setvec))) {
2529                 unsigned idx;
2530                 set_idx = setvec[found - 1]->set + 1;
2531                 for (idx = 0; idx < found; idx++)
2532                         __adjust_nat_entry_set(setvec[idx], &sets,
2533                                                 MAX_NAT_JENTRIES(journal));
2534         }
2535
2536         /* flush dirty nats in nat entry set */
2537         list_for_each_entry_safe(set, tmp, &sets, set_list)
2538                 __flush_nat_entry_set(sbi, set, cpc);
2539
2540         up_write(&nm_i->nat_tree_lock);
2541         /* Allow dirty nats by node block allocation in write_begin */
2542 }
2543
2544 static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
2545 {
2546         struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2547         struct f2fs_nm_info *nm_i = NM_I(sbi);
2548         unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
2549         unsigned int i;
2550         __u64 cp_ver = cur_cp_version(ckpt);
2551         block_t nat_bits_addr;
2552
2553         if (!enabled_nat_bits(sbi, NULL))
2554                 return 0;
2555
2556         nm_i->nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 +
2557                                                 F2FS_BLKSIZE - 1);
2558         nm_i->nat_bits = kzalloc(nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS,
2559                                                 GFP_KERNEL);
2560         if (!nm_i->nat_bits)
2561                 return -ENOMEM;
2562
2563         nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
2564                                                 nm_i->nat_bits_blocks;
2565         for (i = 0; i < nm_i->nat_bits_blocks; i++) {
2566                 struct page *page = get_meta_page(sbi, nat_bits_addr++);
2567
2568                 memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
2569                                         page_address(page), F2FS_BLKSIZE);
2570                 f2fs_put_page(page, 1);
2571         }
2572
2573         cp_ver |= (cur_cp_crc(ckpt) << 32);
2574         if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
2575                 disable_nat_bits(sbi, true);
2576                 return 0;
2577         }
2578
2579         nm_i->full_nat_bits = nm_i->nat_bits + 8;
2580         nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
2581
2582         f2fs_msg(sbi->sb, KERN_NOTICE, "Found nat_bits in checkpoint");
2583         return 0;
2584 }
2585
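/*
 * The nat_bits area read by __get_nat_bitmaps() starts with an 8-byte
 * checkpoint version/CRC stamp, followed by full_nat_bits and then
 * empty_nat_bits, each nat_blocks / BITS_PER_BYTE bytes long.  A block set
 * in empty_nat_bits contains only free nids, so its free_nid_bitmap can be
 * filled without reading the NAT page; a block set in full_nat_bits contains
 * none and only needs to be marked as scanned.
 */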
2586 static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
2587 {
2588         struct f2fs_nm_info *nm_i = NM_I(sbi);
2589         unsigned int i = 0;
2590         nid_t nid, last_nid;
2591
2592         if (!enabled_nat_bits(sbi, NULL))
2593                 return;
2594
2595         for (i = 0; i < nm_i->nat_blocks; i++) {
2596                 i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
2597                 if (i >= nm_i->nat_blocks)
2598                         break;
2599
2600                 __set_bit_le(i, nm_i->nat_block_bitmap);
2601
2602                 nid = i * NAT_ENTRY_PER_BLOCK;
2603                 last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK;
2604
2605                 spin_lock(&NM_I(sbi)->nid_list_lock);
2606                 for (; nid < last_nid; nid++)
2607                         update_free_nid_bitmap(sbi, nid, true, true);
2608                 spin_unlock(&NM_I(sbi)->nid_list_lock);
2609         }
2610
2611         for (i = 0; i < nm_i->nat_blocks; i++) {
2612                 i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
2613                 if (i >= nm_i->nat_blocks)
2614                         break;
2615
2616                 __set_bit_le(i, nm_i->nat_block_bitmap);
2617         }
2618 }
2619
2620 static int init_node_manager(struct f2fs_sb_info *sbi)
2621 {
2622         struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
2623         struct f2fs_nm_info *nm_i = NM_I(sbi);
2624         unsigned char *version_bitmap;
2625         unsigned int nat_segs;
2626         int err;
2627
2628         nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
2629
2630         /* segment_count_nat includes the paired segments, so divide by 2. */
2631         nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
2632         nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
2633         nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
2634
2635         /* unusable nids: 0, node, meta (and root, already counted as a valid node) */
2636         nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
2637                                                         F2FS_RESERVED_NODE_NUM;
2638         nm_i->nid_cnt[FREE_NID_LIST] = 0;
2639         nm_i->nid_cnt[ALLOC_NID_LIST] = 0;
2640         nm_i->nat_cnt = 0;
2641         nm_i->ram_thresh = DEF_RAM_THRESHOLD;
2642         nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
2643         nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
2644
2645         INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
2646         INIT_LIST_HEAD(&nm_i->nid_list[FREE_NID_LIST]);
2647         INIT_LIST_HEAD(&nm_i->nid_list[ALLOC_NID_LIST]);
2648         INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
2649         INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
2650         INIT_LIST_HEAD(&nm_i->nat_entries);
2651
2652         mutex_init(&nm_i->build_lock);
2653         spin_lock_init(&nm_i->nid_list_lock);
2654         init_rwsem(&nm_i->nat_tree_lock);
2655
2656         nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
2657         nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
2658         version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
2659         if (!version_bitmap)
2660                 return -EFAULT;
2661
2662         nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
2663                                         GFP_KERNEL);
2664         if (!nm_i->nat_bitmap)
2665                 return -ENOMEM;
2666
2667         err = __get_nat_bitmaps(sbi);
2668         if (err)
2669                 return err;
2670
2671 #ifdef CONFIG_F2FS_CHECK_FS
2672         nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
2673                                         GFP_KERNEL);
2674         if (!nm_i->nat_bitmap_mir)
2675                 return -ENOMEM;
2676 #endif
2677
2678         return 0;
2679 }
2680
2681 static int init_free_nid_cache(struct f2fs_sb_info *sbi)
2682 {
2683         struct f2fs_nm_info *nm_i = NM_I(sbi);
2684
2685         nm_i->free_nid_bitmap = kvzalloc(nm_i->nat_blocks *
2686                                         NAT_ENTRY_BITMAP_SIZE, GFP_KERNEL);
2687         if (!nm_i->free_nid_bitmap)
2688                 return -ENOMEM;
2689
2690         nm_i->nat_block_bitmap = kvzalloc(nm_i->nat_blocks / 8,
2691                                                                 GFP_KERNEL);
2692         if (!nm_i->nat_block_bitmap)
2693                 return -ENOMEM;
2694
2695         nm_i->free_nid_count = kvzalloc(nm_i->nat_blocks *
2696                                         sizeof(unsigned short), GFP_KERNEL);
2697         if (!nm_i->free_nid_count)
2698                 return -ENOMEM;
2699         return 0;
2700 }
2701
2702 int build_node_manager(struct f2fs_sb_info *sbi)
2703 {
2704         int err;
2705
2706         sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
2707         if (!sbi->nm_info)
2708                 return -ENOMEM;
2709
2710         err = init_node_manager(sbi);
2711         if (err)
2712                 return err;
2713
2714         err = init_free_nid_cache(sbi);
2715         if (err)
2716                 return err;
2717
2718         /* load free nid status from nat_bits table */
2719         load_free_nid_bitmap(sbi);
2720
2721         build_free_nids(sbi, true, true);
2722         return 0;
2723 }
2724
2725 void destroy_node_manager(struct f2fs_sb_info *sbi)
2726 {
2727         struct f2fs_nm_info *nm_i = NM_I(sbi);
2728         struct free_nid *i, *next_i;
2729         struct nat_entry *natvec[NATVEC_SIZE];
2730         struct nat_entry_set *setvec[SETVEC_SIZE];
2731         nid_t nid = 0;
2732         unsigned int found;
2733
2734         if (!nm_i)
2735                 return;
2736
2737         /* destroy free nid list */
2738         spin_lock(&nm_i->nid_list_lock);
2739         list_for_each_entry_safe(i, next_i, &nm_i->nid_list[FREE_NID_LIST],
2740                                                                         list) {
2741                 __remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
2742                 spin_unlock(&nm_i->nid_list_lock);
2743                 kmem_cache_free(free_nid_slab, i);
2744                 spin_lock(&nm_i->nid_list_lock);
2745         }
2746         f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID_LIST]);
2747         f2fs_bug_on(sbi, nm_i->nid_cnt[ALLOC_NID_LIST]);
2748         f2fs_bug_on(sbi, !list_empty(&nm_i->nid_list[ALLOC_NID_LIST]));
2749         spin_unlock(&nm_i->nid_list_lock);
2750
2751         /* destroy nat cache */
2752         down_write(&nm_i->nat_tree_lock);
2753         while ((found = __gang_lookup_nat_cache(nm_i,
2754                                         nid, NATVEC_SIZE, natvec))) {
2755                 unsigned idx;
2756
2757                 nid = nat_get_nid(natvec[found - 1]) + 1;
2758                 for (idx = 0; idx < found; idx++)
2759                         __del_from_nat_cache(nm_i, natvec[idx]);
2760         }
2761         f2fs_bug_on(sbi, nm_i->nat_cnt);
2762
2763         /* destroy nat set cache */
2764         nid = 0;
2765         while ((found = __gang_lookup_nat_set(nm_i,
2766                                         nid, SETVEC_SIZE, setvec))) {
2767                 unsigned idx;
2768
2769                 nid = setvec[found - 1]->set + 1;
2770                 for (idx = 0; idx < found; idx++) {
2771                         /* entry_cnt is not zero when a cp_error has occurred */
2772                         f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
2773                         radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
2774                         kmem_cache_free(nat_entry_set_slab, setvec[idx]);
2775                 }
2776         }
2777         up_write(&nm_i->nat_tree_lock);
2778
2779         kvfree(nm_i->nat_block_bitmap);
2780         kvfree(nm_i->free_nid_bitmap);
2781         kvfree(nm_i->free_nid_count);
2782
2783         kfree(nm_i->nat_bitmap);
2784         kfree(nm_i->nat_bits);
2785 #ifdef CONFIG_F2FS_CHECK_FS
2786         kfree(nm_i->nat_bitmap_mir);
2787 #endif
2788         sbi->nm_info = NULL;
2789         kfree(nm_i);
2790 }
2791
2792 int __init create_node_manager_caches(void)
2793 {
2794         nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
2795                         sizeof(struct nat_entry));
2796         if (!nat_entry_slab)
2797                 goto fail;
2798
2799         free_nid_slab = f2fs_kmem_cache_create("free_nid",
2800                         sizeof(struct free_nid));
2801         if (!free_nid_slab)
2802                 goto destroy_nat_entry;
2803
2804         nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
2805                         sizeof(struct nat_entry_set));
2806         if (!nat_entry_set_slab)
2807                 goto destroy_free_nid;
2808         return 0;
2809
2810 destroy_free_nid:
2811         kmem_cache_destroy(free_nid_slab);
2812 destroy_nat_entry:
2813         kmem_cache_destroy(nat_entry_slab);
2814 fail:
2815         return -ENOMEM;
2816 }
2817
2818 void destroy_node_manager_caches(void)
2819 {
2820         kmem_cache_destroy(nat_entry_set_slab);
2821         kmem_cache_destroy(free_nid_slab);
2822         kmem_cache_destroy(nat_entry_slab);
2823 }