/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space. Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file. This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously. So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a
 * quota-check program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness. "quota_quantum" sets the maximum time a quota change can sit on
 * one node before being synced to the quota file. (The default is 60
 * seconds.) Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit. The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one. This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes, each with infinite bandwidth) to twice
 * the user's limit. (In practice, the maximum overrun you see should be much
 * less.) A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun. Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
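 *
 * To first order, the scaling works like this: with quota_scale = s and
 * n journals (nodes), need_sync() below forces a sync once a node's local
 * pending change, multiplied by n * s, would carry the cached value past
 * the limit. So with s = 1, each of n nodes can be sitting on just under
 * (limit - value) / n of un-synced change when the limit is reached,
 * which is where the factor-of-two worst case above comes from.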
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/tty.h>
#include <linux/sort.h>

#include <asm/semaphore.h>

#include "ops_address.h"
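
/*
 * The quota file interleaves user and group quotas: the user quota for
 * ID n lives at index 2n and the group quota for ID n at index 2n + 1,
 * each entry sizeof(struct gfs2_quota) bytes wide.
 */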
static uint64_t qd2offset(struct gfs2_quota_data *qd)
{
        uint64_t offset;

        offset = 2 * (uint64_t)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
        offset *= sizeof(struct gfs2_quota);

        return offset;
}

static int qd_alloc(struct gfs2_sbd *sdp, int user, uint32_t id,
                    struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd;

        qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_KERNEL);

        set_bit(QDF_USER, &qd->qd_flags);

        error = gfs2_glock_get(sdp, 2 * (uint64_t)id + !user,
                               &gfs2_quota_glops, CREATE, &qd->qd_gl);

        error = gfs2_lvb_hold(qd->qd_gl);
        gfs2_glock_put(qd->qd_gl);

static int qd_get(struct gfs2_sbd *sdp, int user, uint32_t id, int create,
                  struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd = NULL, *new_qd = NULL;

        spin_lock(&sdp->sd_quota_spin);
        list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                if (qd->qd_id == id &&
                    !test_bit(QDF_USER, &qd->qd_flags) == !user) {

        list_add(&qd->qd_list, &sdp->sd_quota_list);
        atomic_inc(&sdp->sd_quota_count);

        spin_unlock(&sdp->sd_quota_spin);

        gfs2_lvb_unhold(new_qd->qd_gl);

        error = qd_alloc(sdp, user, id, &new_qd);

static void qd_hold(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&sdp->sd_quota_spin);
        gfs2_assert(sdp, qd->qd_count);

        spin_unlock(&sdp->sd_quota_spin);

static void qd_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&sdp->sd_quota_spin);
        gfs2_assert(sdp, qd->qd_count);

        qd->qd_last_touched = jiffies;
        spin_unlock(&sdp->sd_quota_spin);
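
/*
 * Each quota_data with a pending change occupies one slot in the
 * per-node quota-change file. Free slots are tracked by the
 * sd_quota_bitmap bit array, one PAGE_SIZE chunk at a time; slot_get()
 * scans it for a clear bit and claims it.
 */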
static int slot_get(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        unsigned int c, o = 0, b;
        unsigned char byte = 0;

        spin_lock(&sdp->sd_quota_spin);

        if (qd->qd_slot_count++) {
                spin_unlock(&sdp->sd_quota_spin);

        for (c = 0; c < sdp->sd_quota_chunks; c++)
                for (o = 0; o < PAGE_SIZE; o++) {
                        byte = sdp->sd_quota_bitmap[c][o];

        for (b = 0; b < 8; b++)
                if (!(byte & (1 << b)))

        qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

        if (qd->qd_slot >= sdp->sd_quota_slots)

        sdp->sd_quota_bitmap[c][o] |= 1 << b;

        spin_unlock(&sdp->sd_quota_spin);

        spin_unlock(&sdp->sd_quota_spin);

static void slot_hold(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&sdp->sd_quota_spin);
        gfs2_assert(sdp, qd->qd_slot_count);

        spin_unlock(&sdp->sd_quota_spin);

static void slot_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&sdp->sd_quota_spin);
        gfs2_assert(sdp, qd->qd_slot_count);
        if (!--qd->qd_slot_count) {
                gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);

        spin_unlock(&sdp->sd_quota_spin);
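
/*
 * bh_get() reads the quota-change file block that holds this qd's slot
 * and caches a pointer to its on-disk struct gfs2_quota_change, so the
 * record can later be updated in place by do_qc().
 */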
static int bh_get(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = get_v2ip(sdp->sd_qc_inode);
        unsigned int block, offset;

        struct buffer_head *bh;

        down(&sdp->sd_quota_mutex);

        if (qd->qd_bh_count++) {
                up(&sdp->sd_quota_mutex);

        block = qd->qd_slot / sdp->sd_qc_per_block;
        offset = qd->qd_slot % sdp->sd_qc_per_block;

        error = gfs2_block_map(ip, block, &new, &dblock, NULL);

        error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT, &bh);

        if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))

        qd->qd_bh_qc = (struct gfs2_quota_change *)
                       (bh->b_data + sizeof(struct gfs2_meta_header) +
                        offset * sizeof(struct gfs2_quota_change));

        up(&sdp->sd_quota_mutex);

        up(&sdp->sd_quota_mutex);

static void bh_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        down(&sdp->sd_quota_mutex);
        gfs2_assert(sdp, qd->qd_bh_count);
        if (!--qd->qd_bh_count) {

        up(&sdp->sd_quota_mutex);
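
/*
 * qd_fish() picks the next quota_data that has a pending change
 * (QDF_CHANGE) and has not yet been synced in the current sync
 * generation, marks it QDF_LOCKED, and hands it to the caller for
 * syncing to the quota file.
 */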
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd = NULL;

        if (sdp->sd_vfs->s_flags & MS_RDONLY)

        spin_lock(&sdp->sd_quota_spin);

        list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
                    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
                    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)

                list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

                set_bit(QDF_LOCKED, &qd->qd_flags);
                gfs2_assert_warn(sdp, qd->qd_count);

                qd->qd_change_sync = qd->qd_change;
                gfs2_assert_warn(sdp, qd->qd_slot_count);

        spin_unlock(&sdp->sd_quota_spin);

        gfs2_assert_warn(sdp, qd->qd_change_sync);

        clear_bit(QDF_LOCKED, &qd->qd_flags);

static int qd_trylock(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        if (sdp->sd_vfs->s_flags & MS_RDONLY)

        spin_lock(&sdp->sd_quota_spin);

        if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
            !test_bit(QDF_CHANGE, &qd->qd_flags)) {
                spin_unlock(&sdp->sd_quota_spin);

        list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

        set_bit(QDF_LOCKED, &qd->qd_flags);
        gfs2_assert_warn(sdp, qd->qd_count);

        qd->qd_change_sync = qd->qd_change;
        gfs2_assert_warn(sdp, qd->qd_slot_count);

        spin_unlock(&sdp->sd_quota_spin);

        gfs2_assert_warn(sdp, qd->qd_change_sync);

        clear_bit(QDF_LOCKED, &qd->qd_flags);

static void qd_unlock(struct gfs2_quota_data *qd)
{
        gfs2_assert_warn(qd->qd_gl->gl_sbd,
                         test_bit(QDF_LOCKED, &qd->qd_flags));
        clear_bit(QDF_LOCKED, &qd->qd_flags);
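
/*
 * qdsb_get() wraps qd_get() and additionally pins a slot in the
 * quota-change file and the buffer that backs it (slot_get() +
 * bh_get()), which is everything a transaction needs to record a
 * change against this ID.
 */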
static int qdsb_get(struct gfs2_sbd *sdp, int user, uint32_t id, int create,
                    struct gfs2_quota_data **qdp)
{
        error = qd_get(sdp, user, id, create, qdp);

        error = slot_get(*qdp);

        error = bh_get(*qdp);

static void qdsb_put(struct gfs2_quota_data *qd)

int gfs2_quota_hold(struct gfs2_inode *ip, uint32_t uid, uint32_t gid)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_quota_data **qd = al->al_qd;

        if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
            gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))

        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)

        error = qdsb_get(sdp, QUOTA_USER, ip->i_di.di_uid, CREATE, qd);

        error = qdsb_get(sdp, QUOTA_GROUP, ip->i_di.di_gid, CREATE, qd);

        if (uid != NO_QUOTA_CHANGE && uid != ip->i_di.di_uid) {
                error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);

        if (gid != NO_QUOTA_CHANGE && gid != ip->i_di.di_gid) {
                error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);

        gfs2_quota_unhold(ip);

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_alloc *al = &ip->i_alloc;

        gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

        for (x = 0; x < al->al_qd_num; x++) {
                qdsb_put(al->al_qd[x]);

static int sort_qd(const void *a, const void *b)
{
        struct gfs2_quota_data *qd_a = *(struct gfs2_quota_data **)a;
        struct gfs2_quota_data *qd_b = *(struct gfs2_quota_data **)b;

        if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
            !test_bit(QDF_USER, &qd_b->qd_flags)) {
                if (test_bit(QDF_USER, &qd_a->qd_flags))

        if (qd_a->qd_id < qd_b->qd_id)

        else if (qd_a->qd_id > qd_b->qd_id)
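
/*
 * do_qc() applies a delta to this qd's record in the node-local
 * quota-change file. It sets QDF_CHANGE (taking slot and buffer
 * references) when the accumulated change first becomes nonzero, and
 * clears it (dropping those references) when the change returns to
 * zero.
 */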
static void do_qc(struct gfs2_quota_data *qd, int64_t change)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = get_v2ip(sdp->sd_qc_inode);
        struct gfs2_quota_change *qc = qd->qd_bh_qc;

        down(&sdp->sd_quota_mutex);
        gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

        if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {

                if (test_bit(QDF_USER, &qd->qd_flags))
                        qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
                qc->qc_id = cpu_to_be32(qd->qd_id);

        x = be64_to_cpu(x) + change;
        qc->qc_change = cpu_to_be64(x);

        spin_lock(&sdp->sd_quota_spin);

        spin_unlock(&sdp->sd_quota_spin);

        gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
        clear_bit(QDF_CHANGE, &qd->qd_flags);

        } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {

        up(&sdp->sd_quota_mutex);

/*
 * This function was mostly borrowed from gfs2_block_truncate_page, which was
 * in turn mostly borrowed from ext3.
 */
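
/*
 * gfs2_adjust_quota() rewrites the 64-bit usage value at offset @loc of
 * the quota file in place through the page cache, then refreshes the
 * cached LVB fields (magic, limit, warn, value) to match.
 */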
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
                             int64_t change, struct gfs2_quota_data *qd)
{
        struct inode *inode = ip->i_vnode;
        struct address_space *mapping = inode->i_mapping;
        unsigned long index = loc >> PAGE_CACHE_SHIFT;
        unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
        unsigned blocksize, iblock, pos;
        struct buffer_head *bh;

        page = grab_cache_page(mapping, index);

        blocksize = inode->i_sb->s_blocksize;
        iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);

        bh = page_buffers(page);

        while (offset >= pos) {
                bh = bh->b_this_page;

        if (!buffer_mapped(bh)) {
                gfs2_get_block(inode, iblock, bh, 1);
                if (!buffer_mapped(bh))

        if (PageUptodate(page))
                set_buffer_uptodate(bh);

        if (!buffer_uptodate(bh)) {
                ll_rw_block(READ, 1, &bh);

                if (!buffer_uptodate(bh))

        gfs2_trans_add_bh(ip->i_gl, bh, 0);

        kaddr = kmap_atomic(page, KM_USER0);
        ptr = (__be64 *)(kaddr + offset);
        value = *ptr = cpu_to_be64(be64_to_cpu(*ptr) + change);
        flush_dcache_page(page);
        kunmap_atomic(kaddr, KM_USER0);

        qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);

        qd->qd_qb.qb_limit = cpu_to_be64(q.qu_limit);
        qd->qd_qb.qb_warn = cpu_to_be64(q.qu_warn);

        qd->qd_qb.qb_value = cpu_to_be64(value);

        page_cache_release(page);
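
/*
 * do_sync() flushes a batch of quota changes into the global quota
 * file: it acquires the glock of every qd in the batch (sorted, so all
 * nodes take them in the same order), reserves the blocks the writes
 * may need, folds each qd_change_sync into the quota file with
 * gfs2_adjust_quota(), and then backs the same amount out of the
 * node-local change file via do_qc().
 */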
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
        struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
        struct gfs2_inode *ip = get_v2ip(sdp->sd_quota_inode);
        unsigned int data_blocks, ind_blocks;
        struct file_ra_state ra_state;
        struct gfs2_holder *ghs, i_gh;

        struct gfs2_quota_data *qd;

        unsigned int nalloc = 0;
        struct gfs2_alloc *al = NULL;

        gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
                               &data_blocks, &ind_blocks);

        ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_KERNEL);

        sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
        for (qx = 0; qx < num_qd; qx++) {
                error = gfs2_glock_nq_init(qda[qx]->qd_gl,
                                           GL_NOCACHE, &ghs[qx]);

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);

        for (x = 0; x < num_qd; x++) {

                offset = qd2offset(qda[x]);
                error = gfs2_write_alloc_required(ip, offset,
                                                  sizeof(struct gfs2_quota),

        al = gfs2_alloc_get(ip);

        al->al_requested = nalloc * (data_blocks + ind_blocks);

        error = gfs2_inplace_reserve(ip);

        error = gfs2_trans_begin(sdp,
                                 al->al_rgd->rd_ri.ri_length +
                                 num_qd * data_blocks +
                                 nalloc * ind_blocks +
                                 RES_DINODE + num_qd +

        error = gfs2_trans_begin(sdp,
                                 num_qd * data_blocks +
                                 RES_DINODE + num_qd, 0);

        file_ra_state_init(&ra_state, ip->i_vnode->i_mapping);
        for (x = 0; x < num_qd; x++) {

                offset = qd2offset(qd);
                error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync,
                                          (struct gfs2_quota_data *)
                                          qd->qd_gl->gl_lvb);

                do_qc(qd, -qd->qd_change_sync);

        gfs2_inplace_release(ip);

        gfs2_glock_dq_uninit(&i_gh);

        gfs2_glock_dq_uninit(&ghs[qx]);

        gfs2_log_flush_glock(ip->i_gl);
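
/*
 * do_glock() takes the qd's glock in the shared state and makes sure
 * the LVB cache of the quota is valid: if a refresh is forced or the
 * LVB magic is wrong, it retakes the lock exclusively, rereads the
 * quota from the quota file, and writes the result back into the LVB.
 */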
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
                    struct gfs2_holder *q_gh)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_holder i_gh;

        char buf[sizeof(struct gfs2_quota)];
        struct file_ra_state ra_state;

        file_ra_state_init(&ra_state, sdp->sd_quota_inode->i_mapping);

        error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);

        gfs2_quota_lvb_in(&qd->qd_qb, qd->qd_gl->gl_lvb);

        if (force_refresh || qd->qd_qb.qb_magic != GFS2_MAGIC) {

                gfs2_glock_dq_uninit(q_gh);
                error = gfs2_glock_nq_init(qd->qd_gl,
                                           LM_ST_EXCLUSIVE, GL_NOCACHE,

                error = gfs2_glock_nq_init(get_v2ip(sdp->sd_quota_inode)->i_gl,

                memset(buf, 0, sizeof(struct gfs2_quota));

                error = gfs2_internal_read(get_v2ip(sdp->sd_quota_inode),

                                           sizeof(struct gfs2_quota));

                gfs2_glock_dq_uninit(&i_gh);

                gfs2_quota_in(&q, buf);

                memset(&qd->qd_qb, 0, sizeof(struct gfs2_quota_lvb));
                qd->qd_qb.qb_magic = GFS2_MAGIC;
                qd->qd_qb.qb_limit = q.qu_limit;
                qd->qd_qb.qb_warn = q.qu_warn;
                qd->qd_qb.qb_value = q.qu_value;

                gfs2_quota_lvb_out(&qd->qd_qb, qd->qd_gl->gl_lvb);

                if (gfs2_glock_is_blocking(qd->qd_gl)) {
                        gfs2_glock_dq_uninit(q_gh);

        gfs2_glock_dq_uninit(&i_gh);

        gfs2_glock_dq_uninit(q_gh);
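
/*
 * gfs2_quota_lock() takes a shared glock (and with it a current LVB
 * copy of the quota) for every quota_data held on the inode, sorting
 * the array first so concurrent lockers acquire the glocks in a
 * consistent order.
 */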
int gfs2_quota_lock(struct gfs2_inode *ip, uint32_t uid, uint32_t gid)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_alloc *al = &ip->i_alloc;

        gfs2_quota_hold(ip, uid, gid);

        if (capable(CAP_SYS_RESOURCE) ||
            sdp->sd_args.ar_quota != GFS2_QUOTA_ON)

        sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),

        for (x = 0; x < al->al_qd_num; x++) {
                error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);

        set_bit(GIF_QD_LOCKED, &ip->i_flags);

        gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
        gfs2_quota_unhold(ip);
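
/*
 * need_sync() implements the quota_scale heuristic from the comment at
 * the top of this file: scale the local pending change by the number of
 * journals times quota_scale_num / quota_scale_den, and ask for a sync
 * if adding that to the cached value would cross the limit.
 */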
static int need_sync(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_tune *gt = &sdp->sd_tune;

        unsigned int num, den;

        if (!qd->qd_qb.qb_limit)

        spin_lock(&sdp->sd_quota_spin);
        value = qd->qd_change;
        spin_unlock(&sdp->sd_quota_spin);

        spin_lock(&gt->gt_spin);
        num = gt->gt_quota_scale_num;
        den = gt->gt_quota_scale_den;
        spin_unlock(&gt->gt_spin);

        else if (qd->qd_qb.qb_value >= (int64_t)qd->qd_qb.qb_limit)

        value *= gfs2_jindex_size(sdp) * num;

        value += qd->qd_qb.qb_value;
        if (value < (int64_t)qd->qd_qb.qb_limit)

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_quota_data *qda[4];
        unsigned int count = 0;

        if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))

        for (x = 0; x < al->al_qd_num; x++) {
                struct gfs2_quota_data *qd;

                sync = need_sync(qd);

                gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

                if (sync && qd_trylock(qd))

        for (x = 0; x < count; x++)

        gfs2_quota_unhold(ip);

static int print_message(struct gfs2_quota_data *qd, char *type)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        line = kmalloc(MAX_LINE, GFP_KERNEL);

        len = snprintf(line, MAX_LINE - 1,
                       "GFS2: fsid=%s: quota %s for %s %u\r\n",
                       sdp->sd_fsname, type,
                       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",

        line[MAX_LINE - 1] = 0;

        if (current->signal) { /* Is this test still required? */
                tty_write_message(current->signal->tty, line);

int gfs2_quota_check(struct gfs2_inode *ip, uint32_t uid, uint32_t gid)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_quota_data *qd;

        if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))

        if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)

        for (x = 0; x < al->al_qd_num; x++) {

                if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
                      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))

                value = qd->qd_qb.qb_value;
                spin_lock(&sdp->sd_quota_spin);
                value += qd->qd_change;
                spin_unlock(&sdp->sd_quota_spin);

                if (qd->qd_qb.qb_limit && (int64_t)qd->qd_qb.qb_limit < value) {
                        print_message(qd, "exceeded");

                } else if (qd->qd_qb.qb_warn &&
                           (int64_t)qd->qd_qb.qb_warn < value &&
                           time_after_eq(jiffies, qd->qd_last_warn +
                                         gfs2_tune_get(sdp,
                                                 gt_quota_warn_period) * HZ)) {
                        error = print_message(qd, "warning");
                        qd->qd_last_warn = jiffies;

void gfs2_quota_change(struct gfs2_inode *ip, int64_t change,
                       uint32_t uid, uint32_t gid)
{
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_quota_data *qd;

        unsigned int found = 0;

        if (gfs2_assert_warn(ip->i_sbd, change))

        if (ip->i_di.di_flags & GFS2_DIF_SYSTEM)

        for (x = 0; x < al->al_qd_num; x++) {

                if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
                    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
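
/*
 * gfs2_quota_sync() bumps the sync generation and then repeatedly
 * fishes out dirty quota_data structs with qd_fish(), syncing them to
 * the quota file in batches of at most gt_quota_simul_sync at a time.
 */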
int gfs2_quota_sync(struct gfs2_sbd *sdp)
{
        struct gfs2_quota_data **qda;
        unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
        unsigned int num_qd;

        sdp->sd_quota_sync_gen++;

        qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);

                error = qd_fish(sdp, qda + num_qd);
                if (error || !qda[num_qd])
                if (++num_qd == max_qd)

                error = do_sync(num_qd, qda);

                for (x = 0; x < num_qd; x++)
                        qda[x]->qd_sync_gen = sdp->sd_quota_sync_gen;

                for (x = 0; x < num_qd; x++)

        } while (!error && num_qd == max_qd);

int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, uint32_t id)
{
        struct gfs2_quota_data *qd;
        struct gfs2_holder q_gh;

        error = qd_get(sdp, user, id, CREATE, &qd);

        error = do_glock(qd, FORCE, &q_gh);

        gfs2_glock_dq_uninit(&q_gh);

int gfs2_quota_read(struct gfs2_sbd *sdp, int user, uint32_t id,
                    struct gfs2_quota *q)
{
        struct gfs2_quota_data *qd;
        struct gfs2_holder q_gh;

        if (((user) ? (id != current->fsuid) : (!in_group_p(id))) &&
            !capable(CAP_SYS_ADMIN))

        error = qd_get(sdp, user, id, CREATE, &qd);

        error = do_glock(qd, NO_FORCE, &q_gh);

        memset(q, 0, sizeof(struct gfs2_quota));
        q->qu_limit = qd->qd_qb.qb_limit;
        q->qu_warn = qd->qd_qb.qb_warn;
        q->qu_value = qd->qd_qb.qb_value;

        spin_lock(&sdp->sd_quota_spin);
        q->qu_value += qd->qd_change;
        spin_unlock(&sdp->sd_quota_spin);

        gfs2_glock_dq_uninit(&q_gh);
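
/*
 * gfs2_quota_init() runs at mount time. It walks every block of the
 * per-node quota-change file, builds the slot bitmap, and instantiates
 * an in-core gfs2_quota_data (with QDF_CHANGE set) for every slot whose
 * on-disk change is nonzero, so changes left behind by a crash still
 * get synced.
 */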
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip = get_v2ip(sdp->sd_qc_inode);
        unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
        unsigned int x, slot = 0;
        unsigned int found = 0;

        uint32_t extlen = 0;

        if (!ip->i_di.di_size ||
            ip->i_di.di_size > (64 << 20) ||
            ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
                gfs2_consist_inode(ip);

        sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
        sdp->sd_quota_chunks = DIV_RU(sdp->sd_quota_slots, 8 * PAGE_SIZE);

        sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
                                       sizeof(unsigned char *), GFP_KERNEL);
        if (!sdp->sd_quota_bitmap)

        for (x = 0; x < sdp->sd_quota_chunks; x++) {
                sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL);
                if (!sdp->sd_quota_bitmap[x])

        for (x = 0; x < blocks; x++) {
                struct buffer_head *bh;

                error = gfs2_block_map(ip, x, &new, &dblock, &extlen);

                gfs2_meta_ra(ip->i_gl, dblock, extlen);
                error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT,
                                       &bh);

                if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {

                for (y = 0;
                     y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
                     y++, slot++) {
                        struct gfs2_quota_change qc;
                        struct gfs2_quota_data *qd;

                        gfs2_quota_change_in(&qc, bh->b_data +
                                             sizeof(struct gfs2_meta_header) +
                                             y * sizeof(struct gfs2_quota_change));

                        error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),

                        set_bit(QDF_CHANGE, &qd->qd_flags);
                        qd->qd_change = qc.qc_change;

                        qd->qd_slot_count = 1;
                        qd->qd_last_touched = jiffies;

                        spin_lock(&sdp->sd_quota_spin);
                        gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
                        list_add(&qd->qd_list, &sdp->sd_quota_list);
                        atomic_inc(&sdp->sd_quota_count);
                        spin_unlock(&sdp->sd_quota_spin);

        fs_info(sdp, "found %u quota changes\n", found);

        gfs2_quota_cleanup(sdp);
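
/*
 * gfs2_quota_scan() ages out cached quota_data structs: anything with
 * no holders that has not been touched for gt_quota_cache_secs is moved
 * to a local list under the spinlock and then freed outside it.
 */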
void gfs2_quota_scan(struct gfs2_sbd *sdp)
{
        struct gfs2_quota_data *qd, *safe;

        spin_lock(&sdp->sd_quota_spin);
        list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
                if (!qd->qd_count &&
                    time_after_eq(jiffies, qd->qd_last_touched +
                                  gfs2_tune_get(sdp,
                                          gt_quota_cache_secs) * HZ)) {
                        list_move(&qd->qd_list, &dead);
                        gfs2_assert_warn(sdp,
                                         atomic_read(&sdp->sd_quota_count) > 0);
                        atomic_dec(&sdp->sd_quota_count);

        spin_unlock(&sdp->sd_quota_spin);

        while (!list_empty(&dead)) {
                qd = list_entry(dead.next, struct gfs2_quota_data, qd_list);
                list_del(&qd->qd_list);

                gfs2_assert_warn(sdp, !qd->qd_change);
                gfs2_assert_warn(sdp, !qd->qd_slot_count);
                gfs2_assert_warn(sdp, !qd->qd_bh_count);

                gfs2_lvb_unhold(qd->qd_gl);

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
        struct list_head *head = &sdp->sd_quota_list;
        struct gfs2_quota_data *qd;

        spin_lock(&sdp->sd_quota_spin);
        while (!list_empty(head)) {
                qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

                if (qd->qd_count > 1 ||
                    (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
                        list_move(&qd->qd_list, head);
                        spin_unlock(&sdp->sd_quota_spin);

                        spin_lock(&sdp->sd_quota_spin);

                list_del(&qd->qd_list);
                atomic_dec(&sdp->sd_quota_count);
                spin_unlock(&sdp->sd_quota_spin);

                if (!qd->qd_count) {
                        gfs2_assert_warn(sdp, !qd->qd_change);
                        gfs2_assert_warn(sdp, !qd->qd_slot_count);

                        gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
                        gfs2_assert_warn(sdp, !qd->qd_bh_count);

                gfs2_lvb_unhold(qd->qd_gl);

                spin_lock(&sdp->sd_quota_spin);

        spin_unlock(&sdp->sd_quota_spin);

        gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

        if (sdp->sd_quota_bitmap) {
                for (x = 0; x < sdp->sd_quota_chunks; x++)
                        kfree(sdp->sd_quota_bitmap[x]);
                kfree(sdp->sd_quota_bitmap);