/*
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */
#define UIDHASH_BITS		8
#define UIDHASH_SZ		(1 << UIDHASH_BITS)
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) ^ uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn(uid))

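/*
 * Worked example (illustrative): for uid 1000 (0x3e8) with
 * UIDHASH_BITS == 8, __uidhashfn folds the high bits back in:
 * (0x3e8 >> 8) ^ 0x3e8 == 0x3 ^ 0x3e8 == 0x3eb, masked to 0xeb,
 * so uids differing only above bit 7 still spread across buckets.
 */
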
static kmem_cache_t *uid_cachep;
static struct user_struct *uidhash_table[UIDHASH_SZ];
static spinlock_t uidhash_lock = SPIN_LOCK_UNLOCKED;

struct user_struct root_user = {
	__count:	ATOMIC_INIT(1),
	processes:	ATOMIC_INIT(1),
	files:		ATOMIC_INIT(0)
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
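/*
 * Each bucket holds a NULL-terminated singly linked chain; every entry
 * also keeps a pprev pointer back at whatever points to it (the bucket
 * slot or the previous entry's next field), so an entry can unlink
 * itself without rescanning the chain.
 */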
static inline void uid_hash_insert(struct user_struct *up, struct user_struct **hashent)
{
	struct user_struct *next = *hashent;

	up->next = next;
	if (next)
		next->pprev = &up->next;
	up->pprev = hashent;
	*hashent = up;
}

static inline void uid_hash_remove(struct user_struct *up)
{
	struct user_struct *next = up->next;
	struct user_struct **pprev = up->pprev;

	if (next)
		next->pprev = pprev;
	*pprev = next;
}

static inline struct user_struct *uid_hash_find(uid_t uid, struct user_struct **hashent)
{
	struct user_struct *next;

	next = *hashent;
	for (;;) {
		struct user_struct *up = next;
		if (next) {
			next = up->next;
			if (up->uid != uid)
				continue;
			/* found it: take a reference under the hash lock */
			atomic_inc(&up->__count);
		}
		return up;	/* NULL once the chain is exhausted */
	}
}

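/*
 * atomic_dec_and_lock() only reports a drop to zero with uidhash_lock
 * already held, so the final reference cannot disappear while a
 * concurrent uid_hash_find() is taking a new one.
 */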
void free_uid(struct user_struct *up)
{
	if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		kmem_cache_free(uid_cachep, up);
		spin_unlock(&uidhash_lock);
	}
}

struct user_struct * alloc_uid(uid_t uid)
{
	struct user_struct **hashent = uidhashentry(uid);
	struct user_struct *up;

	spin_lock(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock(&uidhash_lock);

	if (!up) {
		struct user_struct *new;

		new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
		if (!new)
			return NULL;
		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock(&uidhash_lock);
	}
	return up;
}

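/*
 * Design note: the slab allocation above is done outside uidhash_lock
 * because SLAB_KERNEL allocations may sleep; the price is the re-check
 * under the lock and, on a lost race, one wasted allocation.
 */
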
void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over his NPROC rlimit?  We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it.  -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->__count);
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	current->user = new_user;
	/* drop the reference that current->user held on the old user */
	free_uid(old_user);
}

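/*
 * Sketch of a typical caller (illustrative only, not from this file;
 * the real setuid() paths live in kernel/sys.c):
 *
 *	struct user_struct *new_user = alloc_uid(new_ruid);
 *	if (!new_user)
 *		return -EAGAIN;
 *	switch_uid(new_user);	retargets current->user, takes its own ref
 *	free_uid(new_user);	drop the reference alloc_uid() took
 */
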
static int __init uid_cache_init(void)
{
	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
				       0,
				       SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!uid_cachep)
		panic("Cannot create uid taskcount SLAB cache\n");

	/* Insert the root user immediately - init already runs with this */
	spin_lock(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(0));
	spin_unlock(&uidhash_lock);
	return 0;
}

module_init(uid_cache_init);