/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Fixed a disable_bh()/enable_bh() race (was causing a console lockup)
 *	due to bh_mask_count not being handled atomically.
 *	Copyright (C) 1998 Andrea Arcangeli
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */
#include <linux/config.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/tqueue.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.
   - These softirqs are not masked by global cli() and start_bh_atomic()
     (for clear reasons). Hence, old parts of code still using global locks
     MUST NOT use softirqs, but must insert interfacing routines that acquire
     the global locks. F.e. look at the BHs implementation.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt themselves.
   - Bottom halves: globally serialized, grr...
 */
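/*
 * Illustrative sketch: how a subsystem could register a handler with
 * open_softirq() and later mark it pending with raise_softirq().
 * MY_SOFTIRQ and the my_*() functions are hypothetical names used
 * only for this example.
 */
#if 0
static void my_softirq_action(struct softirq_action *a)
{
	/* Runs from do_softirq() with hard interrupts enabled, only
	   per-cpu serialized: it must lock any shared data itself. */
}

static void __init my_softirq_setup(void)
{
	open_softirq(MY_SOFTIRQ, my_softirq_action, NULL);
}

static void my_irq_handler(void)
{
	raise_softirq(MY_SOFTIRQ);	/* runs on irq exit or in ksoftirqd */
}
#endif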
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;

static struct softirq_action softirq_vec[32] __cacheline_aligned;
/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(unsigned cpu)
{
	struct task_struct * tsk = ksoftirqd_task(cpu);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}
asmlinkage void do_softirq()
{
	int cpu = smp_processor_id();
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	pending = softirq_pending(cpu);

	if (pending) {
		struct softirq_action *h;

		local_bh_disable();
		/* Reset the pending bitmask before enabling irqs */
		softirq_pending(cpu) = 0;
		local_irq_enable();

		/* Run the handler of every softirq that was pending. */
		h = softirq_vec;
		do {
			if (pending & 1)
				h->action(h);
			h++;
			pending >>= 1;
		} while (pending);

		local_irq_disable();
		__local_bh_enable();

		/* Softirqs raised meanwhile are left to ksoftirqd. */
		pending = softirq_pending(cpu);
		if (pending)
			wakeup_softirqd(cpu);
	}

	local_irq_restore(flags);
}
/*
 * This function must run with irq disabled!
 */
inline fastcall void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
{
	__cpu_raise_softirq(cpu, nr);

	/*
	 * If we're in an interrupt or bh, we're done
	 * (this also catches bh-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or bh.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!(local_irq_count(cpu) | local_bh_count(cpu)))
		wakeup_softirqd(cpu);
}
void fastcall raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	cpu_raise_softirq(smp_processor_id(), nr);
	local_irq_restore(flags);
}
void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
{
	softirq_vec[nr].data = data;
	softirq_vec[nr].action = action;
}
struct tasklet_head tasklet_vec[NR_CPUS] __cacheline_aligned;
struct tasklet_head tasklet_hi_vec[NR_CPUS] __cacheline_aligned;
void fastcall __tasklet_schedule(struct tasklet_struct *t)
{
	int cpu = smp_processor_id();
	unsigned long flags;

	local_irq_save(flags);
	t->next = tasklet_vec[cpu].list;
	tasklet_vec[cpu].list = t;
	cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}
void fastcall __tasklet_hi_schedule(struct tasklet_struct *t)
{
	int cpu = smp_processor_id();
	unsigned long flags;

	local_irq_save(flags);
	t->next = tasklet_hi_vec[cpu].list;
	tasklet_hi_vec[cpu].list = t;
	cpu_raise_softirq(cpu, HI_SOFTIRQ);
	local_irq_restore(flags);
}
static void tasklet_action(struct softirq_action *a)
{
	int cpu = smp_processor_id();
	struct tasklet_struct *list;

	local_irq_disable();
	list = tasklet_vec[cpu].list;
	tasklet_vec[cpu].list = NULL;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		/* Running elsewhere or disabled: put it back and retry later. */
		local_irq_disable();
		t->next = tasklet_vec[cpu].list;
		tasklet_vec[cpu].list = t;
		__cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}
static void tasklet_hi_action(struct softirq_action *a)
{
	int cpu = smp_processor_id();
	struct tasklet_struct *list;

	local_irq_disable();
	list = tasklet_hi_vec[cpu].list;
	tasklet_hi_vec[cpu].list = NULL;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		/* Running elsewhere or disabled: put it back and retry later. */
		local_irq_disable();
		t->next = tasklet_hi_vec[cpu].list;
		tasklet_hi_vec[cpu].list = t;
		__cpu_raise_softirq(cpu, HI_SOFTIRQ);
		local_irq_enable();
	}
}
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		current->state = TASK_RUNNING;
		do {
			current->policy |= SCHED_YIELD;
			schedule();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
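/*
 * Illustrative sketch: typical driver use of the tasklet interface above.
 * my_tasklet, my_tasklet_func() and the my_*() callers are hypothetical
 * names used only for this example.
 */
#if 0
static void my_tasklet_func(unsigned long data)
{
	/* Runs in softirq context; never on two cpus at the same time. */
}

static struct tasklet_struct my_tasklet;

static void my_tasklet_setup(void)
{
	tasklet_init(&my_tasklet, my_tasklet_func, 0);
}

static void my_tasklet_irq_handler(void)
{
	tasklet_schedule(&my_tasklet);	/* defer work out of the hard irq */
}

static void my_tasklet_teardown(void)
{
	tasklet_kill(&my_tasklet);	/* wait for any scheduled run to finish */
}
#endif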
static void (*bh_base[32])(void);
struct tasklet_struct bh_task_vec[32];
/* BHs are serialized by the spinlock global_bh_lock.

   It is still possible to implement synchronize_bh() as
   spin_unlock_wait(&global_bh_lock). That operation is not used
   by the kernel now, so this lock is not made private only
   because of wait_on_irq().

   It can be removed only after auditing all the BHs.
 */
spinlock_t global_bh_lock = SPIN_LOCK_UNLOCKED;
static void bh_action(unsigned long nr)
{
	int cpu = smp_processor_id();

	if (!spin_trylock(&global_bh_lock))
		goto resched;

	if (!hardirq_trylock(cpu))
		goto resched_unlock;

	if (bh_base[nr])
		bh_base[nr]();

	hardirq_endlock(cpu);
	spin_unlock(&global_bh_lock);
	return;

resched_unlock:
	spin_unlock(&global_bh_lock);
resched:
	mark_bh(nr);
}
void init_bh(int nr, void (*routine)(void))
{
	bh_base[nr] = routine;
}

void remove_bh(int nr)
{
	tasklet_kill(bh_task_vec+nr);
	bh_base[nr] = NULL;
}
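/*
 * Illustrative sketch: how legacy code uses the old-style BH interface.
 * MY_BH and the my_*() functions are hypothetical names; real users take
 * a slot from the bh enum (e.g. TIMER_BH).
 */
#if 0
static void my_bh_handler(void)
{
	/* Runs under global_bh_lock: at most one BH runs at a time. */
}

static void my_bh_setup(void)
{
	init_bh(MY_BH, my_bh_handler);
}

static void my_bh_irq_handler(void)
{
	mark_bh(MY_BH);		/* schedule the BH via its tasklet */
}

static void my_bh_teardown(void)
{
	remove_bh(MY_BH);
}
#endif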
void __init softirq_init()
{
	int i;

	for (i = 0; i < 32; i++)
		tasklet_init(bh_task_vec+i, bh_action, i);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
}
void __run_task_queue(task_queue *list)
{
	struct list_head head, *next;
	unsigned long flags;

	/* Atomically move the whole queue onto a private list head. */
	spin_lock_irqsave(&tqueue_lock, flags);
	list_add(&head, list);
	list_del_init(list);
	spin_unlock_irqrestore(&tqueue_lock, flags);

	next = head.next;
	while (next != &head) {
		void (*f) (void *);
		struct tq_struct *p;
		void *data;

		p = list_entry(next, struct tq_struct, list);
		next = next->next;
		f = p->routine;
		data = p->data;
		wmb();
		p->sync = 0;
		if (f)
			f(data);
	}
}
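/*
 * Illustrative sketch: queueing deferred work on a task queue.  my_work(),
 * my_task, my_queue and the my_*() callers are hypothetical; most drivers
 * use the predefined queues such as tq_immediate or tq_timer instead of a
 * private one.
 */
#if 0
static void my_work(void *data)
{
	/* Runs when somebody runs the queue. */
}

static struct tq_struct my_task = {
	routine:	my_work,
	data:		NULL,
};

static DECLARE_TASK_QUEUE(my_queue);

static void my_defer(void)
{
	queue_task(&my_task, &my_queue);	/* queued at most once until it runs */
}

static void my_flush(void)
{
	run_task_queue(&my_queue);	/* ends up in __run_task_queue() above */
}
#endif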
static int ksoftirqd(void * __bind_cpu)
{
	int bind_cpu = (int) (long) __bind_cpu;
	int cpu = cpu_logical_map(bind_cpu);

	daemonize();
	current->nice = 19;
	sigfillset(&current->blocked);

	/* Migrate to the right CPU */
	current->cpus_allowed = 1UL << cpu;
	while (smp_processor_id() != cpu)
		schedule();

	sprintf(current->comm, "ksoftirqd_CPU%d", bind_cpu);

	__set_current_state(TASK_INTERRUPTIBLE);
	mb();

	ksoftirqd_task(cpu) = current;

	for (;;) {
		if (!softirq_pending(cpu))
			schedule();

		__set_current_state(TASK_RUNNING);

		while (softirq_pending(cpu)) {
			do_softirq();
			if (current->need_resched)
				schedule();
		}

		__set_current_state(TASK_INTERRUPTIBLE);
	}
}
static __init int spawn_ksoftirqd(void)
{
	int cpu;

	for (cpu = 0; cpu < smp_num_cpus; cpu++) {
		if (kernel_thread(ksoftirqd, (void *) (long) cpu,
				  CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0)
			printk("spawn_ksoftirqd() failed for cpu %d\n", cpu);
		else {
			/* Wait until the per-cpu thread has registered itself. */
			while (!ksoftirqd_task(cpu_logical_map(cpu))) {
				current->policy |= SCHED_YIELD;
				schedule();
			}
		}
	}

	return 0;
}

__initcall(spawn_ksoftirqd);