X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=kernel%2Flinux%2Fkernel%2Fsoftirq.c;fp=kernel%2Flinux%2Fkernel%2Fsoftirq.c;h=27d74f08ff45b1d77dd29e178511f1399603e67b;hb=6adeba4d92a546ebbadde2562283ee6b984b22c1;hp=c336ae21b5d7deedfa2866bf3280a5d3f472a599;hpb=dacd86d83a9fb430cca42cb78a67f9d46e289f5c;p=bcm963xx.git

diff --git a/kernel/linux/kernel/softirq.c b/kernel/linux/kernel/softirq.c
index c336ae21..27d74f08 100755
--- a/kernel/linux/kernel/softirq.c
+++ b/kernel/linux/kernel/softirq.c
@@ -17,6 +17,9 @@
 #include <...>
 #include <...>
+
+#include <...>
+
 /*
    - No shared variables, all the data are CPU local.
    - If a softirq needs serialization, let it serialize itself
@@ -44,6 +47,8 @@ static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;
 
 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
+int defer_softirqs = 1;
+
 /*
  * we cannot loop indefinitely here to avoid userspace starvation,
  * but we also don't want to introduce a worst case 1/HZ latency
@@ -70,10 +75,10 @@ static inline void wakeup_softirqd(void)
  */
 #define MAX_SOFTIRQ_RESTART 10
 
-asmlinkage void __do_softirq(void)
+asmlinkage void ___do_softirq(void)
 {
         struct softirq_action *h;
-        __u32 pending;
+        __u32 pending, mask;
         int max_restart = MAX_SOFTIRQ_RESTART;
 
         pending = local_softirq_pending();
@@ -87,11 +92,26 @@ restart:
 
         h = softirq_vec;
 
+        mask = 1;
         do {
-                if (pending & 1)
+                if (pending & mask) {
+                        pending ^= mask; /* clear the bit */
                         h->action(h);
+                        if (unlikely(defer_softirqs && need_resched())) {
+                                /*
+                                 * We are interrupting softirq processing
+                                 * to allow for preemption to occur. Add
+                                 * back any non-processed pending bits to
+                                 * this CPU's mask:
+                                 */
+                                local_irq_disable();
+                                local_softirq_pending() |= pending;
+                                local_irq_enable();
+                                goto out;
+                        }
+                }
                 h++;
-                pending >>= 1;
+                mask <<= 1;
         } while (pending);
 
         local_irq_disable();
@@ -103,9 +123,34 @@ restart:
         if (pending)
                 wakeup_softirqd();
 
+out:
         __local_bh_enable();
 }
 
+asmlinkage void __do_softirq(void)
+{
+        if (likely(defer_softirqs)) {
+                /*
+                 * 'preempt harder'. Push all softirq processing off
+                 * to ksoftirqd. This makes softirq related latencies
+                 * much more predictable since they run from process
+                 * context instead of hardirq context.
+                 */
+                if (local_softirq_pending())
+                        wakeup_softirqd();
+                return;
+        }
+        ___do_softirq();
+}
+
+asmlinkage void _do_softirq(void)
+{
+        local_irq_disable();
+        ___do_softirq();
+        local_irq_enable();
+}
+
+
 #ifndef __ARCH_HAS_DO_SOFTIRQ
 
 asmlinkage void do_softirq(void)
@@ -322,8 +367,15 @@ void __init softirq_init(void)
 
 static int ksoftirqd(void * __bind_cpu)
 {
-        set_user_nice(current, 19);
+#if 1
+        set_user_nice(current, -5);
         current->flags |= PF_NOFREEZE;
+#else /* alternative if we want more priority for ksoftirqd */
+        struct sched_param param = { .sched_priority = MAX_RT_PRIO/4-1 };
+        param.sched_priority = 1;
+        sys_sched_setscheduler(current->pid, SCHED_RR, &param);
+        current->flags |= PF_NOFREEZE;
+#endif
 
         set_current_state(TASK_INTERRUPTIBLE);
 
@@ -340,7 +392,7 @@ static int ksoftirqd(void * __bind_cpu)
                 preempt_disable();
                 if (cpu_is_offline((long)__bind_cpu))
                         goto wait_to_die;
-                do_softirq();
+                _do_softirq();
                 preempt_enable();
                 cond_resched();
         }
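
For readers skimming the ___do_softirq() hunk above: the sketch below is a minimal user-space illustration (not kernel code) of the same technique. It walks a pending bitmask with a moving one-bit mask, clears each bit before running its handler, and bails out early, handing the unprocessed bits back to the caller, once a deferral condition arises. The names process_pending(), run_handler(), should_defer() and NR_SLOTS are invented for this sketch and do not appear in the patch.

/*
 * Minimal user-space sketch of the mask-based deferral loop added to
 * ___do_softirq() above. All identifiers here are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

#define NR_SLOTS 32

static void run_handler(int nr)
{
        printf("handling slot %d\n", nr);
}

/* Pretend a reschedule becomes pending after the second handler runs. */
static int should_defer(int handled)
{
        return handled >= 2;
}

/*
 * Walk 'pending' with a moving one-bit mask, clearing each bit before
 * its handler runs. If deferral is requested mid-walk, return the
 * still-unhandled bits so the caller can queue them for later; this
 * mirrors "local_softirq_pending() |= pending; goto out;" in the patch.
 */
static int process_pending(uint32_t pending, uint32_t *deferred)
{
        uint32_t mask = 1;
        int nr = 0, handled = 0;

        do {
                if (pending & mask) {
                        pending ^= mask;        /* clear the bit first */
                        run_handler(nr);
                        handled++;
                        if (should_defer(handled)) {
                                *deferred = pending;    /* hand the rest back */
                                return handled;
                        }
                }
                nr++;
                mask <<= 1;
        } while (pending && nr < NR_SLOTS);

        *deferred = 0;
        return handled;
}

int main(void)
{
        uint32_t deferred;
        int handled = process_pending(0x8B, &deferred); /* bits 0, 1, 3, 7 */

        printf("handled %d, deferred bits: 0x%x\n", handled, deferred);
        return 0;
}

Run as-is, this handles slots 0 and 1, then reports bits 3 and 7 (0x88) as deferred, which is the behaviour the patch relies on when it re-queues unfinished softirqs for ksoftirqd.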
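
The #else branch in the ksoftirqd() hunk shows an alternative of moving ksoftirqd onto the SCHED_RR real-time class via sys_sched_setscheduler(). A rough user-space analogue, assuming the standard sched_setscheduler(2) call and the same priority value 1 that the patch forces into param.sched_priority, might look like the sketch below (it needs CAP_SYS_NICE, e.g. root).

/*
 * User-space analogue of the "#else" branch above: put the calling
 * task on SCHED_RR with priority 1, the same override the patch
 * applies to ksoftirqd in-kernel. Illustrative only.
 */
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

int main(void)
{
        struct sched_param param;

        memset(&param, 0, sizeof(param));
        param.sched_priority = 1;       /* lowest RT priority, as in the patch */

        if (sched_setscheduler(0, SCHED_RR, &param) == -1) {
                fprintf(stderr, "sched_setscheduler: %s\n", strerror(errno));
                return 1;
        }

        printf("now running under SCHED_RR, priority %d\n", param.sched_priority);
        return 0;
}

Priority 1 is the lowest real-time priority on Linux, so a thread scheduled this way still preempts every SCHED_NORMAL task while yielding to higher-priority real-time work.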