www.usr.com/support/gpl/USR9113_release1.0.tar.gz
[bcm963xx.git] / kernel/linux/kernel/softirq.c
index c336ae2..27d74f0 100755
@@ -17,6 +17,9 @@
 #include <linux/kthread.h>
 
 #include <asm/irq.h>
+
+#include <linux/syscalls.h>
+
 /*
    - No shared variables, all the data are CPU local.
    - If a softirq needs serialization, let it serialize itself
@@ -44,6 +47,8 @@ static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;
 
 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
+int defer_softirqs = 1;
+
 /*
  * we cannot loop indefinitely here to avoid userspace starvation,
  * but we also don't want to introduce a worst case 1/HZ latency
@@ -70,10 +75,10 @@ static inline void wakeup_softirqd(void)
  */
 #define MAX_SOFTIRQ_RESTART 10
 
-asmlinkage void __do_softirq(void)
+asmlinkage void ___do_softirq(void)
 {
        struct softirq_action *h;
-       __u32 pending;
+       __u32 pending, mask;
        int max_restart = MAX_SOFTIRQ_RESTART;
 
        pending = local_softirq_pending();
@@ -87,11 +92,26 @@ restart:
 
        h = softirq_vec;
 
+       mask = 1;
        do {
-               if (pending & 1)
+               if (pending & mask) {
+                       pending ^= mask; /* clear the bit */
                        h->action(h);
+                       if (unlikely(defer_softirqs && need_resched())) {
+                               /*
+                                * We are interrupting softirq processing
+                                * to allow for preemption to occur. Add
+                                * back any non-processed pending bits to
+                                * this CPU's mask:
+                                */
+                               local_irq_disable();
+                               local_softirq_pending() |= pending;
+                               local_irq_enable();
+                               goto out;
+                       }
+               }
                h++;
-               pending >>= 1;
+               mask <<= 1;
        } while (pending);
 
        local_irq_disable();
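
The hunk above is the core of the change: the original code walked the pending word with `pending >>= 1`, which destroys the bit positions needed to stop early, while the new code keeps `pending` intact apart from explicitly clearing each handled bit with `pending ^= mask`. Whatever is still set when `need_resched()` fires can therefore be OR-ed straight back into the per-CPU pending word before jumping to `out`. The following stand-alone C sketch (hypothetical `handle()` callback and bit values, not taken from the patch) shows the same bookkeeping in user space:

#include <stdio.h>

/* stand-in for h->action(h) in ___do_softirq() */
static void handle(int nr)
{
        printf("servicing softirq bit %d\n", nr);
}

/*
 * Walk the pending bits the way the patched loop does: clear each bit
 * as it is handled, and on an early exit return the bits that are
 * still outstanding so the caller can merge them back.
 */
static unsigned int walk(unsigned int pending, int stop_after)
{
        unsigned int mask = 1;
        int nr = 0;

        do {
                if (pending & mask) {
                        pending ^= mask;        /* clear the handled bit */
                        handle(nr);
                        if (nr == stop_after)   /* models need_resched() */
                                return pending; /* caller re-queues these */
                }
                mask <<= 1;
                nr++;
        } while (pending);

        return 0;
}

int main(void)
{
        /* bits 1, 3 and 5 pending; pretend a reschedule hits after bit 3 */
        unsigned int left = walk(0x2a, 3);

        printf("bits left for later: %#x\n", left);      /* prints 0x20 */
        return 0;
}
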
@@ -103,9 +123,34 @@ restart:
        if (pending)
                wakeup_softirqd();
 
+out:
        __local_bh_enable();
 }
 
+asmlinkage void __do_softirq(void)
+{
+       if (likely(defer_softirqs)) {
+               /*
+                * 'preempt harder'. Push all softirq processing off
+                * to ksoftirqd. This makes softirq related latencies
+                * much more predictable since they run from process
+                * context instead of hardirq context.
+                */
+               if (local_softirq_pending())
+                       wakeup_softirqd();
+               return;
+       }
+       ___do_softirq();
+}
+
+asmlinkage void _do_softirq(void)
+{
+       local_irq_disable();
+       ___do_softirq();
+       local_irq_enable();
+}
+
+
 #ifndef __ARCH_HAS_DO_SOFTIRQ
 
 asmlinkage void do_softirq(void)
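
`defer_softirqs` is hard-wired to 1 above and nothing in the patch exports it, so the old inline behaviour can only be restored by editing the source. If a run-time switch were wanted, a conventional 2.6-era approach would be a boot parameter handled with `__setup()`; the sketch below is purely illustrative and not part of the USR patch (the `defer_softirqs=` parameter name is invented here):

/*
 * Hypothetical addition, not in the patch: allow "defer_softirqs=0" on
 * the kernel command line to restore the stock inline behaviour.
 * Assumes <linux/init.h> is included for __setup().
 */
static int __init defer_softirqs_setup(char *str)
{
        defer_softirqs = !!simple_strtoul(str, NULL, 0);
        return 1;
}
__setup("defer_softirqs=", defer_softirqs_setup);

A /proc/sys entry via the sysctl table would work just as well; the boot parameter is simply the smallest change.
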
@@ -322,8 +367,15 @@ void __init softirq_init(void)
 
 static int ksoftirqd(void * __bind_cpu)
 {
-       set_user_nice(current, 19);
+#if 1
+       set_user_nice(current, -5);
        current->flags |= PF_NOFREEZE;
+#else /* alternative if we want more priority for ksoftirqd */
+       struct sched_param param = { .sched_priority = MAX_RT_PRIO/4-1 };
+       param.sched_priority = 1;
+       sys_sched_setscheduler(current->pid, SCHED_RR, &param);
+       current->flags |= PF_NOFREEZE;
+#endif
 
        set_current_state(TASK_INTERRUPTIBLE);
 
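
For ksoftirqd's priority the patch settles on nice -5 (`#if 1`) and keeps a disabled SCHED_RR alternative around; note that the `#else` branch initialises `sched_priority` to `MAX_RT_PRIO/4-1` only to overwrite it with 1 on the next line, and its call to `sys_sched_setscheduler()` is presumably why the `<linux/syscalls.h>` include was added in the first hunk. The same real-time promotion can be done from user space without rebuilding the kernel, e.g. `chrt -r -p 1 <pid-of-ksoftirqd/0>`, or with a small helper such as this sketch (not part of the patch; the PID comes from the command line):

/* usage: ./ksoftirqd-rr <pid>   -- e.g. the pid of ksoftirqd/0 */
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

int main(int argc, char **argv)
{
        struct sched_param param = { .sched_priority = 1 };
        pid_t pid;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <pid>\n", argv[0]);
                return 1;
        }
        pid = (pid_t)atoi(argv[1]);

        /* user-space equivalent of the disabled SCHED_RR branch */
        if (sched_setscheduler(pid, SCHED_RR, &param) != 0) {
                perror("sched_setscheduler");
                return 1;
        }
        return 0;
}
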
@@ -340,7 +392,7 @@ static int ksoftirqd(void * __bind_cpu)
                        preempt_disable();
                        if (cpu_is_offline((long)__bind_cpu))
                                goto wait_to_die;
-                       do_softirq();
+                       _do_softirq();
                        preempt_enable();
                        cond_resched();
                }