Merge branches 'release', 'cpuidle-2.6.25' and 'idle' into release
author    Len Brown <len.brown@intel.com>
          Thu, 7 Feb 2008 08:11:05 +0000 (03:11 -0500)
committer Len Brown <len.brown@intel.com>
          Thu, 7 Feb 2008 08:11:05 +0000 (03:11 -0500)
arch/x86/Kconfig
drivers/acpi/processor_idle.c
drivers/cpuidle/Kconfig
drivers/cpuidle/cpuidle.c
include/acpi/processor.h
include/linux/cpuidle.h

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e6728bd..3954ae9 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -98,6 +98,9 @@ config ARCH_HAS_ILOG2_U32
 config ARCH_HAS_ILOG2_U64
        def_bool n
 
+config ARCH_HAS_CPU_IDLE_WAIT
+       def_bool y
+
 config GENERIC_CALIBRATE_DELAY
        def_bool y
 
@@ -105,6 +108,9 @@ config GENERIC_TIME_VSYSCALL
        bool
        default X86_64
 
+config ARCH_HAS_CPU_RELAX
+       def_bool y
+
 config HAVE_SETUP_PER_CPU_AREA
        def_bool X86_64
 
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 199ea21..32003fd 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -98,6 +98,9 @@ module_param(bm_history, uint, 0644);
 
 static int acpi_processor_set_power_policy(struct acpi_processor *pr);
 
+#else  /* CONFIG_CPU_IDLE */
+static unsigned int latency_factor __read_mostly = 2;
+module_param(latency_factor, uint, 0644);
 #endif
 
 /*
@@ -201,6 +204,10 @@ static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
                return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
 }
 
+/*
+ * Callers should disable interrupts before the call and enable
+ * interrupts after return.
+ */
 static void acpi_safe_halt(void)
 {
        current_thread_info()->status &= ~TS_POLLING;
@@ -261,7 +268,7 @@ static atomic_t c3_cpu_count;
 /* Common C-state entry for C2, C3, .. */
 static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 {
-       if (cstate->space_id == ACPI_CSTATE_FFH) {
+       if (cstate->entry_method == ACPI_CSTATE_FFH) {
                /* Call into architectural FFH based C-state */
                acpi_processor_ffh_cstate_enter(cstate);
        } else {
@@ -413,6 +420,8 @@ static void acpi_processor_idle(void)
                        pm_idle_save();
                else
                        acpi_safe_halt();
+
+               local_irq_enable();
                return;
        }
 
@@ -521,6 +530,7 @@ static void acpi_processor_idle(void)
                 *       skew otherwise.
                 */
                sleep_ticks = 0xFFFFFFFF;
+               local_irq_enable();
                break;
 
        case ACPI_STATE_C2:
@@ -922,20 +932,20 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
                cx.address = reg->address;
                cx.index = current_count + 1;
 
-               cx.space_id = ACPI_CSTATE_SYSTEMIO;
+               cx.entry_method = ACPI_CSTATE_SYSTEMIO;
                if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
                        if (acpi_processor_ffh_cstate_probe
                                        (pr->id, &cx, reg) == 0) {
-                               cx.space_id = ACPI_CSTATE_FFH;
-                       } else if (cx.type != ACPI_STATE_C1) {
+                               cx.entry_method = ACPI_CSTATE_FFH;
+                       } else if (cx.type == ACPI_STATE_C1) {
                                /*
                                 * C1 is a special case where FIXED_HARDWARE
                                 * can be handled in non-MWAIT way as well.
                                 * In that case, save this _CST entry info.
-                                * That is, we retain space_id of SYSTEM_IO for
-                                * halt based C1.
                                 * Otherwise, ignore this info and continue.
                                 */
+                               cx.entry_method = ACPI_CSTATE_HALT;
+                       } else {
                                continue;
                        }
                }
@@ -1369,12 +1379,16 @@ static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
 /**
  * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
  * @cx: cstate data
+ *
+ * Caller disables interrupt before call and enables interrupt after return.
  */
 static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 {
-       if (cx->space_id == ACPI_CSTATE_FFH) {
+       if (cx->entry_method == ACPI_CSTATE_FFH) {
                /* Call into architectural FFH based C-state */
                acpi_processor_ffh_cstate_enter(cx);
+       } else if (cx->entry_method == ACPI_CSTATE_HALT) {
+               acpi_safe_halt();
        } else {
                int unused;
                /* IO port based C-state */
@@ -1396,21 +1410,27 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 static int acpi_idle_enter_c1(struct cpuidle_device *dev,
                              struct cpuidle_state *state)
 {
+       u32 t1, t2;
        struct acpi_processor *pr;
        struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+
        pr = processors[smp_processor_id()];
 
        if (unlikely(!pr))
                return 0;
 
+       local_irq_disable();
        if (pr->flags.bm_check)
                acpi_idle_update_bm_rld(pr, cx);
 
-       acpi_safe_halt();
+       t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+       acpi_idle_do_entry(cx);
+       t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 
+       local_irq_enable();
        cx->usage++;
 
-       return 0;
+       return ticks_elapsed_in_us(t1, t2);
 }
 
 /**
@@ -1517,7 +1537,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
                if (dev->safe_state) {
                        return dev->safe_state->enter(dev, dev->safe_state);
                } else {
+                       local_irq_disable();
                        acpi_safe_halt();
+                       local_irq_enable();
                        return 0;
                }
        }
@@ -1609,7 +1631,7 @@ struct cpuidle_driver acpi_idle_driver = {
  */
 static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 {
-       int i, count = 0;
+       int i, count = CPUIDLE_DRIVER_STATE_START;
        struct acpi_processor_cx *cx;
        struct cpuidle_state *state;
        struct cpuidle_device *dev = &pr->power.dev;
@@ -1638,13 +1660,14 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 
                snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
                state->exit_latency = cx->latency;
-               state->target_residency = cx->latency * 6;
+               state->target_residency = cx->latency * latency_factor;
                state->power_usage = cx->power;
 
                state->flags = 0;
                switch (cx->type) {
                        case ACPI_STATE_C1:
                        state->flags |= CPUIDLE_FLAG_SHALLOW;
+                       state->flags |= CPUIDLE_FLAG_TIME_VALID;
                        state->enter = acpi_idle_enter_c1;
                        dev->safe_state = state;
                        break;
@@ -1667,6 +1690,8 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
                }
 
                count++;
+               if (count == CPUIDLE_STATE_MAX)
+                       break;
        }
 
        dev->state_count = count;
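
Note: two things stand out in the processor_idle.c changes above.
acpi_idle_enter_c1() now times C1 with the ACPI PM timer and returns the
residency in microseconds, which is why C1 gains CPUIDLE_FLAG_TIME_VALID; and
the hard-coded target_residency multiplier of 6 becomes the tunable
latency_factor, default 2, so a state with 100 us exit latency now needs a
predicted 200 us of idle rather than 600 us. The wrap-safe conversion behind
the new return value is the ticks_elapsed_in_us() helper named in the hunk
header near the top of this file; roughly (a sketch, with the 32-bit case
reproduced from the hunk above):

/*
 * Sketch of the PM-timer delta conversion.  The timer wraps at 24 bits
 * unless the FADT advertises a 32-bit timer, hence the two overflow
 * cases.
 */
static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
{
        if (t2 >= t1)
                return PM_TIMER_TICKS_TO_US(t2 - t1);
        else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
                return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
        else
                return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}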
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 3bed412..7dbc4a8 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -1,13 +1,13 @@
 
 config CPU_IDLE
        bool "CPU idle PM support"
+       default ACPI
        help
          CPU idle is a generic framework for supporting software-controlled
          idle processor power management.  It includes modular cross-platform
          governors that can be swapped during runtime.
 
-         If you're using a mobile platform that supports CPU idle PM (e.g.
-         an ACPI-capable notebook), you should say Y here.
+         If you're using an ACPI-enabled platform, you should say Y here.
 
 config CPU_IDLE_GOV_LADDER
        bool
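
Note: the "modular cross-platform governors" in the help text are small
policy objects; on each idle entry, the current governor's select() hook
picks a state index. A minimal, purely illustrative governor against this
era's interface (the struct fields and cpuidle_register_governor() are real;
the governor itself is hypothetical):

#include <linux/module.h>
#include <linux/cpuidle.h>

/* Hypothetical governor that always picks the shallowest real C-state.
 * Real governors (ladder, menu) also implement enable() and reflect(). */
static int shallow_select(struct cpuidle_device *dev)
{
        return CPUIDLE_DRIVER_STATE_START;
}

static struct cpuidle_governor shallow_governor = {
        .name   = "shallow",
        .rating = 10,   /* low rating, so it is never picked by default */
        .select = shallow_select,
        .owner  = THIS_MODULE,
};

/* activated with: cpuidle_register_governor(&shallow_governor); */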
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 2a98d99..d868d73 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -15,6 +15,7 @@
 #include <linux/pm_qos_params.h>
 #include <linux/cpu.h>
 #include <linux/cpuidle.h>
+#include <linux/ktime.h>
 
 #include "cpuidle.h"
 
@@ -82,7 +83,7 @@ void cpuidle_uninstall_idle_handler(void)
 {
        if (enabled_devices && (pm_idle != pm_idle_old)) {
                pm_idle = pm_idle_old;
-               cpu_idle_wait();
+               cpuidle_kick_cpus();
        }
 }
 
@@ -180,6 +181,44 @@ void cpuidle_disable_device(struct cpuidle_device *dev)
 
 EXPORT_SYMBOL_GPL(cpuidle_disable_device);
 
+#ifdef CONFIG_ARCH_HAS_CPU_RELAX
+static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
+{
+       ktime_t t1, t2;
+       s64 diff;
+       int ret;
+
+       t1 = ktime_get();
+       local_irq_enable();
+       while (!need_resched())
+               cpu_relax();
+
+       t2 = ktime_get();
+       diff = ktime_to_us(ktime_sub(t2, t1));
+       if (diff > INT_MAX)
+               diff = INT_MAX;
+
+       ret = (int) diff;
+       return ret;
+}
+
+static void poll_idle_init(struct cpuidle_device *dev)
+{
+       struct cpuidle_state *state = &dev->states[0];
+
+       cpuidle_set_statedata(state, NULL);
+
+       snprintf(state->name, CPUIDLE_NAME_LEN, "C0 (poll idle)");
+       state->exit_latency = 0;
+       state->target_residency = 0;
+       state->power_usage = -1;
+       state->flags = CPUIDLE_FLAG_POLL | CPUIDLE_FLAG_TIME_VALID;
+       state->enter = poll_idle;
+}
+#else
+static void poll_idle_init(struct cpuidle_device *dev) {}
+#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
+
 /**
  * cpuidle_register_device - registers a CPU's idle PM feature
  * @dev: the cpu
@@ -198,6 +237,8 @@ int cpuidle_register_device(struct cpuidle_device *dev)
 
        mutex_lock(&cpuidle_lock);
 
+       poll_idle_init(dev);
+
        per_cpu(cpuidle_devices, dev->cpu) = dev;
        list_add(&dev->device_list, &cpuidle_detected_devices);
        if ((ret = cpuidle_add_sysfs(sys_dev))) {
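
Note: poll_idle() returns how long it actually polled, in microseconds, and
flags itself CPUIDLE_FLAG_TIME_VALID, so the measurement feeds straight back
into governor predictions. A condensed sketch of how the core idle path
consumes that return value (field and hook names are from this era's cpuidle
interface, error handling omitted):

/* Condensed from cpuidle_idle_call(): pick a state, enter it, record
 * the measured residency so the governor can learn from the outcome. */
static void cpuidle_idle_call_sketch(void)
{
        struct cpuidle_device *dev = __get_cpu_var(cpuidle_devices);
        struct cpuidle_state *target;
        int next_state;

        next_state = cpuidle_curr_governor->select(dev);
        target = &dev->states[next_state];

        dev->last_residency = target->enter(dev, target);   /* time in us */
        dev->last_state = target;

        if (cpuidle_curr_governor->reflect)
                cpuidle_curr_governor->reflect(dev);
}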
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 6e253b5..f6d7c50 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -34,6 +34,7 @@
 
 #define ACPI_CSTATE_SYSTEMIO   (0)
 #define ACPI_CSTATE_FFH                (1)
+#define ACPI_CSTATE_HALT       (2)
 
 /* Power Management */
 
@@ -64,7 +65,7 @@ struct acpi_processor_cx {
        u8 valid;
        u8 type;
        u32 address;
-       u8 space_id;
+       u8 entry_method;
        u8 index;
        u32 latency;
        u32 latency_ticks;
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index b0fd85a..c8eb8c7 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -46,9 +46,10 @@ struct cpuidle_state {
 /* Idle State Flags */
 #define CPUIDLE_FLAG_TIME_VALID        (0x01) /* is residency time measurable? */
 #define CPUIDLE_FLAG_CHECK_BM  (0x02) /* BM activity will exit state */
-#define CPUIDLE_FLAG_SHALLOW   (0x10) /* low latency, minimal savings */
-#define CPUIDLE_FLAG_BALANCED  (0x20) /* medium latency, moderate savings */
-#define CPUIDLE_FLAG_DEEP      (0x40) /* high latency, large savings */
+#define CPUIDLE_FLAG_POLL      (0x10) /* no latency, no savings */
+#define CPUIDLE_FLAG_SHALLOW   (0x20) /* low latency, minimal savings */
+#define CPUIDLE_FLAG_BALANCED  (0x40) /* medium latency, moderate savings */
+#define CPUIDLE_FLAG_DEEP      (0x80) /* high latency, large savings */
 
 #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
 
@@ -72,6 +73,19 @@ cpuidle_set_statedata(struct cpuidle_state *state, void *data)
        state->driver_data = data;
 }
 
+#ifdef CONFIG_SMP
+#ifdef CONFIG_ARCH_HAS_CPU_IDLE_WAIT
+static inline void cpuidle_kick_cpus(void)
+{
+       cpu_idle_wait();
+}
+#else /* !CONFIG_ARCH_HAS_CPU_IDLE_WAIT */
+#error "Arch needs cpu_idle_wait() equivalent here"
+#endif /* !CONFIG_ARCH_HAS_CPU_IDLE_WAIT */
+#else /* !CONFIG_SMP */
+static inline void cpuidle_kick_cpus(void) {}
+#endif /* !CONFIG_SMP */
+
 struct cpuidle_state_kobj {
        struct cpuidle_state *state;
        struct completion kobj_unregister;
@@ -178,4 +192,10 @@ static inline void cpuidle_unregister_governor(struct cpuidle_governor *gov) { }
 
 #endif
 
+#ifdef CONFIG_ARCH_HAS_CPU_RELAX
+#define CPUIDLE_DRIVER_STATE_START     1
+#else
+#define CPUIDLE_DRIVER_STATE_START     0
+#endif
+
 #endif /* _LINUX_CPUIDLE_H */
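
Note: CPUIDLE_DRIVER_STATE_START is what ties the pieces together. On
architectures with cpu_relax(), poll_idle_init() claims states[0] at device
registration, and drivers such as acpi_idle start filling states at index 1
(hence count = CPUIDLE_DRIVER_STATE_START above). Illustratively, the
resulting x86 state table:

/*
 * Illustrative layout of dev->states[] on x86 after this merge:
 *
 *   states[0]   "C0 (poll idle)"  POLL | TIME_VALID     from poll_idle_init()
 *   states[1]   "C1"              SHALLOW | TIME_VALID  acpi_idle_enter_c1()
 *   states[2..] "C2", "C3", ...   filled by acpi_processor_setup_cpuidle(),
 *                                 capped at CPUIDLE_STATE_MAX entries
 */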