/*
 * cpu_idle - xen idle state module derived from Linux
 *            drivers/acpi/processor_idle.c &
 *            arch/x86/kernel/acpi/cstate.c
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                      - Added processor hotplug support
 *  Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *                      - Added support for C3 on SMP
 *  Copyright (C) 2007, 2008 Intel Corporation
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/lib.h>
#include <xen/types.h>
#include <xen/acpi.h>
#include <xen/smp.h>
#include <xen/guest_access.h>
#include <xen/keyhandler.h>
#include <xen/cpuidle.h>
#include <xen/trace.h>
#include <xen/sched-if.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/hpet.h>
#include <asm/processor.h>
#include <xen/pmstat.h>
#include <public/platform.h>
#include <public/sysctl.h>
#include <acpi/cpufreq/cpufreq.h>

/*#define DEBUG_PM_CX*/

static void lapic_timer_nop(void) { }
static void (*lapic_timer_off)(void);
static void (*lapic_timer_on)(void);

extern void (*pm_idle) (void);
extern void (*dead_idle) (void);
extern void menu_get_trace_data(u32 *expected, u32 *pred);

static void (*pm_idle_save) (void) __read_mostly;
unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER - 1;
integer_param("max_cstate", max_cstate);
static int local_apic_timer_c2_ok __read_mostly = 0;
boolean_param("lapic_timer_c2_ok", local_apic_timer_c2_ok);

static struct acpi_processor_power *__read_mostly processor_powers[NR_CPUS];

static void print_acpi_power(uint32_t cpu, struct acpi_processor_power *power)
{
    uint32_t i, idle_usage = 0;
    uint64_t res, idle_res = 0;

    printk("==cpu%d==\n", cpu);
    printk("active state:\t\tC%d\n",
           power->last_state ? power->last_state->idx : -1);
    printk("max_cstate:\t\tC%d\n", max_cstate);
    printk("states:\n");

    for ( i = 1; i < power->count; i++ )
    {
        res = acpi_pm_tick_to_ns(power->states[i].time);
        idle_usage += power->states[i].usage;
        idle_res += res;

        printk((power->last_state && power->last_state->idx == i) ?
               "   *" : "    ");
        printk("C%d:\t", i);
        printk("type[C%d] ", power->states[i].type);
        printk("latency[%03d] ", power->states[i].latency);
        printk("usage[%08d] ", power->states[i].usage);
        printk("duration[%"PRId64"]\n", res);
    }
    printk("    C0:\tusage[%08d] duration[%"PRId64"]\n",
           idle_usage, NOW() - idle_res);
}

static void dump_cx(unsigned char key)
{
    unsigned int cpu;

    printk("'%c' pressed -> printing ACPI Cx structures\n", key);
    for_each_online_cpu ( cpu )
        if ( processor_powers[cpu] )
            print_acpi_power(cpu, processor_powers[cpu]);
}

static struct keyhandler dump_cx_keyhandler = {
    .diagnostic = 1,
    .u.fn = dump_cx,
    .desc = "dump ACPI Cx structures"
};

static int __init cpu_idle_key_init(void)
{
    register_keyhandler('c', &dump_cx_keyhandler);
    return 0;
}
__initcall(cpu_idle_key_init);

static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
    if ( t2 >= t1 )
        return (t2 - t1);
    else if ( !(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) )
        return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
    else
        return ((0xFFFFFFFF - t1) + t2);
}
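
/*
 * Worked example: with the 24-bit ACPI PM timer, sampling t1 = 0x00FFFFF0
 * before idle and t2 = 0x00000010 after it means the counter wrapped once;
 * ((0x00FFFFFF - t1) + t2) & 0x00FFFFFF recovers the 0x20 ticks that really
 * elapsed. A sleep longer than one full timer period still goes undetected,
 * which is inherent to sampling a free-running 24/32-bit counter.
 */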

static void acpi_safe_halt(void)
{
    smp_mb__after_clear_bit();
    safe_halt();
}
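
/*
 * MONITOR/MWAIT idle: MONITOR arms a write-watch on the cache line holding
 * the 'current' pointer, and MWAIT then sleeps in the C-state encoded in
 * EAX until that line is written or, because MWAIT_ECX_INTERRUPT_BREAK is
 * passed in ECX, until an interrupt arrives even with interrupts masked.
 * The barrier keeps the monitor set-up ordered before the MWAIT itself.
 */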
#define MWAIT_ECX_INTERRUPT_BREAK   (0x1)

static void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
    __monitor((void *)current, 0, 0);
    smp_mb();
    __mwait(eax, ecx);
}

static void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{
    mwait_idle_with_hints(cx->address, MWAIT_ECX_INTERRUPT_BREAK);
}

static void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
    int unused;

    switch ( cx->entry_method )
    {
    case ACPI_CSTATE_EM_FFH:
        /* Call into architectural FFH based C-state */
        acpi_processor_ffh_cstate_enter(cx);
        break;
    case ACPI_CSTATE_EM_SYSIO:
        /* IO port based C-state */
        inb(cx->address);
        /* Dummy wait op - must do something useless after P_LVL2 read
           because chipsets cannot guarantee that STPCLK# signal
           gets asserted in time to freeze execution properly. */
        unused = inl(pmtmr_ioport);
        break;
    case ACPI_CSTATE_EM_HALT:
        acpi_safe_halt();
        local_irq_disable();
        break;
    }
}
180
static int acpi_idle_bm_check(void)
184
acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
186
acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
188
* TBD: PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
189
* the true state of bus mastering activity; forcing us to
190
* manually check the BMIDEA bit of each IDE channel.
198
} c3_cpu_status = { .lock = SPIN_LOCK_UNLOCKED };
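
/*
 * c3_cpu_status.count tracks how many CPUs are inside the C3 entry path.
 * Only the last CPU to arrive disables bus master arbitration (ARB_DIS),
 * and the first one to wake re-enables it, so DMA arbitration is only
 * suppressed while every online CPU is asleep.
 */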

static inline void trace_exit_reason(u32 *irq_traced)
{
    if ( unlikely(tb_init_done) )
    {
        int i, curbit;
        u32 irr_status[8] = { 0 };

        /* Get local apic IRR register */
        for ( i = 0; i < 8; i++ )
            irr_status[i] = apic_read(APIC_IRR + (i << 4));
        i = 0;
        curbit = find_first_bit((const unsigned long *)irr_status, 256);
        while ( i < 4 && curbit < 256 )
        {
            irq_traced[i++] = curbit;
            curbit = find_next_bit((const unsigned long *)irr_status, 256,
                                   curbit + 1);
        }
    }
}

/*
 * A vcpu is urgent if it is polling an event channel.
 *
 * If an urgent vcpu exists, the CPU should not enter a deep C-state.
 */
static int sched_has_urgent_vcpu(void)
{
    return atomic_read(&this_cpu(schedule_data).urgent_count);
}
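
/*
 * Idle entry point installed in pm_idle: ask the governor to select a
 * C-state, suspend the scheduler tick and cpufreq sampling, enter the state
 * with interrupts disabled, then account the measured residency and give
 * the governor a chance to reflect on how well its prediction matched.
 */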
static void acpi_processor_idle(void)
{
    struct acpi_processor_power *power = processor_powers[smp_processor_id()];
    struct acpi_processor_cx *cx = NULL;
    int next_state;
    int sleep_ticks = 0;
    u32 t1, t2 = 0;
    u32 exp = 0, pred = 0;
    u32 irq_traced[4] = { 0 };

    if ( max_cstate > 0 && power && !sched_has_urgent_vcpu() &&
         (next_state = cpuidle_current_governor->select(power)) > 0 )
    {
        cx = &power->states[next_state];
        if ( power->flags.bm_check && acpi_idle_bm_check()
             && cx->type == ACPI_STATE_C3 )
            cx = power->safe_state;
        if ( cx->idx > max_cstate )
            cx = &power->states[max_cstate];
        menu_get_trace_data(&exp, &pred);
    }
    if ( !cx )
    {
        if ( pm_idle_save )
            pm_idle_save();
        else
            acpi_safe_halt();
        return;
    }

    cpufreq_dbs_timer_suspend();

    sched_tick_suspend();
    /* sched_tick_suspend() can raise TIMER_SOFTIRQ. Process it now. */
    process_pending_softirqs();

    /*
     * Interrupts must be disabled during bus mastering calculations and
     * for C2/C3 transitions.
     */
    local_irq_disable();

    if ( softirq_pending(smp_processor_id()) ||
         cpu_is_offline(smp_processor_id()) )
    {
        local_irq_enable();
        sched_tick_resume();
        cpufreq_dbs_timer_resume();
        return;
    }

    power->last_state = cx;

    /*
     * Sleep:
     * ------
     * Invoke the current Cx state to put the processor to sleep.
     */
    switch ( cx->type )
    {
    case ACPI_STATE_C1:
    case ACPI_STATE_C2:
        if ( cx->type == ACPI_STATE_C1 || local_apic_timer_c2_ok )
        {
            /* Get start time (ticks) */
            t1 = inl(pmtmr_ioport);
            /* Trace cpu idle entry */
            TRACE_4D(TRC_PM_IDLE_ENTRY, cx->idx, t1, exp, pred);
            /* Invoke C1/C2 */
            acpi_idle_do_entry(cx);
            /* Get end time (ticks) */
            t2 = inl(pmtmr_ioport);
            trace_exit_reason(irq_traced);
            /* Trace cpu idle exit */
            TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2,
                     irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]);

            /* Re-enable interrupts */
            local_irq_enable();
            /* Compute time (ticks) that we were actually asleep */
            sleep_ticks = ticks_elapsed(t1, t2);
            break;
        }
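
        /*
         * Deliberate fall-through: a C2 request without a local APIC timer
         * that survives C2 (no lapic_timer_c2_ok) is entered via the C3
         * path below, which switches to a broadcast timer first.
         */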
    case ACPI_STATE_C3:
        /*
         * disable bus master
         * bm_check implies we need ARB_DIS
         * !bm_check implies we need cache flush
         * bm_control implies whether we can do ARB_DIS
         *
         * That leaves a case where bm_check is set and bm_control is
         * not set. In that case we cannot do much, we enter C3
         * without doing anything.
         */
        if ( power->flags.bm_check && power->flags.bm_control )
        {
            spin_lock(&c3_cpu_status.lock);
            if ( ++c3_cpu_status.count == num_online_cpus() )
            {
                /*
                 * All CPUs are trying to go to C3
                 * Disable bus master arbitration
                 */
                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
            }
            spin_unlock(&c3_cpu_status.lock);
        }
        else if ( !power->flags.bm_check )
        {
            /* SMP with no shared cache... Invalidate cache */
            ACPI_FLUSH_CPU_CACHE();
        }

        /*
         * Before invoking C3, be aware that the TSC/APIC timer may be
         * stopped by H/W. Without careful handling of TSC/APIC-stop issues,
         * deep C-states can't work correctly.
         */
        /* preparing APIC stop */
        lapic_timer_off();

        /* Get start time (ticks) */
        t1 = inl(pmtmr_ioport);
        /* Trace cpu idle entry */
        TRACE_4D(TRC_PM_IDLE_ENTRY, cx->idx, t1, exp, pred);
        /* Invoke C3 */
        acpi_idle_do_entry(cx);
        /* Get end time (ticks) */
        t2 = inl(pmtmr_ioport);

        /* recovering TSC */
        cstate_restore_tsc();
        trace_exit_reason(irq_traced);
        /* Trace cpu idle exit */
        TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2,
                 irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]);

        if ( power->flags.bm_check && power->flags.bm_control )
        {
            /* Enable bus master arbitration */
            spin_lock(&c3_cpu_status.lock);
            if ( c3_cpu_status.count-- == num_online_cpus() )
                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
            spin_unlock(&c3_cpu_status.lock);
        }

        /* Re-enable interrupts */
        local_irq_enable();
        /* recovering APIC */
        lapic_timer_on();
        /* Compute time (ticks) that we were actually asleep */
        sleep_ticks = ticks_elapsed(t1, t2);
        break;

    default:
        local_irq_enable();
        sched_tick_resume();
        cpufreq_dbs_timer_resume();
        acpi_safe_halt();
        return;
    }

    cx->usage++;
    if ( sleep_ticks > 0 )
    {
        power->last_residency = acpi_pm_tick_to_ns(sleep_ticks) / 1000UL;
        cx->time += sleep_ticks;
    }

    sched_tick_resume();
    cpufreq_dbs_timer_resume();

    if ( cpuidle_current_governor->reflect )
        cpuidle_current_governor->reflect(power);
}
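
/*
 * Idle loop for offlined CPUs: a dead CPU parks here in the deepest
 * available C-state and never returns. Interrupts are deliberately not
 * treated as MWAIT break events (ECX hint 0 below).
 */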
static void acpi_dead_idle(void)
{
    struct acpi_processor_power *power;
    struct acpi_processor_cx *cx;
    int unused;

    if ( (power = processor_powers[smp_processor_id()]) == NULL )
        goto default_halt;

    if ( (cx = &power->states[power->count-1]) == NULL )
        goto default_halt;

    for ( ; ; )
    {
        if ( !power->flags.bm_check && cx->type == ACPI_STATE_C3 )
            ACPI_FLUSH_CPU_CACHE();

        switch ( cx->entry_method )
        {
        case ACPI_CSTATE_EM_FFH:
            /* Do not treat interrupts as break events. */
            mwait_idle_with_hints(cx->address, 0);
            break;
        case ACPI_CSTATE_EM_SYSIO:
            inb(cx->address);
            unused = inl(pmtmr_ioport);
            break;
        default:
            goto default_halt;
        }
    }

default_halt:
    for ( ; ; )
        halt();
}
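
/*
 * Baseline table: every CPU gets C0 (running) plus C1 entered via HALT,
 * which needs no ACPI data. Dom0-provided _CST entries can then extend
 * the table through set_cx() below.
 */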
static int init_cx_pminfo(struct acpi_processor_power *acpi_power)
{
    int i;

    memset(acpi_power, 0, sizeof(*acpi_power));

    for ( i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++ )
        acpi_power->states[i].idx = i;

    acpi_power->states[ACPI_STATE_C1].type = ACPI_STATE_C1;
    acpi_power->states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_EM_HALT;

    acpi_power->states[ACPI_STATE_C0].valid = 1;
    acpi_power->states[ACPI_STATE_C1].valid = 1;

    acpi_power->count = 2;
    acpi_power->safe_state = &acpi_power->states[ACPI_STATE_C1];

    return 0;
}

#define CPUID_MWAIT_LEAF                (5)
#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
#define CPUID5_ECX_INTERRUPT_BREAK      (0x2)

#define MWAIT_ECX_INTERRUPT_BREAK       (0x1)

#define MWAIT_SUBSTATE_MASK (0xf)
#define MWAIT_SUBSTATE_SIZE (4)
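
/*
 * For FIXED_HARDWARE entries, reg.address carries the MWAIT hint: bits
 * [3:0] select the sub-state and bits [7:4] encode the C-state, so e.g.
 * hint 0x10 requests the first C2 sub-state. CPUID leaf 5 reports in EDX
 * the number of sub-states per C-state, four bits each, which is what the
 * probe below validates against.
 */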
static int acpi_processor_ffh_cstate_probe(xen_processor_cx_t *cx)
{
    struct cpuinfo_x86 *c = &current_cpu_data;
    unsigned int eax, ebx, ecx, edx;
    unsigned int edx_part;
    unsigned int cstate_type; /* C-state type and not ACPI C-state type */
    unsigned int num_cstate_subtype;

    if ( c->cpuid_level < CPUID_MWAIT_LEAF )
    {
        printk(XENLOG_INFO "MWAIT leaf not supported by cpuid\n");
        return -EFAULT;
    }

    cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
    printk(XENLOG_DEBUG "cpuid.MWAIT[.eax=%x, .ebx=%x, .ecx=%x, .edx=%x]\n",
           eax, ebx, ecx, edx);

    /* Check whether this particular cx_type (in CST) is supported or not */
    cstate_type = (cx->reg.address >> MWAIT_SUBSTATE_SIZE) + 1;
    edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
    num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;

    if ( num_cstate_subtype < (cx->reg.address & MWAIT_SUBSTATE_MASK) )
        return -EFAULT;

    /* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */
    if ( !(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
         !(ecx & CPUID5_ECX_INTERRUPT_BREAK) )
        return -EFAULT;

    printk(XENLOG_INFO "Monitor-Mwait will be used to enter C-%d state\n",
           cx->type);
    return 0;
}

/*
 * Initialize bm_flags based on the CPU cache properties
 * On SMP it depends on cache configuration
 * - When cache is not shared among all CPUs, we flush cache
 *   before entering C3.
 * - When cache is shared among all CPUs, we use bm_check
 *   mechanism as in UP case
 *
 * This routine is called only after all the CPUs are online
 */
static void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags)
{
    struct cpuinfo_x86 *c = &current_cpu_data;

    flags->bm_check = 0;
    if ( num_online_cpus() == 1 )
        flags->bm_check = 1;
    else if ( c->x86_vendor == X86_VENDOR_INTEL )
    {
        /*
         * Today all MP CPUs that support C3 share cache.
         * And caches should not be flushed by software while
         * entering C3 type state.
         */
        flags->bm_check = 1;
    }

    /*
     * On all recent platforms, ARB_DISABLE is a nop.
     * So, set bm_control to zero to indicate that ARB_DISABLE
     * is not required while entering C3 type state on
     * P4, Core and beyond CPUs
     */
    if ( c->x86_vendor == X86_VENDOR_INTEL &&
         (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 14)) )
        flags->bm_control = 0;
}

#define VENDOR_INTEL                   (1)
#define NATIVE_CSTATE_BEYOND_HALT      (2)
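
/*
 * In a FIXED_HARDWARE _CST register the GAS fields are repurposed:
 * bit_width carries a vendor code (1 = Intel) and bit_offset the entry
 * class (2 = native C-state beyond HALT, i.e. MWAIT). check_cx() and
 * set_cx() below fall back to HALT or reject the entry when these do not
 * match, mirroring the Linux convention this file derives from.
 */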

static int check_cx(struct acpi_processor_power *power, xen_processor_cx_t *cx)
{
    static int bm_check_flag = -1;
    static int bm_control_flag = -1;

    switch ( cx->reg.space_id )
    {
    case ACPI_ADR_SPACE_SYSTEM_IO:
        if ( cx->reg.address == 0 )
            return -EINVAL;
        break;

    case ACPI_ADR_SPACE_FIXED_HARDWARE:
        if ( cx->reg.bit_width != VENDOR_INTEL ||
             cx->reg.bit_offset != NATIVE_CSTATE_BEYOND_HALT )
            return -EINVAL;

        /* assume all logical CPUs have the same support for MWAIT */
        if ( acpi_processor_ffh_cstate_probe(cx) )
            return -EINVAL;
        break;

    default:
        return -ENODEV;
    }

    switch ( cx->type )
    {
    case ACPI_STATE_C2:
        if ( local_apic_timer_c2_ok )
            break;
        /* fall through - without lapic_timer_c2_ok, treat C2 like C3 */
    case ACPI_STATE_C3:
        if ( boot_cpu_has(X86_FEATURE_ARAT) )
        {
            lapic_timer_off = lapic_timer_nop;
            lapic_timer_on = lapic_timer_nop;
        }
        else if ( hpet_broadcast_is_available() )
        {
            lapic_timer_off = hpet_broadcast_enter;
            lapic_timer_on = hpet_broadcast_exit;
        }
        else if ( pit_broadcast_is_available() )
        {
            lapic_timer_off = pit_broadcast_enter;
            lapic_timer_on = pit_broadcast_exit;
        }
        else
        {
            /* No broadcast timer available: deep C-states are unsafe. */
            return -EINVAL;
        }
        break;
    }

    if ( cx->type == ACPI_STATE_C3 )
    {
        /* All the logic here assumes flags.bm_check is same across all CPUs */
        if ( bm_check_flag == -1 )
        {
            /* Determine whether bm_check is needed based on CPU */
            acpi_processor_power_init_bm_check(&(power->flags));
            bm_check_flag = power->flags.bm_check;
            bm_control_flag = power->flags.bm_control;
        }
        else
        {
            power->flags.bm_check = bm_check_flag;
            power->flags.bm_control = bm_control_flag;
        }

        if ( power->flags.bm_check )
        {
            if ( !power->flags.bm_control )
            {
                if ( power->flags.has_cst != 1 )
                {
                    /* bus mastering control is necessary */
                    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "C3 support requires BM control\n"));
                    return -EINVAL;
                }
                else
                {
                    /* Here we enter C3 without bus mastering */
                    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "C3 support without BM control\n"));
                }
            }
            /*
             * On older chipsets, BM_RLD needs to be set
             * in order for Bus Master activity to wake the
             * system from C3.  Newer chipsets handle DMA
             * during C3 automatically and BM_RLD is a NOP.
             * In either case, the proper way to
             * handle BM_RLD is to set it and leave it set.
             */
            acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
        }
        else
        {
            /*
             * WBINVD should be set in fadt, for C3 state to be
             * supported when bm_check is not required.
             */
            if ( !(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD) )
            {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "Cache invalidation should work properly"
                          " for C3 to be enabled on SMP systems\n"));
                return -EINVAL;
            }
            acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
        }
    }

    return 0;
}

static unsigned int latency_factor = 2;
integer_param("idle_latency_factor", latency_factor);
668
struct acpi_processor_power *acpi_power,
669
xen_processor_cx_t *xen_cx)
671
struct acpi_processor_cx *cx;
673
if ( check_cx(acpi_power, xen_cx) != 0 )
676
if ( xen_cx->type == ACPI_STATE_C1 )
677
cx = &acpi_power->states[1];
679
cx = &acpi_power->states[acpi_power->count];
685
cx->type = xen_cx->type;
686
cx->address = xen_cx->reg.address;
688
switch ( xen_cx->reg.space_id )
690
case ACPI_ADR_SPACE_FIXED_HARDWARE:
691
if ( xen_cx->reg.bit_width == VENDOR_INTEL &&
692
xen_cx->reg.bit_offset == NATIVE_CSTATE_BEYOND_HALT )
693
cx->entry_method = ACPI_CSTATE_EM_FFH;
695
cx->entry_method = ACPI_CSTATE_EM_HALT;
697
case ACPI_ADR_SPACE_SYSTEM_IO:
698
cx->entry_method = ACPI_CSTATE_EM_SYSIO;
701
cx->entry_method = ACPI_CSTATE_EM_NONE;
704
cx->latency = xen_cx->latency;
705
cx->power = xen_cx->power;
707
cx->latency_ticks = ns_to_acpi_pm_tick(cx->latency * 1000UL);
708
cx->target_residency = cx->latency * latency_factor;
709
if ( cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 )
710
acpi_power->safe_state = cx;
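
/*
 * Rationale for target_residency: entering and leaving a state costs
 * roughly 'latency' microseconds (from _CST), so with the default
 * idle_latency_factor of 2 a state is only worth selecting when the
 * predicted idle period is at least twice its wakeup latency.
 */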

int get_cpu_id(u8 acpi_id)
{
    int i;
    u8 apic_id;

    apic_id = x86_acpiid_to_apicid[acpi_id];
    if ( apic_id == 0xff )
        return -1;

    for ( i = 0; i < NR_CPUS; i++ )
    {
        if ( apic_id == x86_cpu_to_apicid[i] )
            return i;
    }

    return -1;
}

#ifdef DEBUG_PM_CX
static void print_cx_pminfo(uint32_t cpu, struct xen_processor_power *power)
{
    XEN_GUEST_HANDLE(xen_processor_cx_t) states;
    xen_processor_cx_t  state;
    XEN_GUEST_HANDLE(xen_processor_csd_t) csd;
    xen_processor_csd_t dp;
    uint32_t i;

    printk("cpu%d cx acpi info:\n", cpu);
    printk("\tcount = %d\n", power->count);
    printk("\tflags: bm_cntl[%d], bm_chk[%d], has_cst[%d],\n"
           "\t       pwr_setup_done[%d], bm_rld_set[%d]\n",
           power->flags.bm_control, power->flags.bm_check, power->flags.has_cst,
           power->flags.power_setup_done, power->flags.bm_rld_set);

    states = power->states;

    for ( i = 0; i < power->count; i++ )
    {
        if ( unlikely(copy_from_guest_offset(&state, states, i, 1)) )
            return;

        printk("\tstates[%d]:\n", i);
        printk("\t\treg.space_id = 0x%x\n", state.reg.space_id);
        printk("\t\treg.bit_width = 0x%x\n", state.reg.bit_width);
        printk("\t\treg.bit_offset = 0x%x\n", state.reg.bit_offset);
        printk("\t\treg.access_size = 0x%x\n", state.reg.access_size);
        printk("\t\treg.address = 0x%"PRIx64"\n", state.reg.address);
        printk("\t\ttype    = %d\n", state.type);
        printk("\t\tlatency = %d\n", state.latency);
        printk("\t\tpower   = %d\n", state.power);

        csd = state.dp;
        printk("\t\tdp(@0x%p)\n", csd.p);

        if ( !guest_handle_is_null(csd) )
        {
            if ( unlikely(copy_from_guest(&dp, csd, 1)) )
                return;
            printk("\t\t\tdomain = %d\n", dp.domain);
            printk("\t\t\tcoord_type   = %d\n", dp.coord_type);
            printk("\t\t\tnum = %d\n", dp.num);
        }
    }
}
#else
#define print_cx_pminfo(c, p)
#endif
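
/*
 * Dom0 parses each processor's _CST package and uploads it through the
 * XENPF_set_processor_pminfo platform hypercall; set_cx_pminfo() validates
 * the guest-supplied states and installs the ACPI idle handlers once the
 * boot CPU has usable data.
 */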
long set_cx_pminfo(uint32_t cpu, struct xen_processor_power *power)
{
    XEN_GUEST_HANDLE(xen_processor_cx_t) states;
    xen_processor_cx_t xen_cx;
    struct acpi_processor_power *acpi_power;
    int cpu_id, i;

    if ( unlikely(!guest_handle_okay(power->states, power->count)) )
        return -EFAULT;

    print_cx_pminfo(cpu, power);

    /* map from acpi_id to cpu_id */
    cpu_id = get_cpu_id((u8)cpu);
    if ( cpu_id == -1 )
    {
        printk(XENLOG_ERR "no cpu_id for acpi_id %d\n", cpu);
        return -EFAULT;
    }

    acpi_power = processor_powers[cpu_id];
    if ( !acpi_power )
    {
        acpi_power = xmalloc(struct acpi_processor_power);
        if ( !acpi_power )
            return -ENOMEM;
        memset(acpi_power, 0, sizeof(*acpi_power));
        processor_powers[cpu_id] = acpi_power;
    }

    init_cx_pminfo(acpi_power);

    acpi_power->cpu = cpu_id;
    acpi_power->flags.bm_check = power->flags.bm_check;
    acpi_power->flags.bm_control = power->flags.bm_control;
    acpi_power->flags.has_cst = power->flags.has_cst;

    states = power->states;

    for ( i = 0; i < power->count; i++ )
    {
        if ( unlikely(copy_from_guest_offset(&xen_cx, states, i, 1)) )
            return -EFAULT;

        set_cx(acpi_power, &xen_cx);
    }

    if ( cpuidle_current_governor->enable &&
         cpuidle_current_governor->enable(acpi_power) )
        return -EFAULT;

    /* FIXME: C-state dependency is not supported so far */

    /*print_acpi_power(cpu_id, acpi_power);*/

    if ( cpu_id == 0 && pm_idle_save == NULL )
    {
        pm_idle_save = pm_idle;
        pm_idle = acpi_processor_idle;
    }

    dead_idle = acpi_dead_idle;

    return 0;
}

uint32_t pmstat_get_cx_nr(uint32_t cpuid)
{
    return processor_powers[cpuid] ? processor_powers[cpuid]->count : 0;
}

int pmstat_get_cx_stat(uint32_t cpuid, struct pm_cx_stat *stat)
{
    const struct acpi_processor_power *power = processor_powers[cpuid];
    uint64_t usage, res, idle_usage = 0, idle_res = 0;
    int i;

    if ( power == NULL )
    {
        stat->last = 0;
        stat->nr = 0;
        stat->idle_time = 0;
        return 0;
    }

    stat->last = power->last_state ? power->last_state->idx : 0;
    stat->nr = power->count;
    stat->idle_time = get_cpu_idle_time(cpuid);

    for ( i = power->count - 1; i >= 0; i-- )
    {
        if ( i != 0 )
        {
            usage = power->states[i].usage;
            res = acpi_pm_tick_to_ns(power->states[i].time);
            idle_usage += usage;
            idle_res += res;
        }
        else
        {
            usage = idle_usage;
            res = NOW() - idle_res;
        }
        if ( copy_to_guest_offset(stat->triggers, i, &usage, 1) ||
             copy_to_guest_offset(stat->residencies, i, &res, 1) )
            return -EFAULT;
    }

    return 0;
}

int pmstat_reset_cx_stat(uint32_t cpuid)
{
    return 0;
}

void cpuidle_disable_deep_cstate(void)
{
    if ( max_cstate > 1 )
    {
        if ( local_apic_timer_c2_ok )
            max_cstate = 2;
        else
            max_cstate = 1;
    }

    mb();

    hpet_disable_legacy_broadcast();
}