/******************************************************************************
 * arch/x86/hpet.c
 *
 * HPET management.
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/time.h>
#include <xen/timer.h>
#include <xen/smp.h>
#include <xen/softirq.h>
#include <xen/irq.h>
#include <asm/fixmap.h>
#include <asm/div64.h>
#include <asm/hpet.h>
#include <asm/msi.h>
#include <mach_apic.h>
#include <xen/cpuidle.h>

#define MAX_DELTA_NS MILLISECS(10*1000)
#define MIN_DELTA_NS MICROSECS(20)

#define MAX_HPET_NUM 32

#define HPET_EVT_USED_BIT 0
#define HPET_EVT_USED (1 << HPET_EVT_USED_BIT)
#define HPET_EVT_DISABLE_BIT 1
#define HPET_EVT_DISABLE (1 << HPET_EVT_DISABLE_BIT)

struct hpet_event_channel
{
    unsigned long mult;
    int           shift;
    s_time_t      next_event;
    cpumask_t     cpumask;
    spinlock_t    lock;
    void (*event_handler)(struct hpet_event_channel *);

    unsigned int idx;   /* physical channel idx */
    int cpu;            /* msi target */
    int irq;            /* msi irq */
    unsigned int flags; /* HPET_EVT_x */
} __cacheline_aligned;

static struct hpet_event_channel legacy_hpet_event;
static struct hpet_event_channel hpet_events[MAX_HPET_NUM] =
    { [0 ... MAX_HPET_NUM-1].irq = -1 };
static unsigned int num_hpets_used; /* msi hpet channels used for broadcast */

DEFINE_PER_CPU(struct hpet_event_channel *, cpu_bc_channel);

static int *irq_channel;

#define irq_to_channel(irq) irq_channel[irq]

unsigned long hpet_address;

/*
 * force_hpet_broadcast: by default legacy hpet broadcast will be stopped
 * if RTC interrupts are enabled. Enable this option if you want to always
 * enable legacy hpet broadcast for deep C state.
 */
int force_hpet_broadcast;
boolean_param("hpetbroadcast", force_hpet_broadcast);

/*
 * Calculate a multiplication factor for scaled math, which is used to convert
 * nanoseconds based values to clock ticks:
 *
 * clock_ticks = (nanoseconds * factor) >> shift.
 *
 * div_sc is the rearranged equation to calculate a factor from a given clock
 * ticks / nanoseconds ratio:
 *
 * factor = (clock_ticks << shift) / nanoseconds
 */
static inline unsigned long div_sc(unsigned long ticks, unsigned long nsec,
                                   int shift)
{
    uint64_t tmp = ((uint64_t)ticks) << shift;

    do_div(tmp, nsec);
    return (unsigned long) tmp;
}

/*
 * Convert nanoseconds based values to clock ticks:
 *
 * clock_ticks = (nanoseconds * factor) >> shift.
 */
static inline unsigned long ns2ticks(unsigned long nsec, int shift,
                                     unsigned long factor)
{
    uint64_t tmp = ((uint64_t)nsec * factor) >> shift;

    return (unsigned long) tmp;
}

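/*
 * Worked example (illustrative figures, not read from hardware): assume a
 * 14.318180 MHz HPET counter and shift = 32.  Then
 *
 *   factor = div_sc(14318180, 1000000000, 32)
 *          = (14318180 << 32) / 10^9 ~= 61496115
 *
 * and converting 1 ms into ticks gives
 *
 *   ns2ticks(1000000, 32, 61496115) = (1000000 * 61496115) >> 32 ~= 14318
 *
 * i.e. one millisecond's worth of ticks at roughly 14.318 MHz.
 */
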
static int hpet_next_event(unsigned long delta, int timer)
{
    uint32_t cnt, cmp;
    unsigned long flags;

    local_irq_save(flags);
    cnt = hpet_read32(HPET_COUNTER);
    cmp = cnt + delta;
    hpet_write32(cmp, HPET_Tn_CMP(timer));
    cmp = hpet_read32(HPET_COUNTER);
    local_irq_restore(flags);

    /* Are we within two ticks of the deadline passing? Then we may miss. */
    return ((cmp + 2 - cnt) > delta) ? -ETIME : 0;
}

static int reprogram_hpet_evt_channel(
    struct hpet_event_channel *ch,
    s_time_t expire, s_time_t now, int force)
{
    int64_t delta;
    int ret;

    if ( (ch->flags & HPET_EVT_DISABLE) || (expire == 0) )
        return 0;
    if ( unlikely(expire < 0) )
    {
        printk(KERN_DEBUG "reprogram: expire <= 0\n");
        return -ETIME;
    }

    delta = expire - now;
    if ( (delta <= 0) && !force )
        return -ETIME;

    ch->next_event = expire;
    if ( expire == STIME_MAX )
    {
        /* We assume it will take a long time for the timer to wrap. */
        hpet_write32(0, HPET_Tn_CMP(ch->idx));
        return 0;
    }

    delta = min_t(int64_t, delta, MAX_DELTA_NS);
    delta = max_t(int64_t, delta, MIN_DELTA_NS);
    delta = ns2ticks(delta, ch->shift, ch->mult);

    ret = hpet_next_event(delta, ch->idx);
    while ( ret && force )
    {
        delta += delta;
        ret = hpet_next_event(delta, ch->idx);
    }
    return ret;
}

static int evt_do_broadcast(cpumask_t mask)
{
    int ret = 0, cpu = smp_processor_id();

    if ( cpu_isset(cpu, mask) )
    {
        cpu_clear(cpu, mask);
        raise_softirq(TIMER_SOFTIRQ);
        ret = 1;
    }

    if ( !cpus_empty(mask) )
    {
        cpumask_raise_softirq(mask, TIMER_SOFTIRQ);
        ret = 1;
    }
    return ret;
}

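/*
 * Summary added for clarity: the broadcast handler below runs when the HPET
 * channel fires while the CPUs sharing it sit in a deep C state with their
 * LAPIC timers stopped.  It wakes every CPU in ch->cpumask whose per-CPU
 * timer_deadline_start has already passed (via evt_do_broadcast above) and
 * reprograms the channel for the earliest remaining timer_deadline_end.
 */
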
static void handle_hpet_broadcast(struct hpet_event_channel *ch)
{
    cpumask_t mask;
    s_time_t now, next_event;
    int cpu;

    spin_lock_irq(&ch->lock);

again:
    ch->next_event = STIME_MAX;
    next_event = STIME_MAX;
    mask = (cpumask_t)CPU_MASK_NONE;
    now = NOW();

    /* find all expired events */
    for_each_cpu_mask(cpu, ch->cpumask)
    {
        if ( per_cpu(timer_deadline_start, cpu) <= now )
            cpu_set(cpu, mask);
        else if ( per_cpu(timer_deadline_end, cpu) < next_event )
            next_event = per_cpu(timer_deadline_end, cpu);
    }

    /* wakeup the cpus which have an expired event. */
    evt_do_broadcast(mask);

    if ( next_event != STIME_MAX )
    {
        if ( reprogram_hpet_evt_channel(ch, next_event, now, 0) )
            goto again;
    }
    spin_unlock_irq(&ch->lock);
}

static void hpet_interrupt_handler(int irq, void *data,
                                   struct cpu_user_regs *regs)
{
    struct hpet_event_channel *ch = (struct hpet_event_channel *)data;

    this_cpu(irq_count)--;

    if ( !ch->event_handler )
    {
        printk(XENLOG_WARNING "Spurious HPET timer interrupt on HPET timer %d\n", ch->idx);
        return;
    }

    ch->event_handler(ch);
}

static void hpet_msi_unmask(unsigned int irq)
{
    unsigned long cfg;
    int ch_idx = irq_to_channel(irq);
    struct hpet_event_channel *ch;

    BUG_ON(ch_idx < 0);
    ch = &hpet_events[ch_idx];

    cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
    cfg |= HPET_TN_ENABLE;
    hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
}

static void hpet_msi_mask(unsigned int irq)
{
    unsigned long cfg;
    int ch_idx = irq_to_channel(irq);
    struct hpet_event_channel *ch;

    BUG_ON(ch_idx < 0);
    ch = &hpet_events[ch_idx];

    cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
    cfg &= ~HPET_TN_ENABLE;
    hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
}

static void hpet_msi_write(unsigned int irq, struct msi_msg *msg)
{
    int ch_idx = irq_to_channel(irq);
    struct hpet_event_channel *ch;

    BUG_ON(ch_idx < 0);
    ch = &hpet_events[ch_idx];

    hpet_write32(msg->data, HPET_Tn_ROUTE(ch->idx));
    hpet_write32(msg->address_lo, HPET_Tn_ROUTE(ch->idx) + 4);
}

static void hpet_msi_read(unsigned int irq, struct msi_msg *msg)
{
    int ch_idx = irq_to_channel(irq);
    struct hpet_event_channel *ch;

    BUG_ON(ch_idx < 0);
    ch = &hpet_events[ch_idx];

    msg->data = hpet_read32(HPET_Tn_ROUTE(ch->idx));
    msg->address_lo = hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4);
}

static unsigned int hpet_msi_startup(unsigned int irq)
{
    hpet_msi_unmask(irq);
    return 0;
}

static void hpet_msi_shutdown(unsigned int irq)
{
    hpet_msi_mask(irq);
}

static void hpet_msi_ack(unsigned int irq)
{
    struct irq_desc *desc = irq_to_desc(irq);

    irq_complete_move(&desc);
    move_native_irq(irq);
    ack_APIC_irq();
}

static void hpet_msi_end(unsigned int irq)
{
}

static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
{
    struct msi_msg msg;
    unsigned int dest;
    struct irq_desc *desc = irq_to_desc(irq);
    struct irq_cfg *cfg = desc->chip_data;

    dest = set_desc_affinity(desc, mask);
    if (dest == BAD_APICID)
        return;

    hpet_msi_read(irq, &msg);
    msg.data &= ~MSI_DATA_VECTOR_MASK;
    msg.data |= MSI_DATA_VECTOR(cfg->vector);
    msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
    msg.address_lo |= MSI_ADDR_DEST_ID(dest);
    hpet_msi_write(irq, &msg);
}

/*
 * IRQ Chip for MSI HPET Devices.
 */
static hw_irq_controller hpet_msi_type = {
    .typename = "HPET-MSI",
    .startup = hpet_msi_startup,
    .shutdown = hpet_msi_shutdown,
    .enable = hpet_msi_unmask,
    .disable = hpet_msi_mask,
    .ack = hpet_msi_ack,
    .end = hpet_msi_end,
    .set_affinity = hpet_msi_set_affinity,
};

static int hpet_setup_msi_irq(unsigned int irq)
{
    int ret;
    struct msi_msg msg;
    struct hpet_event_channel *ch = &hpet_events[irq_to_channel(irq)];

    irq_desc[irq].handler = &hpet_msi_type;
    ret = request_irq(irq, hpet_interrupt_handler,
                      0, "HPET", ch);
    if ( ret < 0 )
        return ret;

    msi_compose_msg(NULL, irq, &msg);
    hpet_msi_write(irq, &msg);

    return 0;
}

static int hpet_assign_irq(struct hpet_event_channel *ch)
{
    int irq;

    if ( (irq = create_irq()) < 0 )
        return irq;

    irq_channel[irq] = ch - &hpet_events[0];

    /* hpet_setup_msi_irq should also be called for S3 resuming */
    if ( hpet_setup_msi_irq(irq) )
    {
        destroy_irq(irq);
        irq_channel[irq] = -1;
        return -EINVAL;
    }

    ch->irq = irq;
    return 0;
}

static int hpet_fsb_cap_lookup(void)
{
    unsigned int id;
    unsigned int num_chs, num_chs_used;
    int i;

    if ( iommu_intremap )
    {
        printk(XENLOG_INFO "HPET's MSI mode hasn't been supported when "
               "Interrupt Remapping is enabled.\n");
        return 0;
    }

    id = hpet_read32(HPET_ID);

    num_chs = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
    num_chs++; /* Value read out starts from 0 */

    num_chs_used = 0;
    for ( i = 0; i < num_chs; i++ )
    {
        struct hpet_event_channel *ch = &hpet_events[num_chs_used];
        unsigned long cfg = hpet_read32(HPET_Tn_CFG(i));

        /* Only consider HPET timer with MSI support */
        if ( !(cfg & HPET_TN_FSB_CAP) )
            continue;

        ch->idx = i;
        if ( hpet_assign_irq(ch) )
            continue;

        /* set default irq affinity */
        ch->cpu = num_chs_used;
        per_cpu(cpu_bc_channel, ch->cpu) = ch;
        irq_desc[ch->irq].handler->
            set_affinity(ch->irq, cpumask_of_cpu(ch->cpu));

        num_chs_used++;
        if ( num_chs_used == num_possible_cpus() )
            break;
    }

    printk(XENLOG_INFO
           "HPET: %d timers in total, %d timers will be used for broadcast\n",
           num_chs, num_chs_used);

    return num_chs_used;
}

static int next_channel;
static spinlock_t next_lock = SPIN_LOCK_UNLOCKED;

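/*
 * Note added for clarity: next_channel rotates round-robin under next_lock,
 * so CPUs entering broadcast mode are spread across the available MSI
 * channels.  A free channel is claimed outright via its HPET_EVT_USED bit;
 * if every channel already has an owner, the next one in the rotation is
 * shared instead.
 */
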
static struct hpet_event_channel *hpet_get_channel(int cpu)
{
    int i;
    int next;
    struct hpet_event_channel *ch;

    spin_lock(&next_lock);
    next = next_channel = (next_channel + 1) % num_hpets_used;
    spin_unlock(&next_lock);

    /* try unused channel first */
    for ( i = next; i < next + num_hpets_used; i++ )
    {
        ch = &hpet_events[i % num_hpets_used];
        if ( !test_and_set_bit(HPET_EVT_USED_BIT, &ch->flags) )
        {
            ch->cpu = cpu;
            return ch;
        }
    }

    /* share an in-use channel */
    ch = &hpet_events[next];
    if ( !test_and_set_bit(HPET_EVT_USED_BIT, &ch->flags) )
        ch->cpu = cpu;

    return ch;
}

static void hpet_attach_channel_share(int cpu, struct hpet_event_channel *ch)
{
    per_cpu(cpu_bc_channel, cpu) = ch;

    /* try to be the channel owner again while holding the lock */
    if ( !test_and_set_bit(HPET_EVT_USED_BIT, &ch->flags) )
        ch->cpu = cpu;

    if ( ch->cpu != cpu )
        return;

    /* set irq affinity */
    irq_desc[ch->irq].handler->
        set_affinity(ch->irq, cpumask_of_cpu(ch->cpu));
}

static void hpet_detach_channel_share(int cpu)
{
    struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);

    per_cpu(cpu_bc_channel, cpu) = NULL;

    if ( cpu != ch->cpu )
        return;

    if ( cpus_empty(ch->cpumask) )
    {
        ch->cpu = -1;
        clear_bit(HPET_EVT_USED_BIT, &ch->flags);
        return;
    }

    ch->cpu = first_cpu(ch->cpumask);
    /* set irq affinity */
    irq_desc[ch->irq].handler->
        set_affinity(ch->irq, cpumask_of_cpu(ch->cpu));
}

static void (*hpet_attach_channel)(int cpu, struct hpet_event_channel *ch);
static void (*hpet_detach_channel)(int cpu);

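/*
 * Note added for clarity: these hooks stay NULL when every possible CPU can
 * get its own MSI channel; hpet_broadcast_init() points them at the *_share
 * variants above only when channels are scarcer than CPUs and must be shared.
 */
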
#include <asm/mc146818rtc.h>

void (*pv_rtc_handler)(unsigned int port, uint8_t value);

static void handle_rtc_once(unsigned int port, uint8_t value)
{
    static int index;

    /* Writes to port 0x70 select the RTC register for the next data write. */
    if ( port == 0x70 )
    {
        index = value;
        return;
    }

    if ( index != RTC_REG_B )
        return;

    /* RTC Reg B contains PIE/AIE/UIE */
    if ( value & (RTC_PIE | RTC_AIE | RTC_UIE ) )
    {
        cpuidle_disable_deep_cstate();
        pv_rtc_handler = NULL;
    }
}

void hpet_broadcast_init(void)
{
    u64 hpet_rate;
    u32 hpet_id, cfg;
    int i;

    if ( irq_channel == NULL )
    {
        irq_channel = xmalloc_array(int, nr_irqs);
        BUG_ON(irq_channel == NULL);
        for ( i = 0; i < nr_irqs; i++ )
            irq_channel[i] = -1;
    }

    hpet_rate = hpet_setup();
    if ( hpet_rate == 0 )
        return;

    num_hpets_used = hpet_fsb_cap_lookup();
    if ( num_hpets_used > 0 )
    {
        /* Stop HPET legacy interrupts */
        cfg = hpet_read32(HPET_CFG);
        cfg &= ~HPET_CFG_LEGACY;
        hpet_write32(cfg, HPET_CFG);

        for ( i = 0; i < num_hpets_used; i++ )
        {
            /* set HPET Tn as oneshot */
            cfg = hpet_read32(HPET_Tn_CFG(hpet_events[i].idx));
            cfg &= ~HPET_TN_PERIODIC;
            cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
            hpet_write32(cfg, HPET_Tn_CFG(hpet_events[i].idx));

            hpet_events[i].mult = div_sc((unsigned long)hpet_rate,
                                         1000000000ul, 32);
            hpet_events[i].shift = 32;
            hpet_events[i].next_event = STIME_MAX;
            hpet_events[i].event_handler = handle_hpet_broadcast;
            spin_lock_init(&hpet_events[i].lock);
        }

        if ( num_hpets_used < num_possible_cpus() )
        {
            hpet_attach_channel = hpet_attach_channel_share;
            hpet_detach_channel = hpet_detach_channel_share;
        }

        return;
    }

    if ( legacy_hpet_event.flags & HPET_EVT_DISABLE )
        return;

    hpet_id = hpet_read32(HPET_ID);
    if ( !(hpet_id & HPET_ID_LEGSUP) )
        return;

    /* Start HPET legacy interrupts */
    cfg = hpet_read32(HPET_CFG);
    cfg |= HPET_CFG_LEGACY;
    hpet_write32(cfg, HPET_CFG);

    /* set HPET T0 as oneshot */
    cfg = hpet_read32(HPET_T0_CFG);
    cfg &= ~HPET_TN_PERIODIC;
    cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
    hpet_write32(cfg, HPET_T0_CFG);

    /*
     * The period is a femtoseconds value. We need to calculate the scaled
     * math multiplication factor for nanosecond to hpet tick conversion.
     */
    legacy_hpet_event.mult = div_sc((unsigned long)hpet_rate, 1000000000ul, 32);
    legacy_hpet_event.shift = 32;
    legacy_hpet_event.next_event = STIME_MAX;
    legacy_hpet_event.event_handler = handle_hpet_broadcast;
    legacy_hpet_event.idx = 0;
    legacy_hpet_event.flags = 0;
    spin_lock_init(&legacy_hpet_event.lock);

    for_each_possible_cpu(i)
        per_cpu(cpu_bc_channel, i) = &legacy_hpet_event;

    if ( !force_hpet_broadcast )
        pv_rtc_handler = handle_rtc_once;
}

void hpet_disable_legacy_broadcast(void)
{
    u32 cfg;
    unsigned long flags;

    spin_lock_irqsave(&legacy_hpet_event.lock, flags);

    legacy_hpet_event.flags |= HPET_EVT_DISABLE;

    /* disable HPET T0 */
    cfg = hpet_read32(HPET_T0_CFG);
    cfg &= ~HPET_TN_ENABLE;
    hpet_write32(cfg, HPET_T0_CFG);

    /* Stop HPET legacy interrupts */
    cfg = hpet_read32(HPET_CFG);
    cfg &= ~HPET_CFG_LEGACY;
    hpet_write32(cfg, HPET_CFG);

    spin_unlock_irqrestore(&legacy_hpet_event.lock, flags);

    smp_send_event_check_mask(&cpu_online_map);
}

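/*
 * Idle entry/exit protocol (summary added for clarity): before a CPU drops
 * into a deep C state it calls hpet_broadcast_enter() to park its wakeup on
 * the shared HPET channel and switch off its soon-to-stop LAPIC timer; on
 * wakeup, hpet_broadcast_exit() hands the deadline back to the local timer
 * and, if this CPU was the last one on the channel, quiesces the channel.
 */
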
void hpet_broadcast_enter(void)
{
    int cpu = smp_processor_id();
    struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);

    if ( this_cpu(timer_deadline_start) == 0 )
        return;

    if ( !ch )
        ch = hpet_get_channel(cpu);

    ASSERT(!local_irq_is_enabled());
    spin_lock(&ch->lock);

    if ( hpet_attach_channel )
        hpet_attach_channel(cpu, ch);

    /* Cancel any outstanding LAPIC timer event and disable interrupts. */
    reprogram_timer(0);
    disable_APIC_timer();

    cpu_set(cpu, ch->cpumask);

    /* reprogram if current cpu expire time is nearer */
    if ( this_cpu(timer_deadline_end) < ch->next_event )
        reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline_end), NOW(), 1);

    spin_unlock(&ch->lock);
}

void hpet_broadcast_exit(void)
{
    int cpu = smp_processor_id();
    struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);

    if ( this_cpu(timer_deadline_start) == 0 )
        return;

    spin_lock_irq(&ch->lock);

    if ( cpu_test_and_clear(cpu, ch->cpumask) )
    {
        /* Reprogram the deadline; trigger timer work now if it has passed. */
        enable_APIC_timer();
        if ( !reprogram_timer(this_cpu(timer_deadline_start)) )
            raise_softirq(TIMER_SOFTIRQ);

        if ( cpus_empty(ch->cpumask) && ch->next_event != STIME_MAX )
            reprogram_hpet_evt_channel(ch, STIME_MAX, 0, 0);
    }

    if ( hpet_detach_channel )
        hpet_detach_channel(cpu);

    spin_unlock_irq(&ch->lock);
}

int hpet_broadcast_is_available(void)
{
    return (legacy_hpet_event.event_handler == handle_hpet_broadcast
            || num_hpets_used > 0);
}

int hpet_legacy_irq_tick(void)
{
    this_cpu(irq_count)--;

    if ( !legacy_hpet_event.event_handler )
        return 0;

    legacy_hpet_event.event_handler(&legacy_hpet_event);
    return 1;
}

u64 hpet_setup(void)
{
    static u64 hpet_rate;
    static u32 system_reset_latch;
    u32 hpet_id, hpet_period, cfg;
    int i;

    if ( system_reset_latch == system_reset_counter )
        return hpet_rate;
    system_reset_latch = system_reset_counter;

    if ( hpet_address == 0 )
        return 0;

    set_fixmap_nocache(FIX_HPET_BASE, hpet_address);

    hpet_id = hpet_read32(HPET_ID);
    if ( (hpet_id & HPET_ID_REV) == 0 )
    {
        printk("BAD HPET revision id.\n");
        return 0;
    }

    /* Check for sane period (100ps <= period <= 100ns). */
    hpet_period = hpet_read32(HPET_PERIOD);
    if ( (hpet_period > 100000000) || (hpet_period < 100000) )
    {
        printk("BAD HPET period %u.\n", hpet_period);
        return 0;
    }

    cfg = hpet_read32(HPET_CFG);
    cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
    hpet_write32(cfg, HPET_CFG);

    for ( i = 0; i <= ((hpet_id >> 8) & 31); i++ )
    {
        cfg = hpet_read32(HPET_Tn_CFG(i));
        cfg &= ~HPET_TN_ENABLE;
        hpet_write32(cfg, HPET_Tn_CFG(i));
    }

    cfg = hpet_read32(HPET_CFG);
    cfg |= HPET_CFG_ENABLE;
    hpet_write32(cfg, HPET_CFG);

    hpet_rate = 1000000000000000ULL; /* 10^15 */
    (void)do_div(hpet_rate, hpet_period);
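
/*
 * Illustrative example (typical value, assumed rather than measured): a
 * chipset reporting hpet_period = 69841279 fs, i.e. ~69.84 ns per tick,
 * yields hpet_rate = 10^15 / 69841279 ~= 14318180 Hz, the familiar
 * 14.31818 MHz HPET frequency used in the div_sc() example further up.
 */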