 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
 * Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <acpi/pdc_intel.h>
35
#include <linux/init.h>
36
#include <linux/numa.h>
37
#include <asm/system.h>
40
#include <xen/nodemask.h>
/* DMAR (VT-d) table parser entry point, defined in the DMAR driver. */
extern int acpi_dmar_init(void);

/* ACPICA expects the platform to name its native 64-bit integer types. */
#define COMPILER_DEPENDENT_INT64	long
#define COMPILER_DEPENDENT_UINT64	unsigned long
/*
 * Calling conventions:
 *
 * ACPI_SYSTEM_XFACE        - Interfaces to host OS (handlers, threads)
 * ACPI_EXTERNAL_XFACE      - External ACPI interfaces
 * ACPI_INTERNAL_XFACE      - Internal ACPI interfaces
 * ACPI_INTERNAL_VAR_XFACE  - Internal variable-parameter list interfaces
 */
/* No special calling-convention decoration is needed on ia64. */
#define ACPI_SYSTEM_XFACE
#define ACPI_EXTERNAL_XFACE
#define ACPI_INTERNAL_XFACE
#define ACPI_INTERNAL_VAR_XFACE

/* Asm macros provided by this header rather than by ACPICA defaults. */
#define ACPI_ASM_MACROS

#define ACPI_DISABLE_IRQS()	local_irq_disable()
#define ACPI_ENABLE_IRQS()	local_irq_enable()
/* ia64 caches are coherent for this purpose: flush is a no-op. */
#define ACPI_FLUSH_CPU_CACHE()
/*
 * Try to acquire the ACPI FACS global lock (two-bit protocol: bit 0 =
 * pending, bit 1 = owned) with an atomic compare-and-swap retry loop.
 * NOTE(review): the return value feeds ACPI_ACQUIRE_GLOBAL_LOCK's Acq
 * status; -1/0 encoding follows the ACPI spec Global Lock protocol —
 * confirm against the ACPICA reference.
 */
static inline int
ia64_acpi_acquire_global_lock (unsigned int *lock)
{
	unsigned int old, new, val;
	do {
		old = *lock;
		/* set "owned", carry previous owner state into "pending" */
		new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
		val = ia64_cmpxchg4_acq(lock, new, old);
	} while (unlikely (val != old));
	return (new < 3) ? -1 : 0;
}
/*
 * Release the ACPI FACS global lock: atomically clear the owned and
 * pending bits.  Returns the old pending bit so the caller knows
 * whether waiters must be signalled (GBL_RLS).
 */
static inline int
ia64_acpi_release_global_lock (unsigned int *lock)
{
	unsigned int old, new, val;
	do {
		old = *lock;
		new = old & ~0x3;	/* clear owned + pending */
		val = ia64_cmpxchg4_acq(lock, new, old);
	} while (unlikely (val != old));
	return old & 0x1;
}
/*
 * Glue macros used by ACPICA; Acq receives the helper's status result.
 */
#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
	((Acq) = ia64_acpi_acquire_global_lock(&facs->global_lock))

#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
	((Acq) = ia64_acpi_release_global_lock(&facs->global_lock))
#define acpi_disabled 0	/* ACPI always enabled on IA64 */
#define acpi_noirq 0	/* ACPI always enabled on IA64 */
#define acpi_pci_disabled 0	/* ACPI PCI always enabled on IA64 */
#define acpi_strict 1	/* no ACPI spec workarounds on IA64 */
#define acpi_processor_cstate_check(x) (x)	/* no idle limits on IA64 :) */
/* ACPI cannot be disabled on ia64, so this is a no-op. */
static inline void disable_acpi(void) { }
105
const char *acpi_get_sysname (void);
106
int acpi_request_vector (u32 int_type);
107
int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
109
/* routines for saving/restoring kernel state */
110
extern int acpi_save_state_mem(void);
111
extern void acpi_restore_state_mem(void);
112
extern unsigned long acpi_wakeup_address;
/*
 * Record the cpei override flag and current logical cpu. This is
 * useful for CPU removal.
 */
extern unsigned int can_cpei_retarget(void);
extern unsigned int is_cpu_cpei_target(unsigned int cpu);
extern void set_cpei_target_cpu(unsigned int cpu);
extern unsigned int get_cpei_target_cpu(void);
extern void prefill_possible_map(void);

#ifdef CONFIG_ACPI_HOTPLUG_CPU
extern int additional_cpus;
#else
#define additional_cpus 0
#endif
#ifdef CONFIG_ACPI_NUMA
/* Proximity domains: bound by the node count, but at least 256. */
#if MAX_NUMNODES > 256
#define MAX_PXM_DOMAINS MAX_NUMNODES
#else
#define MAX_PXM_DOMAINS (256)
#endif
extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
#endif

/* TLB is not lazily flushed around C-state entry on ia64: no-op. */
#define acpi_unlazy_tlb(x)
#ifdef CONFIG_ACPI_NUMA
/* CPUs discovered early from SRAT, before the full possible map is set. */
extern cpumask_t early_cpu_possible_map;
#define for_each_possible_early_cpu(cpu)	\
	for_each_cpu_mask((cpu), early_cpu_possible_map)
146
static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
148
int low_cpu, high_cpu;
152
low_cpu = cpus_weight(early_cpu_possible_map);
154
high_cpu = max(low_cpu, min_cpus);
155
high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
157
for (cpu = low_cpu; cpu < high_cpu; cpu++) {
158
cpu_set(cpu, early_cpu_possible_map);
159
if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
160
node_cpuid[cpu].nid = next_nid;
162
if (next_nid >= num_online_nodes())
167
#endif /* CONFIG_ACPI_NUMA */
169
#endif /*__KERNEL__*/
171
#endif /*_ASM_ACPI_H*/