/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
 * Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 */

#ifndef _ASM_ACPI_H
#define _ASM_ACPI_H

#ifdef __KERNEL__

#include <acpi/pdc_intel.h>

#include <linux/init.h>
#include <linux/numa.h>
#include <asm/numa.h>

extern int acpi_lapic;
#define acpi_disabled 0		/* ACPI always enabled on IA64 */
#define acpi_noirq 0		/* ACPI always enabled on IA64 */
#define acpi_pci_disabled 0	/* ACPI PCI always enabled on IA64 */
#define acpi_strict 1		/* no ACPI spec workarounds on IA64 */

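/*
 * acpi_lapic is expected to be non-zero once the MADT has been parsed and
 * CPU (local SAPIC) entries were found there; acpi_has_cpu_in_madt() just
 * reports that fact to the generic ACPI code.
 */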
static inline bool acpi_has_cpu_in_madt(void)
{
	return !!acpi_lapic;
}

#define acpi_processor_cstate_check(x) (x)	/* no idle limits on IA64 :) */
static inline void disable_acpi(void) { }

int acpi_request_vector (u32 int_type);
int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
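/*
 * Rough usage sketch, not code from this file: a driver that knows a
 * device's GSI would normally translate it before requesting the IRQ:
 *
 *	unsigned int irq;
 *
 *	if (acpi_gsi_to_irq(gsi, &irq) < 0)
 *		return -EINVAL;
 *	if (request_irq(irq, my_handler, 0, "mydev", dev))
 *		...
 *
 * "my_handler", "mydev" and "dev" are placeholder names, not symbols
 * defined here.
 */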

/* Low-level suspend routine. */
extern int acpi_suspend_lowlevel(void);

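/*
 * ia64 has no real-mode wakeup vector to hand to the firmware; returning 0
 * tells the ACPI sleep code that no wakeup address is available.
 */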
static inline unsigned long acpi_get_wakeup_address(void)
{
	return 0;
}

/*
 * Record the CPEI (Corrected Platform Error Interrupt) override flag and
 * the current target CPU.  This is useful for CPU removal.
 */
extern unsigned int can_cpei_retarget(void);
extern unsigned int is_cpu_cpei_target(unsigned int cpu);
extern void set_cpei_target_cpu(unsigned int cpu);
extern unsigned int get_cpei_target_cpu(void);
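/*
 * Illustrative sketch only (the real logic lives in the ia64 IRQ/hotplug
 * code): when a CPU is removed, the CPEI target is moved roughly like
 *
 *	if (is_cpu_cpei_target(dying_cpu) && can_cpei_retarget())
 *		set_cpei_target_cpu(some_other_online_cpu);
 *
 * "dying_cpu" and "some_other_online_cpu" are placeholder names.
 */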
extern void prefill_possible_map(void);
#ifdef CONFIG_ACPI_HOTPLUG_CPU
extern int additional_cpus;
#else
#define additional_cpus 0
#endif

#ifdef CONFIG_ACPI_NUMA
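/*
 * Translation tables between ACPI proximity domains (PXM values from the
 * SRAT) and Linux NUMA node ids.  The PXM space is at least 256 entries,
 * or MAX_NUMNODES when that is larger.
 */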
#if MAX_NUMNODES > 256
#define MAX_PXM_DOMAINS MAX_NUMNODES
#else
#define MAX_PXM_DOMAINS (256)
#endif
extern int pxm_to_nid_map[MAX_PXM_DOMAINS];
extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
#endif

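/*
 * _PDC (Processor Driver Capabilities) hooks: ia64 always supports _PDC
 * evaluation, and arch_acpi_set_pdc_bits() adds the EST-for-SMP capability
 * bits (ACPI_PDC_EST_CAPABILITY_SMP) to capability word 2 before _PDC is
 * evaluated.
 */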
static inline bool arch_has_acpi_pdc(void) { return true; }
static inline void arch_acpi_set_pdc_bits(u32 *buf)
{
	buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
}

#ifdef CONFIG_ACPI_NUMA
extern cpumask_t early_cpu_possible_map;
#define for_each_possible_early_cpu(cpu)	\
	for_each_cpu((cpu), &early_cpu_possible_map)

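/*
 * Sketch of how the iterator above is meant to be used (placeholder body,
 * not code from this file):
 *
 *	int cpu;
 *
 *	for_each_possible_early_cpu(cpu)
 *		... early, per-cpu setup ...
 */

/*
 * Pad early_cpu_possible_map out to at least min_cpus, plus reserve_cpus
 * spare slots for hotplug, capped at NR_CPUS.  CPUs added by the padding
 * have no node assignment yet, so they are spread round-robin across the
 * online nodes.
 */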
static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
{
	int low_cpu, high_cpu;
	int cpu;
	int next_nid = 0;

	low_cpu = cpumask_weight(&early_cpu_possible_map);

	high_cpu = max(low_cpu, min_cpus);
	high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);

	for (cpu = low_cpu; cpu < high_cpu; cpu++) {
		cpumask_set_cpu(cpu, &early_cpu_possible_map);
		if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
			node_cpuid[cpu].nid = next_nid;
			next_nid++;
			if (next_nid >= num_online_nodes())
				next_nid = 0;
		}
	}
}

extern void acpi_numa_fixup(void);

#endif /* CONFIG_ACPI_NUMA */

#endif /*__KERNEL__*/

#endif /*_ASM_ACPI_H*/