ESXi-4.1-U3

unknown 2015-10-23 15:21:55 -04:00
commit 0d186246d2
1346 changed files with 1702699 additions and 0 deletions

@@ -0,0 +1,10 @@
/*
* 8253/8254 Programmable Interval Timer
*/
#ifndef _8253PIT_H
#define _8253PIT_H
#define PIT_TICK_RATE 1193182UL
#endif
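
For context, a minimal sketch of how this constant is typically consumed (the HZ value and LATCH name below are illustrative, not part of this header): the PIT channel-0 reload value is the 1.193182 MHz input clock divided by the desired interrupt rate, rounded to the nearest integer.

/* Hypothetical illustration: reload value for a HZ-per-second tick. */
#define HZ 1000
#define LATCH ((PIT_TICK_RATE + HZ / 2) / HZ) /* ~1193 for HZ = 1000 */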

@@ -0,0 +1,27 @@
#ifndef __X8664_A_OUT_H__
#define __X8664_A_OUT_H__
/* 32bit a.out */
struct exec
{
unsigned int a_info; /* Use macros N_MAGIC, etc for access */
unsigned a_text; /* length of text, in bytes */
unsigned a_data; /* length of data, in bytes */
unsigned a_bss; /* length of uninitialized data area for file, in bytes */
unsigned a_syms; /* length of symbol table data in file, in bytes */
unsigned a_entry; /* start address */
unsigned a_trsize; /* length of relocation info for text, in bytes */
unsigned a_drsize; /* length of relocation info for data, in bytes */
};
#define N_TRSIZE(a) ((a).a_trsize)
#define N_DRSIZE(a) ((a).a_drsize)
#define N_SYMSIZE(a) ((a).a_syms)
#ifdef __KERNEL__
#include <linux/thread_info.h>
#define STACK_TOP TASK_SIZE
#endif
#endif /* __X8664_A_OUT_H__ */
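
As a usage illustration, here is a minimal user-space sketch (hypothetical, not part of this header) that reads the header of a 32-bit a.out binary and prints the sizes recorded in struct exec:

#include <stdio.h>

/* Hypothetical: dump the segment sizes from an a.out header. */
static int dump_aout_sizes(const char *path)
{
    struct exec hdr;
    FILE *f = fopen(path, "rb");

    if (!f)
        return -1;
    if (fread(&hdr, sizeof(hdr), 1, f) != 1) {
        fclose(f);
        return -1;
    }
    printf("text=%u data=%u bss=%u syms=%u trsize=%u drsize=%u\n",
           hdr.a_text, hdr.a_data, hdr.a_bss,
           N_SYMSIZE(hdr), N_TRSIZE(hdr), N_DRSIZE(hdr));
    fclose(f);
    return 0;
}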

@@ -0,0 +1,171 @@
/*
* asm-x86_64/acpi.h
*
* Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#ifndef _ASM_ACPI_H
#define _ASM_ACPI_H
#ifdef __KERNEL__
#include <acpi/pdc_intel.h>
#define COMPILER_DEPENDENT_INT64 long long
#define COMPILER_DEPENDENT_UINT64 unsigned long long
/*
* Calling conventions:
*
* ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
* ACPI_EXTERNAL_XFACE - External ACPI interfaces
* ACPI_INTERNAL_XFACE - Internal ACPI interfaces
* ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
*/
#define ACPI_SYSTEM_XFACE
#define ACPI_EXTERNAL_XFACE
#define ACPI_INTERNAL_XFACE
#define ACPI_INTERNAL_VAR_XFACE
/* Asm macros */
#define ACPI_ASM_MACROS
#define BREAKPOINT3
#define ACPI_DISABLE_IRQS() local_irq_disable()
#define ACPI_ENABLE_IRQS() local_irq_enable()
#define ACPI_FLUSH_CPU_CACHE() wbinvd()
static inline int
__acpi_acquire_global_lock (unsigned int *lock)
{
unsigned int old, new, val;
do {
old = *lock;
new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
val = cmpxchg(lock, old, new);
} while (unlikely (val != old));
return (new < 3) ? -1 : 0;
}
static inline int
__acpi_release_global_lock (unsigned int *lock)
{
unsigned int old, new, val;
do {
old = *lock;
new = old & ~0x3;
val = cmpxchg(lock, old, new);
} while (unlikely (val != old));
return old & 0x1;
}
#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \
((Acq) = __acpi_acquire_global_lock((unsigned int *) GLptr))
#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \
((Acq) = __acpi_release_global_lock((unsigned int *) GLptr))
/*
* Math helper asm macros
*/
#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
asm("divl %2;" \
:"=a"(q32), "=d"(r32) \
:"r"(d32), \
"0"(n_lo), "1"(n_hi))
#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
asm("shrl $1,%2;" \
"rcrl $1,%3;" \
:"=r"(n_hi), "=r"(n_lo) \
:"0"(n_hi), "1"(n_lo))
#ifdef CONFIG_ACPI
extern int acpi_lapic;
extern int acpi_ioapic;
extern int acpi_noirq;
extern int acpi_strict;
extern int acpi_disabled;
extern int acpi_pci_disabled;
extern int acpi_ht;
static inline void disable_acpi(void)
{
acpi_disabled = 1;
acpi_ht = 0;
acpi_pci_disabled = 1;
acpi_noirq = 1;
}
/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */
#define FIX_ACPI_PAGES 4
extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
static inline void acpi_disable_pci(void)
{
acpi_pci_disabled = 1;
acpi_noirq_set();
}
extern int acpi_irq_balance_set(char *str);
#else /* !CONFIG_ACPI */
#define acpi_lapic 0
#define acpi_ioapic 0
static inline void acpi_noirq_set(void) { }
static inline void acpi_disable_pci(void) { }
#endif /* !CONFIG_ACPI */
extern int acpi_numa;
extern int acpi_scan_nodes(unsigned long start, unsigned long end);
#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
#ifdef CONFIG_ACPI_SLEEP
/* routines for saving/restoring kernel state */
extern int acpi_save_state_mem(void);
extern void acpi_restore_state_mem(void);
extern unsigned long acpi_wakeup_address;
/* early initialization routine */
extern void acpi_reserve_bootmem(void);
#endif /*CONFIG_ACPI_SLEEP*/
#define boot_cpu_physical_apicid boot_cpu_id
extern int acpi_disabled;
extern int acpi_pci_disabled;
extern u8 x86_acpiid_to_apicid[];
#define ARCH_HAS_POWER_INIT 1
extern int acpi_skip_timer_override;
#endif /*__KERNEL__*/
#endif /*_ASM_ACPI_H*/
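
A minimal sketch of how the global-lock macros above are used, assuming facs_lock points at the lock word in the FACS table. __acpi_acquire_global_lock() returns -1 (i.e. a nonzero Acq, lock taken) when the owner bit was clear, and 0 when it had to set the pending bit instead, in which case the caller must wait for the firmware's release event and retry. The function below is hypothetical:

/* Hypothetical illustration of the ACPI global-lock handshake. */
static void global_lock_example(unsigned int *facs_lock)
{
    int acquired, pending;

    ACPI_ACQUIRE_GLOBAL_LOCK(facs_lock, acquired);
    if (!acquired) {
        /* Pending bit set: real code sleeps until the firmware
         * signals release of the lock, then tries again. */
        return;
    }

    /* ... critical section shared with the firmware ... */

    ACPI_RELEASE_GLOBAL_LOCK(facs_lock, pending);
    if (pending) {
        /* Pending was set: would now signal the firmware that
         * the lock has been released. */
    }
}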

@@ -0,0 +1,32 @@
#ifndef AGP_H
#define AGP_H 1
#include <asm/cacheflush.h>
/*
* Functions to keep the agpgart mappings coherent.
* The GART gives the CPU a physical alias of memory. The alias is
* mapped uncacheable. Make sure there are no conflicting mappings
* with different cachability attributes for the same page.
*/
int map_page_into_agp(struct page *page);
int unmap_page_from_agp(struct page *page);
#define flush_agp_mappings() global_flush_tlb()
/* Could use CLFLUSH here if the cpu supports it. But then it would
need to be called for each cacheline of the whole page so it may not be
worth it. Would need a page for it. */
#define flush_agp_cache() asm volatile("wbinvd":::"memory")
/* Convert a physical address to an address suitable for the GART. */
#define phys_to_gart(x) (x)
#define gart_to_phys(x) (x)
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order) \
free_pages((unsigned long)(table), (order))
#endif
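
To show the intended call pattern (a hypothetical sketch, not a real driver path): allocate a GATT, make a page's GART alias uncacheable, and undo both on teardown. Programming the actual GART entries is elided.

/* Hypothetical illustration of the GATT/AGP mapping helpers. */
static int agp_example(struct page *page, int order)
{
    char *gatt = alloc_gatt_pages(order); /* kernel-virtual GATT */

    if (!gatt)
        return -1;
    map_page_into_agp(page); /* alias must be uncacheable */
    /* ... store phys_to_gart(page_to_phys(page)) in GATT entries ... */
    unmap_page_from_agp(page);
    flush_agp_mappings();
    free_gatt_pages(gatt, order);
    return 0;
}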

@@ -0,0 +1,136 @@
#ifndef _X86_64_ALTERNATIVE_H
#define _X86_64_ALTERNATIVE_H
#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cpufeature.h>
struct alt_instr {
u8 *instr; /* original instruction */
u8 *replacement;
u8 cpuid; /* cpuid bit set for replacement */
u8 instrlen; /* length of original instruction */
u8 replacementlen; /* length of new instruction, <= instrlen */
u8 pad[5];
};
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
struct module;
#ifdef CONFIG_SMP
extern void alternatives_smp_module_add(struct module *mod, char *name,
void *locks, void *locks_end,
void *text, void *text_end);
extern void alternatives_smp_module_del(struct module *mod);
extern void alternatives_smp_switch(int smp);
#else
static inline void alternatives_smp_module_add(struct module *mod, char *name,
void *locks, void *locks_end,
void *text, void *text_end) {}
static inline void alternatives_smp_module_del(struct module *mod) {}
static inline void alternatives_smp_switch(int smp) {}
#endif
#endif
/*
* Alternative instructions for different CPU types or capabilities.
*
* This allows the use of optimized instructions even on generic binary
* kernels.
*
* The length of oldinstr must be greater than or equal to the length of
* newinstr; it can be padded with nops as needed.
*
* For non-barrier-like inlines, please define new variants without
* volatile and memory clobber.
*/
#define alternative(oldinstr, newinstr, feature) \
asm volatile ("661:\n\t" oldinstr "\n662:\n" \
".section .altinstructions,\"a\"\n" \
" .align 8\n" \
" .quad 661b\n" /* label */ \
" .quad 663f\n" /* new instruction */ \
" .byte %c0\n" /* feature bit */ \
" .byte 662b-661b\n" /* sourcelen */ \
" .byte 664f-663f\n" /* replacementlen */ \
".previous\n" \
".section .altinstr_replacement,\"ax\"\n" \
"663:\n\t" newinstr "\n664:\n" /* replacement */ \
".previous" :: "i" (feature) : "memory")
/*
* Alternative inline assembly with input.
*
* Peculiarities:
* No memory clobber here.
* Argument numbers start with 1.
* It is best to use fixed-size constraints (like "r" for (%1)).
* If you use variable-sized constraints like "m" or "g" in the
* replacement, make sure to pad to the worst-case length.
*/
#define alternative_input(oldinstr, newinstr, feature, input...) \
asm volatile ("661:\n\t" oldinstr "\n662:\n" \
".section .altinstructions,\"a\"\n" \
" .align 8\n" \
" .quad 661b\n" /* label */ \
" .quad 663f\n" /* new instruction */ \
" .byte %c0\n" /* feature bit */ \
" .byte 662b-661b\n" /* sourcelen */ \
" .byte 664f-663f\n" /* replacementlen */ \
".previous\n" \
".section .altinstr_replacement,\"ax\"\n" \
"663:\n\t" newinstr "\n664:\n" /* replacement */ \
".previous" :: "i" (feature), ##input)
/* Like alternative_input, but with a single output argument */
#define alternative_io(oldinstr, newinstr, feature, output, input...) \
asm volatile ("661:\n\t" oldinstr "\n662:\n" \
".section .altinstructions,\"a\"\n" \
" .align 8\n" \
" .quad 661b\n" /* label */ \
" .quad 663f\n" /* new instruction */ \
" .byte %c[feat]\n" /* feature bit */ \
" .byte 662b-661b\n" /* sourcelen */ \
" .byte 664f-663f\n" /* replacementlen */ \
".previous\n" \
".section .altinstr_replacement,\"ax\"\n" \
"663:\n\t" newinstr "\n664:\n" /* replacement */ \
".previous" : output : [feat] "i" (feature), ##input)
/*
* Alternative inline assembly for SMP.
*
* The LOCK_PREFIX macro defined here replaces the LOCK and
* LOCK_PREFIX macros used everywhere in the source tree.
*
* SMP alternatives use the same data structures as the other
* alternatives and the X86_FEATURE_UP flag to indicate the case of a
* UP system running an SMP kernel. The existing apply_alternatives()
* works fine for patching an SMP kernel for UP.
*
* The SMP alternative tables can be kept after boot and contain both
* UP and SMP versions of the instructions to allow switching back to
* SMP at runtime when hot-plugging a new CPU, which is especially
* useful in virtualized environments.
*
* The very common lock prefix is handled as a special case in a
* separate table, which is a pure address list without replacement
* pointer or size information. That keeps the table sizes small.
*/
#ifdef CONFIG_SMP
#define LOCK_PREFIX \
".section .smp_locks,\"a\"\n" \
" .align 8\n" \
" .quad 661f\n" /* address */ \
".previous\n" \
"661:\n\tlock; "
#else /* ! CONFIG_SMP */
#define LOCK_PREFIX ""
#endif
#endif /* _X86_64_ALTERNATIVE_H */
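
To make alternative() concrete, a sketch modeled on how kernels of this era defined the write barrier: on CPUs advertising SSE (the X86_FEATURE_XMM bit from <asm/cpufeature.h>), the locked no-op add is patched into an sfence at boot. wmb_example is a hypothetical name.

/* Hypothetical: a write barrier that becomes SFENCE on SSE CPUs. */
static inline void wmb_example(void)
{
    alternative("lock; addl $0,0(%%rsp)", "sfence", X86_FEATURE_XMM);
}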

@@ -0,0 +1,113 @@
#ifndef __ASM_APIC_H
#define __ASM_APIC_H
#include <linux/pm.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/system.h>
#define Dprintk(x...)
/*
* Debugging macros
*/
#define APIC_QUIET 0
#define APIC_VERBOSE 1
#define APIC_DEBUG 2
extern int apic_verbosity;
extern int apic_runs_main_timer;
/*
* The default level of output is deliberately low. It can be turned
* up with apic=verbose for more information, or apic=debug for
* _lots_ of information. apic_verbosity is defined in apic.c.
*/
#define apic_printk(v, s, a...) do { \
if ((v) <= apic_verbosity) \
printk(s, ##a); \
} while (0)
#ifdef CONFIG_X86_LOCAL_APIC
struct pt_regs;
/*
* Basic functions accessing APICs.
*/
static __inline void apic_write(unsigned long reg, unsigned int v)
{
*((volatile unsigned int *)(APIC_BASE+reg)) = v;
}
static __inline unsigned int apic_read(unsigned long reg)
{
return *((volatile unsigned int *)(APIC_BASE+reg));
}
static __inline__ void apic_wait_icr_idle(void)
{
while (apic_read( APIC_ICR ) & APIC_ICR_BUSY)
cpu_relax();
}
static inline void ack_APIC_irq(void)
{
/*
* ack_APIC_irq() actually gets compiled as a single instruction:
* - a single rmw on Pentium/82489DX
* - a single write on P6+ cores (CONFIG_X86_GOOD_APIC)
* ... yummie.
*/
/* Docs say use 0 for future compatibility */
apic_write(APIC_EOI, 0);
}
extern int get_maxlvt (void);
extern void clear_local_APIC (void);
extern void connect_bsp_APIC (void);
extern void disconnect_bsp_APIC (int virt_wire_setup);
extern void disable_local_APIC (void);
extern int verify_local_APIC (void);
extern void cache_APIC_registers (void);
extern void sync_Arb_IDs (void);
extern void init_bsp_APIC (void);
extern void setup_local_APIC (void);
extern void init_apic_mappings (void);
extern void smp_local_timer_interrupt (struct pt_regs * regs);
extern void setup_boot_APIC_clock (void);
extern void setup_secondary_APIC_clock (void);
extern int APIC_init_uniprocessor (void);
extern void disable_APIC_timer(void);
extern void enable_APIC_timer(void);
extern void clustered_apic_check(void);
extern void setup_APIC_extened_lvt(unsigned char lvt_off, unsigned char vector,
unsigned char msg_type, unsigned char mask);
#define K8_APIC_EXT_LVT_BASE 0x500
#define K8_APIC_EXT_INT_MSG_FIX 0x0
#define K8_APIC_EXT_INT_MSG_SMI 0x2
#define K8_APIC_EXT_INT_MSG_NMI 0x4
#define K8_APIC_EXT_INT_MSG_EXT 0x7
#define K8_APIC_EXT_LVT_ENTRY_THRESHOLD 0
extern int disable_timer_pin_1;
#ifndef CONFIG_XEN
void smp_send_timer_broadcast_ipi(void);
void switch_APIC_timer_to_ipi(void *cpumask);
void switch_ipi_to_APIC_timer(void *cpumask);
#define ARCH_APICTIMER_STOPS_ON_C3 1
#endif
#endif /* CONFIG_X86_LOCAL_APIC */
extern unsigned boot_cpu_id;
#endif /* __ASM_APIC_H */
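
A small sketch of these accessors in use (hypothetical function, assuming the register offsets and field extractors from <asm/apicdef.h>): read the local APIC's ID and version registers and report them at APIC_VERBOSE level.

/* Hypothetical illustration of apic_read() and apic_printk(). */
static void report_local_apic(void)
{
    unsigned int id  = GET_APIC_ID(apic_read(APIC_ID));
    unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));

    apic_printk(APIC_VERBOSE, "local APIC %u, version 0x%x\n", id, ver);
}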

@@ -0,0 +1,392 @@
#ifndef __ASM_APICDEF_H
#define __ASM_APICDEF_H
/*
* Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
*
* Alan Cox <Alan.Cox@linux.org>, 1995.
* Ingo Molnar <mingo@redhat.com>, 1999, 2000
*/
#define APIC_DEFAULT_PHYS_BASE 0xfee00000
#define APIC_ID 0x20
#define APIC_ID_MASK (0xFFu<<24)
#define GET_APIC_ID(x) (((x)>>24)&0xFFu)
#define SET_APIC_ID(x) (((x)<<24))
#define APIC_LVR 0x30
#define APIC_LVR_MASK 0xFF00FF
#define GET_APIC_VERSION(x) ((x)&0xFFu)
#define GET_APIC_MAXLVT(x) (((x)>>16)&0xFFu)
#define APIC_INTEGRATED(x) ((x)&0xF0u)
#define APIC_TASKPRI 0x80
#define APIC_TPRI_MASK 0xFFu
#define APIC_ARBPRI 0x90
#define APIC_ARBPRI_MASK 0xFFu
#define APIC_PROCPRI 0xA0
#define APIC_EOI 0xB0
#define APIC_EIO_ACK 0x0 /* Write this to the EOI register */
#define APIC_RRR 0xC0
#define APIC_LDR 0xD0
#define APIC_LDR_MASK (0xFFu<<24)
#define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFFu)
#define SET_APIC_LOGICAL_ID(x) (((x)<<24))
#define APIC_ALL_CPUS 0xFFu
#define APIC_DFR 0xE0
#define APIC_DFR_CLUSTER 0x0FFFFFFFul
#define APIC_DFR_FLAT 0xFFFFFFFFul
#define APIC_SPIV 0xF0
#define APIC_SPIV_FOCUS_DISABLED (1<<9)
#define APIC_SPIV_APIC_ENABLED (1<<8)
#define APIC_ISR 0x100
#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */
#define APIC_TMR 0x180
#define APIC_IRR 0x200
#define APIC_ESR 0x280
#define APIC_ESR_SEND_CS 0x00001
#define APIC_ESR_RECV_CS 0x00002
#define APIC_ESR_SEND_ACC 0x00004
#define APIC_ESR_RECV_ACC 0x00008
#define APIC_ESR_SENDILL 0x00020
#define APIC_ESR_RECVILL 0x00040
#define APIC_ESR_ILLREGA 0x00080
#define APIC_ICR 0x300
#define APIC_DEST_SELF 0x40000
#define APIC_DEST_ALLINC 0x80000
#define APIC_DEST_ALLBUT 0xC0000
#define APIC_ICR_RR_MASK 0x30000
#define APIC_ICR_RR_INVALID 0x00000
#define APIC_ICR_RR_INPROG 0x10000
#define APIC_ICR_RR_VALID 0x20000
#define APIC_INT_LEVELTRIG 0x08000
#define APIC_INT_ASSERT 0x04000
#define APIC_ICR_BUSY 0x01000
#define APIC_DEST_LOGICAL 0x00800
#define APIC_DEST_PHYSICAL 0x00000
#define APIC_DM_FIXED 0x00000
#define APIC_DM_LOWEST 0x00100
#define APIC_DM_SMI 0x00200
#define APIC_DM_REMRD 0x00300
#define APIC_DM_NMI 0x00400
#define APIC_DM_INIT 0x00500
#define APIC_DM_STARTUP 0x00600
#define APIC_DM_EXTINT 0x00700
#define APIC_VECTOR_MASK 0x000FF
#define APIC_ICR2 0x310
#define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF)
#define SET_APIC_DEST_FIELD(x) ((x)<<24)
#define APIC_LVTT 0x320
#define APIC_LVTTHMR 0x330
#define APIC_LVTPC 0x340
#define APIC_LVT0 0x350
#define APIC_LVT_TIMER_BASE_MASK (0x3<<18)
#define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3)
#define SET_APIC_TIMER_BASE(x) (((x)<<18))
#define APIC_TIMER_BASE_CLKIN 0x0
#define APIC_TIMER_BASE_TMBASE 0x1
#define APIC_TIMER_BASE_DIV 0x2
#define APIC_LVT_TIMER_PERIODIC (1<<17)
#define APIC_LVT_MASKED (1<<16)
#define APIC_LVT_LEVEL_TRIGGER (1<<15)
#define APIC_LVT_REMOTE_IRR (1<<14)
#define APIC_INPUT_POLARITY (1<<13)
#define APIC_SEND_PENDING (1<<12)
#define APIC_MODE_MASK 0x700
#define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7)
#define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8))
#define APIC_MODE_FIXED 0x0
#define APIC_MODE_NMI 0x4
#define APIC_MODE_EXTINT 0x7
#define APIC_LVT1 0x360
#define APIC_LVTERR 0x370
#define APIC_TMICT 0x380
#define APIC_TMCCT 0x390
#define APIC_TDCR 0x3E0
#define APIC_TDR_DIV_TMBASE (1<<2)
#define APIC_TDR_DIV_1 0xB
#define APIC_TDR_DIV_2 0x0
#define APIC_TDR_DIV_4 0x1
#define APIC_TDR_DIV_8 0x2
#define APIC_TDR_DIV_16 0x3
#define APIC_TDR_DIV_32 0x8
#define APIC_TDR_DIV_64 0x9
#define APIC_TDR_DIV_128 0xA
#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
#define MAX_IO_APICS 128
#define MAX_LOCAL_APIC 256
/*
* All x86-64 systems are xAPIC compatible.
* In the following, "apicid" is a physical APIC ID.
*/
#define XAPIC_DEST_CPUS_SHIFT 4
#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
#define APIC_CLUSTER(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
#define APIC_CLUSTERID(apicid) (APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT)
#define APIC_CPUID(apicid) ((apicid) & XAPIC_DEST_CPUS_MASK)
#define NUM_APIC_CLUSTERS ((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT)
/*
* The local APIC register structure, memory mapped. Not terribly well
* tested, but we might eventually use it in the future - the reason we
* cannot use it right now is the P5 APIC: it has an erratum under which
* it cannot take 8-bit reads and writes, only 32-bit ones ...
*/
#define u32 unsigned int
struct local_apic {
/*000*/ struct { u32 __reserved[4]; } __reserved_01;
/*010*/ struct { u32 __reserved[4]; } __reserved_02;
/*020*/ struct { /* APIC ID Register */
u32 __reserved_1 : 24,
phys_apic_id : 4,
__reserved_2 : 4;
u32 __reserved[3];
} id;
/*030*/ const
struct { /* APIC Version Register */
u32 version : 8,
__reserved_1 : 8,
max_lvt : 8,
__reserved_2 : 8;
u32 __reserved[3];
} version;
/*040*/ struct { u32 __reserved[4]; } __reserved_03;
/*050*/ struct { u32 __reserved[4]; } __reserved_04;
/*060*/ struct { u32 __reserved[4]; } __reserved_05;
/*070*/ struct { u32 __reserved[4]; } __reserved_06;
/*080*/ struct { /* Task Priority Register */
u32 priority : 8,
__reserved_1 : 24;
u32 __reserved_2[3];
} tpr;
/*090*/ const
struct { /* Arbitration Priority Register */
u32 priority : 8,
__reserved_1 : 24;
u32 __reserved_2[3];
} apr;
/*0A0*/ const
struct { /* Processor Priority Register */
u32 priority : 8,
__reserved_1 : 24;
u32 __reserved_2[3];
} ppr;
/*0B0*/ struct { /* End Of Interrupt Register */
u32 eoi;
u32 __reserved[3];
} eoi;
/*0C0*/ struct { u32 __reserved[4]; } __reserved_07;
/*0D0*/ struct { /* Logical Destination Register */
u32 __reserved_1 : 24,
logical_dest : 8;
u32 __reserved_2[3];
} ldr;
/*0E0*/ struct { /* Destination Format Register */
u32 __reserved_1 : 28,
model : 4;
u32 __reserved_2[3];
} dfr;
/*0F0*/ struct { /* Spurious Interrupt Vector Register */
u32 spurious_vector : 8,
apic_enabled : 1,
focus_cpu : 1,
__reserved_2 : 22;
u32 __reserved_3[3];
} svr;
/*100*/ struct { /* In Service Register */
/*170*/ u32 bitfield;
u32 __reserved[3];
} isr [8];
/*180*/ struct { /* Trigger Mode Register */
/*1F0*/ u32 bitfield;
u32 __reserved[3];
} tmr [8];
/*200*/ struct { /* Interrupt Request Register */
/*270*/ u32 bitfield;
u32 __reserved[3];
} irr [8];
/*280*/ union { /* Error Status Register */
struct {
u32 send_cs_error : 1,
receive_cs_error : 1,
send_accept_error : 1,
receive_accept_error : 1,
__reserved_1 : 1,
send_illegal_vector : 1,
receive_illegal_vector : 1,
illegal_register_address : 1,
__reserved_2 : 24;
u32 __reserved_3[3];
} error_bits;
struct {
u32 errors;
u32 __reserved_3[3];
} all_errors;
} esr;
/*290*/ struct { u32 __reserved[4]; } __reserved_08;
/*2A0*/ struct { u32 __reserved[4]; } __reserved_09;
/*2B0*/ struct { u32 __reserved[4]; } __reserved_10;
/*2C0*/ struct { u32 __reserved[4]; } __reserved_11;
/*2D0*/ struct { u32 __reserved[4]; } __reserved_12;
/*2E0*/ struct { u32 __reserved[4]; } __reserved_13;
/*2F0*/ struct { u32 __reserved[4]; } __reserved_14;
/*300*/ struct { /* Interrupt Command Register 1 */
u32 vector : 8,
delivery_mode : 3,
destination_mode : 1,
delivery_status : 1,
__reserved_1 : 1,
level : 1,
trigger : 1,
__reserved_2 : 2,
shorthand : 2,
__reserved_3 : 12;
u32 __reserved_4[3];
} icr1;
/*310*/ struct { /* Interrupt Command Register 2 */
union {
u32 __reserved_1 : 24,
phys_dest : 4,
__reserved_2 : 4;
u32 __reserved_3 : 24,
logical_dest : 8;
} dest;
u32 __reserved_4[3];
} icr2;
/*320*/ struct { /* LVT - Timer */
u32 vector : 8,
__reserved_1 : 4,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
timer_mode : 1,
__reserved_3 : 14;
u32 __reserved_4[3];
} lvt_timer;
/*330*/ struct { /* LVT - Thermal Sensor */
u32 vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
__reserved_3 : 15;
u32 __reserved_4[3];
} lvt_thermal;
/*340*/ struct { /* LVT - Performance Counter */
u32 vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
__reserved_3 : 15;
u32 __reserved_4[3];
} lvt_pc;
/*350*/ struct { /* LVT - LINT0 */
u32 vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
polarity : 1,
remote_irr : 1,
trigger : 1,
mask : 1,
__reserved_2 : 15;
u32 __reserved_3[3];
} lvt_lint0;
/*360*/ struct { /* LVT - LINT1 */
u32 vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
polarity : 1,
remote_irr : 1,
trigger : 1,
mask : 1,
__reserved_2 : 15;
u32 __reserved_3[3];
} lvt_lint1;
/*370*/ struct { /* LVT - Error */
u32 vector : 8,
__reserved_1 : 4,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
__reserved_3 : 15;
u32 __reserved_4[3];
} lvt_error;
/*380*/ struct { /* Timer Initial Count Register */
u32 initial_count;
u32 __reserved_2[3];
} timer_icr;
/*390*/ const
struct { /* Timer Current Count Register */
u32 curr_count;
u32 __reserved_2[3];
} timer_ccr;
/*3A0*/ struct { u32 __reserved[4]; } __reserved_16;
/*3B0*/ struct { u32 __reserved[4]; } __reserved_17;
/*3C0*/ struct { u32 __reserved[4]; } __reserved_18;
/*3D0*/ struct { u32 __reserved[4]; } __reserved_19;
/*3E0*/ struct { /* Timer Divide Configuration Register */
u32 divisor : 4,
__reserved_1 : 28;
u32 __reserved_2[3];
} timer_dcr;
/*3F0*/ struct { u32 __reserved[4]; } __reserved_20;
} __attribute__ ((packed));
#undef u32
#define BAD_APICID 0xFFu
#endif
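
As an illustration of the ICR definitions above, a hypothetical sketch of sending a fixed-vector IPI to one CPU in physical destination mode. The destination field goes into ICR2 first, because the write to the low ICR word is what triggers the send; apic_write() and apic_wait_icr_idle() come from <asm/apic.h>.

/* Hypothetical: send a fixed IPI to the CPU with the given APIC ID. */
static void send_fixed_ipi(unsigned int apicid, unsigned int vector)
{
    apic_wait_icr_idle();
    apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
    apic_write(APIC_ICR, APIC_DEST_PHYSICAL | APIC_DM_FIXED | vector);
}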

@@ -0,0 +1,608 @@
/*
* Portions Copyright 2008, 2010 VMware, Inc.
*/
#ifndef __ARCH_X86_64_ATOMIC__
#define __ARCH_X86_64_ATOMIC__
#include <asm/alternative.h>
#if defined(__VMKLNX__)
#include "vmkapi.h"
#endif /* defined(__VMKLNX__) */
/* atomic_t should be a 32-bit signed type */
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc.
*/
#ifdef CONFIG_SMP
#define LOCK "lock ; "
#else
#define LOCK ""
#endif
/*
* Make sure gcc doesn't try to be clever and move things around
* on us. We need to use _exactly_ the address the user gave us,
* not some alias that contains the same information.
*/
typedef struct { volatile int counter; } atomic_t;
#define ATOMIC_INIT(i) { (i) }
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v.
*/
#define atomic_read(v) ((v)->counter)
/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
#define atomic_set(v,i) (((v)->counter) = (i))
/**
* atomic_add - add integer to atomic variable
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v.
*/
/* _VMKLNX_CODECHECK_: atomic_add */
static __inline__ void atomic_add(int i, atomic_t *v)
{
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "addl %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}
/**
* atomic_sub - subtract the atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v.
*/
/* _VMKLNX_CODECHECK_: atomic_sub */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "subl %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}
/**
* atomic_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
unsigned char c;
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "subl %2,%0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"ir" (i), "m" (v->counter) : "memory");
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
return c;
}
/**
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1.
*/
/* _VMKLNX_CODECHECK_: atomic_inc */
static __inline__ void atomic_inc(atomic_t *v)
{
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "incl %0"
:"=m" (v->counter)
:"m" (v->counter));
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}
/**
* atomic_dec - decrement atomic variable
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1.
*/
/* _VMKLNX_CODECHECK_: atomic_dec */
static __inline__ void atomic_dec(atomic_t *v)
{
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "decl %0"
:"=m" (v->counter)
:"m" (v->counter));
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}
/**
* atomic_dec_and_test - decrement and test
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
/* _VMKLNX_CODECHECK_: atomic_dec_and_test */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
unsigned char c;
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "decl %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
return c != 0;
}
/**
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
unsigned char c;
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "incl %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
return c != 0;
}
/**
* atomic_add_negative - add and test if negative
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
unsigned char c;
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "addl %2,%0; sets %1"
:"=m" (v->counter), "=qm" (c)
:"ir" (i), "m" (v->counter) : "memory");
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
return c;
}
/**
* atomic_add_return - add and return
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v and returns @i + @v
*/
/* _VMKLNX_CODECHECK_: atomic_add_return */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
int __i = i;
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "xaddl %0, %1;"
:"=r"(i)
:"m"(v->counter), "0"(i));
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
return i + __i;
}
static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
return atomic_add_return(-i,v);
}
/**
* atomic_inc_return - increment by 1 and return
* @v: integer value to increment
*
* Atomically increments @v by 1 and returns @v + 1
*
* SYNOPSIS:
* #define atomic_inc_return(v)
*
* RETURN VALUE:
* Returns @v + 1
*/
/* _VMKLNX_CODECHECK_: atomic_inc_return */
#define atomic_inc_return(v) (atomic_add_return(1,v))
#define atomic_dec_return(v) (atomic_sub_return(1,v))
/* A 64-bit atomic type */
typedef struct { volatile long counter; } atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
/**
* atomic64_read - read atomic64 variable
* @v: pointer of type atomic64_t
*
* Atomically reads the value of @v.
* Doesn't imply a read memory barrier.
*/
#define atomic64_read(v) ((v)->counter)
/**
* atomic64_set - set atomic64 variable
* @v: pointer to type atomic64_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
#if defined(__VMKLNX__)
static __inline__ void atomic64_set(atomic64_t *v, long i)
{
/*
* Ensure that we do a single movq. Without this, the compiler
* may split a write of a constant into two movl operations.
*/
__asm__ __volatile__(
"movq %1, %0"
: "+m" (v->counter)
: "r" (i)
);
}
#else /* !defined(__VMKLNX__) */
#define atomic64_set(v,i) (((v)->counter) = (i))
#endif /* defined(__VMKLNX__) */
/**
* atomic64_add - add integer to atomic64 variable
* @i: integer value to add
* @v: pointer to type atomic64_t
*
* Atomically adds @i to @v.
*/
static __inline__ void atomic64_add(long i, atomic64_t *v)
{
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "addq %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}
/**
* atomic64_sub - subtract the atomic64 variable
* @i: integer value to subtract
* @v: pointer to type atomic64_t
*
* Atomically subtracts @i from @v.
*/
static __inline__ void atomic64_sub(long i, atomic64_t *v)
{
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "subq %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}
/**
* atomic64_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer to type atomic64_t
*
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
{
unsigned char c;
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "subq %2,%0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"ir" (i), "m" (v->counter) : "memory");
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
return c;
}
/**
* atomic64_inc - increment atomic64 variable
* @v: pointer to type atomic64_t
*
* Atomically increments @v by 1.
*/
static __inline__ void atomic64_inc(atomic64_t *v)
{
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "incq %0"
:"=m" (v->counter)
:"m" (v->counter));
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}
/**
* atomic64_dec - decrement atomic64 variable
* @v: pointer to type atomic64_t
*
* Atomically decrements @v by 1.
*/
static __inline__ void atomic64_dec(atomic64_t *v)
{
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "decq %0"
:"=m" (v->counter)
:"m" (v->counter));
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}
/**
* atomic64_dec_and_test - decrement and test
* @v: pointer to type atomic64_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
static __inline__ int atomic64_dec_and_test(atomic64_t *v)
{
unsigned char c;
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "decq %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
return c != 0;
}
/**
* atomic64_inc_and_test - increment and test
* @v: pointer to type atomic64_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
static __inline__ int atomic64_inc_and_test(atomic64_t *v)
{
unsigned char c;
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "incq %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
return c != 0;
}
/**
* atomic64_add_negative - add and test if negative
* @i: integer value to add
* @v: pointer to type atomic64_t
*
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
{
unsigned char c;
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "addq %2,%0; sets %1"
:"=m" (v->counter), "=qm" (c)
:"ir" (i), "m" (v->counter) : "memory");
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
return c;
}
/**
* atomic64_add_return - add and return
* @i: integer value to add
* @v: pointer to type atomic64_t
*
* Atomically adds @i to @v and returns @i + @v
*/
static __inline__ long atomic64_add_return(long i, atomic64_t *v)
{
long __i = i;
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "xaddq %0, %1;"
:"=r"(i)
:"m"(v->counter), "0"(i));
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
return i + __i;
}
static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
{
return atomic64_add_return(-i,v);
}
#define atomic64_inc_return(v) (atomic64_add_return(1,v))
#define atomic64_dec_return(v) (atomic64_sub_return(1,v))
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
* atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
#define atomic_add_unless(v, a, u) \
({ \
int c, old; \
c = atomic_read(v); \
for (;;) { \
if (unlikely(c == (u))) \
break; \
old = atomic_cmpxchg((v), c, c + (a)); \
if (likely(old == c)) \
break; \
c = old; \
} \
c != (u); \
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
/* These are x86-specific, used by some header files */
#if defined(__VMKLNX__)
#define atomic_clear_mask(mask, addr) \
do { \
vmk_AtomicPrologue(); \
__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)),"m" (*addr) : "memory") ; \
vmk_AtomicEpilogue(); \
} while (0)
#define atomic_set_mask(mask, addr) \
do { \
vmk_AtomicPrologue(); \
__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
: : "r" ((unsigned)mask),"m" (*(addr)) : "memory"); \
vmk_AtomicEpilogue(); \
} while (0)
#else /* !defined(__VMKLNX__) */
#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)),"m" (*addr) : "memory")
#define atomic_set_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
: : "r" ((unsigned)mask),"m" (*(addr)) : "memory")
#endif /* defined(__VMKLNX__) */
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#include <asm-generic/atomic.h>
#endif
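
A short sketch of the classic pattern these primitives exist for, reference counting: whichever caller drops the count to zero frees the object. struct obj and the kfree() call (from <linux/slab.h>) are illustrative assumptions, not part of this header.

/* Hypothetical reference counting built on atomic_t. */
struct obj {
    atomic_t refcnt; /* set up with ATOMIC_INIT(1) at creation */
};

static void obj_get(struct obj *o)
{
    atomic_inc(&o->refcnt);
}

static void obj_put(struct obj *o)
{
    if (atomic_dec_and_test(&o->refcnt))
        kfree(o); /* last reference dropped */
}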

@@ -0,0 +1,4 @@
#ifndef __ASM_X86_64_AUXVEC_H
#define __ASM_X86_64_AUXVEC_H
#endif

View file

@ -0,0 +1,729 @@
/*
* Portions Copyright 2008, 2009 VMware, Inc.
*/