ESXi-4.1-U3

This commit is contained in:
unknown 2015-10-23 15:21:55 -04:00
commit 0d186246d2
1346 changed files with 1702699 additions and 0 deletions

View file

@@ -0,0 +1,10 @@
/*
* 8253/8254 Programmable Interval Timer
*/
#ifndef _8253PIT_H
#define _8253PIT_H
#define PIT_TICK_RATE 1193182UL
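/*
 * Illustrative usage sketch: the classic kernel formula for deriving
 * the PIT countdown latch from a desired timer interrupt rate HZ,
 * rounding to the nearest tick (upstream spells this LATCH, built on
 * CLOCK_TICK_RATE, which equals PIT_TICK_RATE on PC hardware):
 *
 *   #define LATCH ((PIT_TICK_RATE + HZ / 2) / HZ)
 */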
#endif

View file

@@ -0,0 +1,27 @@
#ifndef __X8664_A_OUT_H__
#define __X8664_A_OUT_H__
/* 32bit a.out */
struct exec
{
unsigned int a_info; /* Use macros N_MAGIC, etc for access */
unsigned a_text; /* length of text, in bytes */
unsigned a_data; /* length of data, in bytes */
unsigned a_bss; /* length of uninitialized data area for file, in bytes */
unsigned a_syms; /* length of symbol table data in file, in bytes */
unsigned a_entry; /* start address */
unsigned a_trsize; /* length of relocation info for text, in bytes */
unsigned a_drsize; /* length of relocation info for data, in bytes */
};
#define N_TRSIZE(a) ((a).a_trsize)
#define N_DRSIZE(a) ((a).a_drsize)
#define N_SYMSIZE(a) ((a).a_syms)
#ifdef __KERNEL__
#include <linux/thread_info.h>
#define STACK_TOP TASK_SIZE
#endif
#endif /* __X8664_A_OUT_H__ */

View file

@@ -0,0 +1,171 @@
/*
* asm-x86_64/acpi.h
*
* Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#ifndef _ASM_ACPI_H
#define _ASM_ACPI_H
#ifdef __KERNEL__
#include <acpi/pdc_intel.h>
#define COMPILER_DEPENDENT_INT64 long long
#define COMPILER_DEPENDENT_UINT64 unsigned long long
/*
* Calling conventions:
*
* ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
* ACPI_EXTERNAL_XFACE - External ACPI interfaces
* ACPI_INTERNAL_XFACE - Internal ACPI interfaces
* ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
*/
#define ACPI_SYSTEM_XFACE
#define ACPI_EXTERNAL_XFACE
#define ACPI_INTERNAL_XFACE
#define ACPI_INTERNAL_VAR_XFACE
/* Asm macros */
#define ACPI_ASM_MACROS
#define BREAKPOINT3
#define ACPI_DISABLE_IRQS() local_irq_disable()
#define ACPI_ENABLE_IRQS() local_irq_enable()
#define ACPI_FLUSH_CPU_CACHE() wbinvd()
static inline int
__acpi_acquire_global_lock (unsigned int *lock)
{
unsigned int old, new, val;
do {
old = *lock;
new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
val = cmpxchg(lock, old, new);
} while (unlikely (val != old));
return (new < 3) ? -1 : 0;
}
static inline int
__acpi_release_global_lock (unsigned int *lock)
{
unsigned int old, new, val;
do {
old = *lock;
new = old & ~0x3;
val = cmpxchg(lock, old, new);
} while (unlikely (val != old));
return old & 0x1;
}
#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \
((Acq) = __acpi_acquire_global_lock((unsigned int *) GLptr))
#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \
((Acq) = __acpi_release_global_lock((unsigned int *) GLptr))
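/*
 * Illustrative usage sketch (hypothetical caller; gl_ptr would point at
 * the FACS global-lock word): a non-zero Acq means the lock was taken,
 * zero means the firmware owns it and we are now marked pending; a
 * non-zero result from release means the firmware must be signalled
 * that the lock is free.
 *
 *   int acquired, pending;
 *
 *   ACPI_ACQUIRE_GLOBAL_LOCK(gl_ptr, acquired);
 *   if (acquired) {
 *           ... touch the shared hardware ...
 *           ACPI_RELEASE_GLOBAL_LOCK(gl_ptr, pending);
 *           if (pending)
 *                   ... write GBL_RLS to notify the firmware ...
 *   }
 */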
/*
* Math helper asm macros
*/
#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
asm("divl %2;" \
:"=a"(q32), "=d"(r32) \
:"r"(d32), \
"0"(n_lo), "1"(n_hi))
#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
asm("shrl $1,%2;" \
"rcrl $1,%3;" \
:"=r"(n_hi), "=r"(n_lo) \
:"0"(n_hi), "1"(n_lo))
#ifdef CONFIG_ACPI
extern int acpi_lapic;
extern int acpi_ioapic;
extern int acpi_noirq;
extern int acpi_strict;
extern int acpi_disabled;
extern int acpi_pci_disabled;
extern int acpi_ht;
static inline void disable_acpi(void)
{
acpi_disabled = 1;
acpi_ht = 0;
acpi_pci_disabled = 1;
acpi_noirq = 1;
}
/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */
#define FIX_ACPI_PAGES 4
extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
static inline void acpi_disable_pci(void)
{
acpi_pci_disabled = 1;
acpi_noirq_set();
}
extern int acpi_irq_balance_set(char *str);
#else /* !CONFIG_ACPI */
#define acpi_lapic 0
#define acpi_ioapic 0
static inline void acpi_noirq_set(void) { }
static inline void acpi_disable_pci(void) { }
#endif /* !CONFIG_ACPI */
extern int acpi_numa;
extern int acpi_scan_nodes(unsigned long start, unsigned long end);
#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
#ifdef CONFIG_ACPI_SLEEP
/* routines for saving/restoring kernel state */
extern int acpi_save_state_mem(void);
extern void acpi_restore_state_mem(void);
extern unsigned long acpi_wakeup_address;
/* early initialization routine */
extern void acpi_reserve_bootmem(void);
#endif /*CONFIG_ACPI_SLEEP*/
#define boot_cpu_physical_apicid boot_cpu_id
extern int acpi_disabled;
extern int acpi_pci_disabled;
extern u8 x86_acpiid_to_apicid[];
#define ARCH_HAS_POWER_INIT 1
extern int acpi_skip_timer_override;
#endif /*__KERNEL__*/
#endif /*_ASM_ACPI_H*/

View file

@@ -0,0 +1,32 @@
#ifndef AGP_H
#define AGP_H 1
#include <asm/cacheflush.h>
/*
* Functions to keep the agpgart mappings coherent.
* The GART gives the CPU a physical alias of memory. The alias is
* mapped uncacheable. Make sure there are no conflicting mappings
* with different cacheability attributes for the same page.
*/
int map_page_into_agp(struct page *page);
int unmap_page_from_agp(struct page *page);
#define flush_agp_mappings() global_flush_tlb()
/* CLFLUSH could be used here if the CPU supports it. But then it would
   need to be issued for each cache line of the whole page, so it may not
   be worth it. */
#define flush_agp_cache() asm volatile("wbinvd":::"memory")
/* Convert a physical address to an address suitable for the GART. */
#define phys_to_gart(x) (x)
#define gart_to_phys(x) (x)
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order) \
free_pages((unsigned long)(table), (order))
#endif

View file

@@ -0,0 +1,136 @@
#ifndef _X86_64_ALTERNATIVE_H
#define _X86_64_ALTERNATIVE_H
#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cpufeature.h>
struct alt_instr {
u8 *instr; /* original instruction */
u8 *replacement;
u8 cpuid; /* cpuid bit set for replacement */
u8 instrlen; /* length of original instruction */
u8 replacementlen; /* length of new instruction, <= instrlen */
u8 pad[5];
};
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
struct module;
#ifdef CONFIG_SMP
extern void alternatives_smp_module_add(struct module *mod, char *name,
void *locks, void *locks_end,
void *text, void *text_end);
extern void alternatives_smp_module_del(struct module *mod);
extern void alternatives_smp_switch(int smp);
#else
static inline void alternatives_smp_module_add(struct module *mod, char *name,
void *locks, void *locks_end,
void *text, void *text_end) {}
static inline void alternatives_smp_module_del(struct module *mod) {}
static inline void alternatives_smp_switch(int smp) {}
#endif
#endif
/*
* Alternative instructions for different CPU types or capabilities.
*
* This allows the use of optimized instructions even on generic binary
* kernels.
*
* The length of oldinstr must be greater than or equal to the length of
* newinstr; the patched site is padded with nops as needed.
*
* For non-barrier-like inlines, please define new variants
* without volatile and the memory clobber.
*/
#define alternative(oldinstr, newinstr, feature) \
asm volatile ("661:\n\t" oldinstr "\n662:\n" \
".section .altinstructions,\"a\"\n" \
" .align 8\n" \
" .quad 661b\n" /* label */ \
" .quad 663f\n" /* new instruction */ \
" .byte %c0\n" /* feature bit */ \
" .byte 662b-661b\n" /* sourcelen */ \
" .byte 664f-663f\n" /* replacementlen */ \
".previous\n" \
".section .altinstr_replacement,\"ax\"\n" \
"663:\n\t" newinstr "\n664:\n" /* replacement */ \
".previous" :: "i" (feature) : "memory")
/*
* Alternative inline assembly with input.
*
* Peculiarities:
* No memory clobber here.
* Argument numbers start with 1.
* It is best to use fixed-size constraints (like (%1) ... "r").
* If you use variable-sized constraints like "m" or "g" in the
* replacement, make sure to pad to the worst-case length.
*/
#define alternative_input(oldinstr, newinstr, feature, input...) \
asm volatile ("661:\n\t" oldinstr "\n662:\n" \
".section .altinstructions,\"a\"\n" \
" .align 8\n" \
" .quad 661b\n" /* label */ \
" .quad 663f\n" /* new instruction */ \
" .byte %c0\n" /* feature bit */ \
" .byte 662b-661b\n" /* sourcelen */ \
" .byte 664f-663f\n" /* replacementlen */ \
".previous\n" \
".section .altinstr_replacement,\"ax\"\n" \
"663:\n\t" newinstr "\n664:\n" /* replacement */ \
".previous" :: "i" (feature), ##input)
/* Like alternative_input, but with a single output argument */
#define alternative_io(oldinstr, newinstr, feature, output, input...) \
asm volatile ("661:\n\t" oldinstr "\n662:\n" \
".section .altinstructions,\"a\"\n" \
" .align 8\n" \
" .quad 661b\n" /* label */ \
" .quad 663f\n" /* new instruction */ \
" .byte %c[feat]\n" /* feature bit */ \
" .byte 662b-661b\n" /* sourcelen */ \
" .byte 664f-663f\n" /* replacementlen */ \
".previous\n" \
".section .altinstr_replacement,\"ax\"\n" \
"663:\n\t" newinstr "\n664:\n" /* replacement */ \
".previous" : output : [feat] "i" (feature), ##input)
/*
* Alternative inline assembly for SMP.
*
* The LOCK_PREFIX macro defined here replaces the LOCK and
* LOCK_PREFIX macros used everywhere in the source tree.
*
* SMP alternatives use the same data structures as the other
* alternatives and the X86_FEATURE_UP flag to indicate the case of a
* UP system running an SMP kernel. The existing apply_alternatives()
* works fine for patching an SMP kernel for UP.
*
* The SMP alternative tables can be kept after boot and contain both
* UP and SMP versions of the instructions to allow switching back to
* SMP at runtime, when hotplugging in a new CPU, which is especially
* useful in virtualized environments.
*
* The very common lock prefix is handled as a special case in a
* separate table which is a pure address list without replacement ptr
* and size information. That keeps the table sizes small.
*/
#ifdef CONFIG_SMP
#define LOCK_PREFIX \
".section .smp_locks,\"a\"\n" \
" .align 8\n" \
" .quad 661f\n" /* address */ \
".previous\n" \
"661:\n\tlock; "
#else /* ! CONFIG_SMP */
#define LOCK_PREFIX ""
#endif
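/*
 * Illustrative expansion sketch: callers splice LOCK_PREFIX in front
 * of a read-modify-write instruction, as the atomic and bitop headers
 * in this tree do; under CONFIG_SMP the 661: label records the lock
 * byte's address in .smp_locks so it can be patched out on UP.
 *
 *   __asm__ __volatile__(LOCK_PREFIX "incl %0" : "+m" (counter));
 */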
#endif /* _X86_64_ALTERNATIVE_H */

View file

@@ -0,0 +1,113 @@
#ifndef __ASM_APIC_H
#define __ASM_APIC_H
#include <linux/pm.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/system.h>
#define Dprintk(x...)
/*
* Debugging macros
*/
#define APIC_QUIET 0
#define APIC_VERBOSE 1
#define APIC_DEBUG 2
extern int apic_verbosity;
extern int apic_runs_main_timer;
/*
* Define the default level of output to be very little.
* This can be turned up by using apic=verbose for more
* information and apic=debug for _lots_ of information.
* apic_verbosity is defined in apic.c
*/
#define apic_printk(v, s, a...) do { \
if ((v) <= apic_verbosity) \
printk(s, ##a); \
} while (0)
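/*
 * Illustrative usage sketch (hypothetical message): printed only when
 * the user boots with apic=debug.
 *
 *   apic_printk(APIC_DEBUG, "IOAPIC %d mapped at %p\n", idx, addr);
 */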
#ifdef CONFIG_X86_LOCAL_APIC
struct pt_regs;
/*
* Basic functions accessing APICs.
*/
static __inline void apic_write(unsigned long reg, unsigned int v)
{
*((volatile unsigned int *)(APIC_BASE+reg)) = v;
}
static __inline unsigned int apic_read(unsigned long reg)
{
return *((volatile unsigned int *)(APIC_BASE+reg));
}
static __inline__ void apic_wait_icr_idle(void)
{
while (apic_read( APIC_ICR ) & APIC_ICR_BUSY)
cpu_relax();
}
static inline void ack_APIC_irq(void)
{
/*
* ack_APIC_irq() actually gets compiled as a single instruction:
* - a single rmw on Pentium/82489DX
* - a single write on P6+ cores (CONFIG_X86_GOOD_APIC)
* ... yummie.
*/
/* Docs say use 0 for future compatibility */
apic_write(APIC_EOI, 0);
}
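/*
 * Illustrative usage sketch (assumes the apicdef.h constants defined
 * elsewhere in this tree; real startup code adds retries and status
 * checks): sending an INIT IPI to a remote CPU combines these
 * accessors with the two ICR halves.
 *
 *   apic_wait_icr_idle();
 *   apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
 *   apic_write(APIC_ICR, APIC_INT_ASSERT | APIC_DM_INIT);
 */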
extern int get_maxlvt (void);
extern void clear_local_APIC (void);
extern void connect_bsp_APIC (void);
extern void disconnect_bsp_APIC (int virt_wire_setup);
extern void disable_local_APIC (void);
extern int verify_local_APIC (void);
extern void cache_APIC_registers (void);
extern void sync_Arb_IDs (void);
extern void init_bsp_APIC (void);
extern void setup_local_APIC (void);
extern void init_apic_mappings (void);
extern void smp_local_timer_interrupt (struct pt_regs * regs);
extern void setup_boot_APIC_clock (void);
extern void setup_secondary_APIC_clock (void);
extern int APIC_init_uniprocessor (void);
extern void disable_APIC_timer(void);
extern void enable_APIC_timer(void);
extern void clustered_apic_check(void);
extern void setup_APIC_extened_lvt(unsigned char lvt_off, unsigned char vector,
unsigned char msg_type, unsigned char mask);
#define K8_APIC_EXT_LVT_BASE 0x500
#define K8_APIC_EXT_INT_MSG_FIX 0x0
#define K8_APIC_EXT_INT_MSG_SMI 0x2
#define K8_APIC_EXT_INT_MSG_NMI 0x4
#define K8_APIC_EXT_INT_MSG_EXT 0x7
#define K8_APIC_EXT_LVT_ENTRY_THRESHOLD 0
extern int disable_timer_pin_1;
#ifndef CONFIG_XEN
void smp_send_timer_broadcast_ipi(void);
void switch_APIC_timer_to_ipi(void *cpumask);
void switch_ipi_to_APIC_timer(void *cpumask);
#define ARCH_APICTIMER_STOPS_ON_C3 1
#endif
#endif /* CONFIG_X86_LOCAL_APIC */
extern unsigned boot_cpu_id;
#endif /* __ASM_APIC_H */

View file

@@ -0,0 +1,392 @@
#ifndef __ASM_APICDEF_H
#define __ASM_APICDEF_H
/*
* Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
*
* Alan Cox <Alan.Cox@linux.org>, 1995.
* Ingo Molnar <mingo@redhat.com>, 1999, 2000
*/
#define APIC_DEFAULT_PHYS_BASE 0xfee00000
#define APIC_ID 0x20
#define APIC_ID_MASK (0xFFu<<24)
#define GET_APIC_ID(x) (((x)>>24)&0xFFu)
#define SET_APIC_ID(x) (((x)<<24))
#define APIC_LVR 0x30
#define APIC_LVR_MASK 0xFF00FF
#define GET_APIC_VERSION(x) ((x)&0xFFu)
#define GET_APIC_MAXLVT(x) (((x)>>16)&0xFFu)
#define APIC_INTEGRATED(x) ((x)&0xF0u)
#define APIC_TASKPRI 0x80
#define APIC_TPRI_MASK 0xFFu
#define APIC_ARBPRI 0x90
#define APIC_ARBPRI_MASK 0xFFu
#define APIC_PROCPRI 0xA0
#define APIC_EOI 0xB0
#define APIC_EIO_ACK 0x0 /* Write this to the EOI register */
#define APIC_RRR 0xC0
#define APIC_LDR 0xD0
#define APIC_LDR_MASK (0xFFu<<24)
#define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFFu)
#define SET_APIC_LOGICAL_ID(x) (((x)<<24))
#define APIC_ALL_CPUS 0xFFu
#define APIC_DFR 0xE0
#define APIC_DFR_CLUSTER 0x0FFFFFFFul
#define APIC_DFR_FLAT 0xFFFFFFFFul
#define APIC_SPIV 0xF0
#define APIC_SPIV_FOCUS_DISABLED (1<<9)
#define APIC_SPIV_APIC_ENABLED (1<<8)
#define APIC_ISR 0x100
#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */
#define APIC_TMR 0x180
#define APIC_IRR 0x200
#define APIC_ESR 0x280
#define APIC_ESR_SEND_CS 0x00001
#define APIC_ESR_RECV_CS 0x00002
#define APIC_ESR_SEND_ACC 0x00004
#define APIC_ESR_RECV_ACC 0x00008
#define APIC_ESR_SENDILL 0x00020
#define APIC_ESR_RECVILL 0x00040
#define APIC_ESR_ILLREGA 0x00080
#define APIC_ICR 0x300
#define APIC_DEST_SELF 0x40000
#define APIC_DEST_ALLINC 0x80000
#define APIC_DEST_ALLBUT 0xC0000
#define APIC_ICR_RR_MASK 0x30000
#define APIC_ICR_RR_INVALID 0x00000
#define APIC_ICR_RR_INPROG 0x10000
#define APIC_ICR_RR_VALID 0x20000
#define APIC_INT_LEVELTRIG 0x08000
#define APIC_INT_ASSERT 0x04000
#define APIC_ICR_BUSY 0x01000
#define APIC_DEST_LOGICAL 0x00800
#define APIC_DEST_PHYSICAL 0x00000
#define APIC_DM_FIXED 0x00000
#define APIC_DM_LOWEST 0x00100
#define APIC_DM_SMI 0x00200
#define APIC_DM_REMRD 0x00300
#define APIC_DM_NMI 0x00400
#define APIC_DM_INIT 0x00500
#define APIC_DM_STARTUP 0x00600
#define APIC_DM_EXTINT 0x00700
#define APIC_VECTOR_MASK 0x000FF
#define APIC_ICR2 0x310
#define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF)
#define SET_APIC_DEST_FIELD(x) ((x)<<24)
#define APIC_LVTT 0x320
#define APIC_LVTTHMR 0x330
#define APIC_LVTPC 0x340
#define APIC_LVT0 0x350
#define APIC_LVT_TIMER_BASE_MASK (0x3<<18)
#define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3)
#define SET_APIC_TIMER_BASE(x) (((x)<<18))
#define APIC_TIMER_BASE_CLKIN 0x0
#define APIC_TIMER_BASE_TMBASE 0x1
#define APIC_TIMER_BASE_DIV 0x2
#define APIC_LVT_TIMER_PERIODIC (1<<17)
#define APIC_LVT_MASKED (1<<16)
#define APIC_LVT_LEVEL_TRIGGER (1<<15)
#define APIC_LVT_REMOTE_IRR (1<<14)
#define APIC_INPUT_POLARITY (1<<13)
#define APIC_SEND_PENDING (1<<12)
#define APIC_MODE_MASK 0x700
#define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7)
#define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8))
#define APIC_MODE_FIXED 0x0
#define APIC_MODE_NMI 0x4
#define APIC_MODE_EXTINT 0x7
#define APIC_LVT1 0x360
#define APIC_LVTERR 0x370
#define APIC_TMICT 0x380
#define APIC_TMCCT 0x390
#define APIC_TDCR 0x3E0
#define APIC_TDR_DIV_TMBASE (1<<2)
#define APIC_TDR_DIV_1 0xB
#define APIC_TDR_DIV_2 0x0
#define APIC_TDR_DIV_4 0x1
#define APIC_TDR_DIV_8 0x2
#define APIC_TDR_DIV_16 0x3
#define APIC_TDR_DIV_32 0x8
#define APIC_TDR_DIV_64 0x9
#define APIC_TDR_DIV_128 0xA
#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
#define MAX_IO_APICS 128
#define MAX_LOCAL_APIC 256
/*
* All x86-64 systems are xAPIC compatible.
* In the following, "apicid" is a physical APIC ID.
*/
#define XAPIC_DEST_CPUS_SHIFT 4
#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
#define APIC_CLUSTER(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
#define APIC_CLUSTERID(apicid) (APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT)
#define APIC_CPUID(apicid) ((apicid) & XAPIC_DEST_CPUS_MASK)
#define NUM_APIC_CLUSTERS ((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT)
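/*
 * Worked example: for physical APIC ID 0x23, APIC_CLUSTER() yields
 * 0x20, APIC_CLUSTERID() yields 0x2 and APIC_CPUID() yields 0x3.
 */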
/*
* The local APIC register structure, memory mapped. Not terribly well
* tested, but we might eventually use it in the future - the reason we
* cannot use it right now is the P5 APIC: it has an erratum that
* disallows 8-bit reads and writes; only 32-bit accesses work ...
*/
#define u32 unsigned int
struct local_apic {
/*000*/ struct { u32 __reserved[4]; } __reserved_01;
/*010*/ struct { u32 __reserved[4]; } __reserved_02;
/*020*/ struct { /* APIC ID Register */
u32 __reserved_1 : 24,
phys_apic_id : 4,
__reserved_2 : 4;
u32 __reserved[3];
} id;
/*030*/ const
struct { /* APIC Version Register */
u32 version : 8,
__reserved_1 : 8,
max_lvt : 8,
__reserved_2 : 8;
u32 __reserved[3];
} version;
/*040*/ struct { u32 __reserved[4]; } __reserved_03;
/*050*/ struct { u32 __reserved[4]; } __reserved_04;
/*060*/ struct { u32 __reserved[4]; } __reserved_05;
/*070*/ struct { u32 __reserved[4]; } __reserved_06;
/*080*/ struct { /* Task Priority Register */
u32 priority : 8,
__reserved_1 : 24;
u32 __reserved_2[3];
} tpr;
/*090*/ const
struct { /* Arbitration Priority Register */
u32 priority : 8,
__reserved_1 : 24;
u32 __reserved_2[3];
} apr;
/*0A0*/ const
struct { /* Processor Priority Register */
u32 priority : 8,
__reserved_1 : 24;
u32 __reserved_2[3];
} ppr;
/*0B0*/ struct { /* End Of Interrupt Register */
u32 eoi;
u32 __reserved[3];
} eoi;
/*0C0*/ struct { u32 __reserved[4]; } __reserved_07;
/*0D0*/ struct { /* Logical Destination Register */
u32 __reserved_1 : 24,
logical_dest : 8;
u32 __reserved_2[3];
} ldr;
/*0E0*/ struct { /* Destination Format Register */
u32 __reserved_1 : 28,
model : 4;
u32 __reserved_2[3];
} dfr;
/*0F0*/ struct { /* Spurious Interrupt Vector Register */
u32 spurious_vector : 8,
apic_enabled : 1,
focus_cpu : 1,
__reserved_2 : 22;
u32 __reserved_3[3];
} svr;
/*100*/ struct { /* In Service Register */
/*170*/ u32 bitfield;
u32 __reserved[3];
} isr [8];
/*180*/ struct { /* Trigger Mode Register */
/*1F0*/ u32 bitfield;
u32 __reserved[3];
} tmr [8];
/*200*/ struct { /* Interrupt Request Register */
/*270*/ u32 bitfield;
u32 __reserved[3];
} irr [8];
/*280*/ union { /* Error Status Register */
struct {
u32 send_cs_error : 1,
receive_cs_error : 1,
send_accept_error : 1,
receive_accept_error : 1,
__reserved_1 : 1,
send_illegal_vector : 1,
receive_illegal_vector : 1,
illegal_register_address : 1,
__reserved_2 : 24;
u32 __reserved_3[3];
} error_bits;
struct {
u32 errors;
u32 __reserved_3[3];
} all_errors;
} esr;
/*290*/ struct { u32 __reserved[4]; } __reserved_08;
/*2A0*/ struct { u32 __reserved[4]; } __reserved_09;
/*2B0*/ struct { u32 __reserved[4]; } __reserved_10;
/*2C0*/ struct { u32 __reserved[4]; } __reserved_11;
/*2D0*/ struct { u32 __reserved[4]; } __reserved_12;
/*2E0*/ struct { u32 __reserved[4]; } __reserved_13;
/*2F0*/ struct { u32 __reserved[4]; } __reserved_14;
/*300*/ struct { /* Interrupt Command Register 1 */
u32 vector : 8,
delivery_mode : 3,
destination_mode : 1,
delivery_status : 1,
__reserved_1 : 1,
level : 1,
trigger : 1,
__reserved_2 : 2,
shorthand : 2,
__reserved_3 : 12;
u32 __reserved_4[3];
} icr1;
/*310*/ struct { /* Interrupt Command Register 2 */
union {
u32 __reserved_1 : 24,
phys_dest : 4,
__reserved_2 : 4;
u32 __reserved_3 : 24,
logical_dest : 8;
} dest;
u32 __reserved_4[3];
} icr2;
/*320*/ struct { /* LVT - Timer */
u32 vector : 8,
__reserved_1 : 4,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
timer_mode : 1,
__reserved_3 : 14;
u32 __reserved_4[3];
} lvt_timer;
/*330*/ struct { /* LVT - Thermal Sensor */
u32 vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
__reserved_3 : 15;
u32 __reserved_4[3];
} lvt_thermal;
/*340*/ struct { /* LVT - Performance Counter */
u32 vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
__reserved_3 : 15;
u32 __reserved_4[3];
} lvt_pc;
/*350*/ struct { /* LVT - LINT0 */
u32 vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
polarity : 1,
remote_irr : 1,
trigger : 1,
mask : 1,
__reserved_2 : 15;
u32 __reserved_3[3];
} lvt_lint0;
/*360*/ struct { /* LVT - LINT1 */
u32 vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
polarity : 1,
remote_irr : 1,
trigger : 1,
mask : 1,
__reserved_2 : 15;
u32 __reserved_3[3];
} lvt_lint1;
/*370*/ struct { /* LVT - Error */
u32 vector : 8,
__reserved_1 : 4,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
__reserved_3 : 15;
u32 __reserved_4[3];
} lvt_error;
/*380*/ struct { /* Timer Initial Count Register */
u32 initial_count;
u32 __reserved_2[3];
} timer_icr;
/*390*/ const
struct { /* Timer Current Count Register */
u32 curr_count;
u32 __reserved_2[3];
} timer_ccr;
/*3A0*/ struct { u32 __reserved[4]; } __reserved_16;
/*3B0*/ struct { u32 __reserved[4]; } __reserved_17;
/*3C0*/ struct { u32 __reserved[4]; } __reserved_18;
/*3D0*/ struct { u32 __reserved[4]; } __reserved_19;
/*3E0*/ struct { /* Timer Divide Configuration Register */
u32 divisor : 4,
__reserved_1 : 28;
u32 __reserved_2[3];
} timer_dcr;
/*3F0*/ struct { u32 __reserved[4]; } __reserved_20;
} __attribute__ ((packed));
#undef u32
#define BAD_APICID 0xFFu
#endif

View file

@@ -0,0 +1,608 @@
/*
* Portions Copyright 2008, 2010 VMware, Inc.
*/
#ifndef __ARCH_X86_64_ATOMIC__
#define __ARCH_X86_64_ATOMIC__
#include <asm/alternative.h>
#if defined(__VMKLNX__)
#include "vmkapi.h"
#endif /* defined(__VMKLNX__) */
/* atomic_t should be a 32-bit signed type */
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc.
*/
#ifdef CONFIG_SMP
#define LOCK "lock ; "
#else
#define LOCK ""
#endif
/*
* Make sure gcc doesn't try to be clever and move things around
* on us. We need to use _exactly_ the address the user gave us,
* not some alias that contains the same information.
*/
typedef struct { volatile int counter; } atomic_t;
#define ATOMIC_INIT(i) { (i) }
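/*
 * Illustrative usage sketch (hypothetical object and free_obj()): the
 * canonical reference-count pattern built on these operations.
 *
 *   static atomic_t refcount = ATOMIC_INIT(1);
 *
 *   void get_obj(void) { atomic_inc(&refcount); }
 *   void put_obj(void)
 *   {
 *           if (atomic_dec_and_test(&refcount))
 *                   free_obj();           // last reference dropped
 *   }
 */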
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v.
*/
#define atomic_read(v) ((v)->counter)
/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
#define atomic_set(v,i) (((v)->counter) = (i))
/**
* atomic_add - add integer to atomic variable
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v.
*/
/* _VMKLNX_CODECHECK_: atomic_add */
static __inline__ void atomic_add(int i, atomic_t *v)
{
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "addl %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}
/**
* atomic_sub - subtract the atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v.
*/
/* _VMKLNX_CODECHECK_: atomic_sub */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "subl %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}
/**
* atomic_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
unsigned char c;
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "subl %2,%0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"ir" (i), "m" (v->counter) : "memory");
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
return c;
}
/**
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1.
*/
/* _VMKLNX_CODECHECK_: atomic_inc */
static __inline__ void atomic_inc(atomic_t *v)
{
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "incl %0"
:"=m" (v->counter)
:"m" (v->counter));
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}
/**
* atomic_dec - decrement atomic variable
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1.
*/
/* _VMKLNX_CODECHECK_: atomic_dec */
static __inline__ void atomic_dec(atomic_t *v)
{
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "decl %0"
:"=m" (v->counter)
:"m" (v->counter));
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}
/**
* atomic_dec_and_test - decrement and test
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
/* _VMKLNX_CODECHECK_: atomic_dec_and_test */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
unsigned char c;
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "decl %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
return c != 0;
}
/**
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
unsigned char c;
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "incl %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
return c != 0;
}
/**
* atomic_add_negative - add and test if negative
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
unsigned char c;
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "addl %2,%0; sets %1"
:"=m" (v->counter), "=qm" (c)
:"ir" (i), "m" (v->counter) : "memory");
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
return c;
}
/**
* atomic_add_return - add and return
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v and returns the new value (@i + @v).
*/
/* _VMKLNX_CODECHECK_: atomic_add_return */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
int __i = i;
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "xaddl %0, %1;"
:"=r"(i)
:"m"(v->counter), "0"(i));
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
return i + __i;
}
static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
return atomic_add_return(-i,v);
}
/**
* atomic_inc_return - increment by 1 and return
* @v: integer value to increment
*
* Atomically increments @v by 1 and returns @v + 1.
*
* SYNOPSIS:
* #define atomic_inc_return(v)
*
* RETURN VALUE:
* Returns @v + 1
*/
/* _VMKLNX_CODECHECK_: atomic_inc_return */
#define atomic_inc_return(v) (atomic_add_return(1,v))
#define atomic_dec_return(v) (atomic_sub_return(1,v))
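/*
 * Illustrative usage sketch (hypothetical counter): atomic_inc_return()
 * hands every caller a unique sequence number without a lock.
 *
 *   static atomic_t next_id = ATOMIC_INIT(0);
 *
 *   int alloc_id(void)
 *   {
 *           return atomic_inc_return(&next_id);   // 1, 2, 3, ...
 *   }
 */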
/* A 64-bit atomic type */
typedef struct { volatile long counter; } atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
/**
* atomic64_read - read atomic64 variable
* @v: pointer of type atomic64_t
*
* Atomically reads the value of @v.
* Doesn't imply a read memory barrier.
*/
#define atomic64_read(v) ((v)->counter)
/**
* atomic64_set - set atomic64 variable
* @v: pointer to type atomic64_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
#if defined(__VMKLNX__)
static __inline__ void atomic64_set(atomic64_t *v, long i)
{
/*
* Ensure that we do a single movq. Without this, the compiler
* may do the write of a constant as two movl operations.
*/
__asm__ __volatile__(
"movq %1, %0"
: "+m" (v->counter)
: "r" (i)
);
}
#else /* !defined(__VMKLNX__) */
#define atomic64_set(v,i) (((v)->counter) = (i))
#endif /* defined(__VMKLNX__) */
/**
* atomic64_add - add integer to atomic64 variable
* @i: integer value to add
* @v: pointer to type atomic64_t
*
* Atomically adds @i to @v.
*/
static __inline__ void atomic64_add(long i, atomic64_t *v)
{
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "addq %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}
/**
* atomic64_sub - subtract the atomic64 variable
* @i: integer value to subtract
* @v: pointer to type atomic64_t
*
* Atomically subtracts @i from @v.
*/
static __inline__ void atomic64_sub(long i, atomic64_t *v)
{
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "subq %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}
/**
* atomic64_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer to type atomic64_t
*
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
{
unsigned char c;
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "subq %2,%0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"ir" (i), "m" (v->counter) : "memory");
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
return c;
}
/**
* atomic64_inc - increment atomic64 variable
* @v: pointer to type atomic64_t
*
* Atomically increments @v by 1.
*/
static __inline__ void atomic64_inc(atomic64_t *v)
{
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "incq %0"
:"=m" (v->counter)
:"m" (v->counter));
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}
/**
* atomic64_dec - decrement atomic64 variable
* @v: pointer to type atomic64_t
*
* Atomically decrements @v by 1.
*/
static __inline__ void atomic64_dec(atomic64_t *v)
{
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "decq %0"
:"=m" (v->counter)
:"m" (v->counter));
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
}
/**
* atomic64_dec_and_test - decrement and test
* @v: pointer to type atomic64_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
static __inline__ int atomic64_dec_and_test(atomic64_t *v)
{
unsigned char c;
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "decq %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
return c != 0;
}
/**
* atomic64_inc_and_test - increment and test
* @v: pointer to type atomic64_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
static __inline__ int atomic64_inc_and_test(atomic64_t *v)
{
unsigned char c;
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "incq %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
return c != 0;
}
/**
* atomic64_add_negative - add and test if negative
* @i: integer value to add
* @v: pointer to type atomic64_t
*
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
{
unsigned char c;
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "addq %2,%0; sets %1"
:"=m" (v->counter), "=qm" (c)
:"ir" (i), "m" (v->counter) : "memory");
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
return c;
}
/**
* atomic64_add_return - add and return
* @i: integer value to add
* @v: pointer to type atomic64_t
*
* Atomically adds @i to @v and returns the new value (@i + @v).
*/
static __inline__ long atomic64_add_return(long i, atomic64_t *v)
{
long __i = i;
#if defined(__VMKLNX__)
vmk_AtomicPrologue();
#endif /* defined(__VMKLNX__) */
__asm__ __volatile__(
LOCK_PREFIX "xaddq %0, %1;"
:"=r"(i)
:"m"(v->counter), "0"(i));
#if defined(__VMKLNX__)
vmk_AtomicEpilogue();
#endif /* defined(__VMKLNX__) */
return i + __i;
}
static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
{
return atomic64_add_return(-i,v);
}
#define atomic64_inc_return(v) (atomic64_add_return(1,v))
#define atomic64_dec_return(v) (atomic64_sub_return(1,v))
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
* atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
#define atomic_add_unless(v, a, u) \
({ \
int c, old; \
c = atomic_read(v); \
for (;;) { \
if (unlikely(c == (u))) \
break; \
old = atomic_cmpxchg((v), c, c + (a)); \
if (likely(old == c)) \
break; \
c = old; \
} \
c != (u); \
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
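/*
 * Illustrative usage sketch (hypothetical names): atomic_inc_not_zero()
 * is the standard lookup-side guard for refcounted objects -- take a
 * reference only if the count has not already dropped to zero.
 *
 *   if (!atomic_inc_not_zero(&obj->refcnt))
 *           return NULL;   // object is already being torn down
 */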
/* These are x86-specific, used by some header files */
#if defined(__VMKLNX__)
#define atomic_clear_mask(mask, addr) \
do { \
vmk_AtomicPrologue(); \
__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)),"m" (*addr) : "memory") ; \
vmk_AtomicEpilogue(); \
} while (0)
#define atomic_set_mask(mask, addr) \
do { \
vmk_AtomicPrologue(); \
__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
: : "r" ((unsigned)mask),"m" (*(addr)) : "memory"); \
vmk_AtomicEpilogue(); \
} while (0)
#else /* !defined(__VMKLNX__) */
#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)),"m" (*addr) : "memory")
#define atomic_set_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
: : "r" ((unsigned)mask),"m" (*(addr)) : "memory")
#endif /* defined(__VMKLNX__) */
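/*
 * Illustrative usage sketch (hypothetical flag word): these operate on
 * a plain unsigned int rather than an atomic_t.
 *
 *   static unsigned int flags;
 *
 *   atomic_set_mask(0x4, &flags);     // atomically set bit 2
 *   atomic_clear_mask(0x4, &flags);   // atomically clear it again
 */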
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#include <asm-generic/atomic.h>
#endif

View file

@@ -0,0 +1,4 @@
#ifndef __ASM_X86_64_AUXVEC_H
#define __ASM_X86_64_AUXVEC_H
#endif

View file

@@ -0,0 +1,729 @@
/*
* Portions Copyright 2008, 2009 VMware, Inc.
*/
#ifndef _X86_64_BITOPS_H
#define _X86_64_BITOPS_H
#if defined(__VMKLNX__)
#include "vmkapi.h"
#endif /* defined(__VMKLNX__) */
/*
* Copyright 1992, Linus Torvalds.
*/
#include <asm/alternative.h>
#define ADDR (*(volatile long *) addr)
/**
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* This function is atomic and may not be reordered. See __set_bit()
* if you do not require the atomic guarantees.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*
* RETURN VALUE:
* NONE
*
*/
/* _VMKLNX_CODECHECK_: set_bit */
static __inline__ void set_bit(int nr, volatile void * addr)
{
__asm__ __volatile__( LOCK_PREFIX
"btsl %1,%0"
:"+m" (ADDR)
:"dIr" (nr) : "memory");
}
/**
* __set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* Unlike set_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static __inline__ void __set_bit(int nr, volatile void * addr)
{
__asm__ volatile(
"btsl %1,%0"
:"+m" (ADDR)
:"dIr" (nr) : "memory");
}
/**
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* Clears a bit in memory.
* clear_bit() is atomic and may not be reordered. However, it does
* not contain a memory barrier, so if it is used for locking purposes,
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
* in order to ensure changes are visible on other processors.
*
* RETURN VALUE:
* None
*
*/
/* _VMKLNX_CODECHECK_: clear_bit */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
__asm__ __volatile__( LOCK_PREFIX
"btrl %1,%0"
:"+m" (ADDR)
:"dIr" (nr));
}
static __inline__ void __clear_bit(int nr, volatile void * addr)
{
__asm__ __volatile__(
"btrl %1,%0"
:"+m" (ADDR)
:"dIr" (nr));
}
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
/**
* __change_bit - Toggle a bit in memory
* @nr: the bit to change
* @addr: the address to start counting from
*
* Unlike change_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static __inline__ void __change_bit(int nr, volatile void * addr)
{
__asm__ __volatile__(
"btcl %1,%0"
:"+m" (ADDR)
:"dIr" (nr));
}
/**
* change_bit - Toggle a bit in memory
* @nr: Bit to change
* @addr: Address to start counting from
*
* change_bit() is atomic and may not be reordered.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static __inline__ void change_bit(int nr, volatile void * addr)
{
__asm__ __volatile__( LOCK_PREFIX
"btcl %1,%0"
:"+m" (ADDR)
:"dIr" (nr));
}
/**
* test_and_set_bit - Set a bit and return its old state
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
* It tests if the bit at position nr in *addr is 0 or not and sets it to 1.
* Note that the return value need not be 1 (just non-zero) if the bit was 1.
*
* RETURN VALUE:
* 0 if original bit was 0 and NON-ZERO otherwise
*/
/* _VMKLNX_CODECHECK_: test_and_set_bit */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
int oldbit;
__asm__ __volatile__( LOCK_PREFIX
"btsl %2,%1\n\tsbbl %0,%0"
:"=r" (oldbit),"+m" (ADDR)
:"dIr" (nr) : "memory");
return oldbit;
}
/**
* __test_and_set_bit - Set a bit and return its old state
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
* It tests if the bit at position nr in *addr is 0 or not and sets it to 1.
* Note that the return value need not be 1 (just non-zero) if the bit was 1.
*
* RETURN VALUE:
* 0 if original bit was 0 and NON-ZERO otherwise
*
* SEE ALSO:
* test_and_set_bit
*/
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
int oldbit;
__asm__(
"btsl %2,%1\n\tsbbl %0,%0"
:"=r" (oldbit),"+m" (ADDR)
:"dIr" (nr));
return oldbit;
}
/**
* test_and_clear_bit - Clear a bit and return its old state
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
* It tests if the bit at position nr in *addr is 0 or not and sets it to 0.
* Note that the return value need not be 1 (just non-zero) if the bit was 1.
*
* RETURN VALUE:
* 0 if original bit was 0 and NON-ZERO otherwise
*/
/* _VMKLNX_CODECHECK_: test_and_clear_bit */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
int oldbit;
__asm__ __volatile__( LOCK_PREFIX
"btrl %2,%1\n\tsbbl %0,%0"
:"=r" (oldbit),"+m" (ADDR)
:"dIr" (nr) : "memory");
return oldbit;
}
/**
* __test_and_clear_bit - Clear a bit and return its old state
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
* It tests if the bit at position nr in *addr is 0 or not and sets it to 0.
* Note that the return value need not be 1 (just non-zero) if the bit was 1.
*
* RETURN VALUE:
* 0 if original bit was 0 and NON-ZERO otherwise
*
* SEE ALSO:
* test_and_clear_bit
*/
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
int oldbit;
__asm__(
"btrl %2,%1\n\tsbbl %0,%0"
:"=r" (oldbit),"+m" (ADDR)
:"dIr" (nr));
return oldbit;
}
/**
* __test_and_change_bit - Toggle a bit and return its old state
* @nr: Bit to toggle
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
* It tests if the bit at position nr in *addr is 0 or not and toggles it.
* Note that the return value need not be 1 (just non-zero) if the bit was 1.
*
* RETURN VALUE:
* 0 if original bit was 0 and NON-ZERO otherwise
*
* SEE ALSO:
* test_and_change_bit
*/
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
int oldbit;
__asm__ __volatile__(
"btcl %2,%1\n\tsbbl %0,%0"
:"=r" (oldbit),"+m" (ADDR)
:"dIr" (nr) : "memory");
return oldbit;
}
/**
* test_and_change_bit - Toggle a bit and return its old state
* @nr: Bit to toggle
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
* It tests if the bit at position nr in *addr is 0 or not and toggles it.
* Note that the return value need not be 1 (just non-zero) if the bit was 1.
*
* RETURN VALUE:
* 0 if original bit was 0 and NON-ZERO otherwise
*/
/* _VMKLNX_CODECHECK_: test_and_change_bit */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
int oldbit;
__asm__ __volatile__( LOCK_PREFIX
"btcl %2,%1\n\tsbbl %0,%0"
:"=r" (oldbit),"+m" (ADDR)
:"dIr" (nr) : "memory");
return oldbit;
}
/**
* constant_test_bit - determine whether a bit is set
* @nr: bit number to test
* @addr: addr to test
*
* Determines the state of the specified bit.
* This is used when @nr is known to be constant at compile-time.
* Use test_bit() instead of using this directly.
*
* RETURN VALUE:
* 0 if the bit was 0 and NON-ZERO otherwise
*/
/* _VMKLNX_CODECHECK_: constant_test_bit */
static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}
/**
* variable_test_bit - determine whether a bit is set
* @nr: bit number to test
* @addr: addr to test
*
* Determines the state of the specified bit.
* This is used when @nr is a variable.
* Use test_bit() instead of using this directly.
*
* RETURN VALUE:
* 0 if the bit was 0 and NON-ZERO otherwise
*/
/* _VMKLNX_CODECHECK_: variable_test_bit */
static __inline__ int variable_test_bit(int nr, volatile const void * addr)
{
int oldbit;
__asm__ __volatile__(
"btl %2,%1\n\tsbbl %0,%0"
:"=r" (oldbit)
:"m" (ADDR),"dIr" (nr));
return oldbit;
}
/**
* test_bit - Determine if bit at given position is set
* @nr: number of bit to be tested
* @addr: pointer to byte to test
*
* It tests if the bit at position nr in *addr is 0 or not.
* If the bit number is a constant an optimized bit extract is done.
* Note that the return value need not be 1 (just non-zero) if the bit was 1.
*
* SYNOPSIS:
* #define test_bit(nr,addr)
*
* RETURN VALUE:
* 0 if the bit was 0 and NON-ZERO otherwise
*/
/* _VMKLNX_CODECHECK_: test_bit */
#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
constant_test_bit((nr),(addr)) : \
variable_test_bit((nr),(addr)))
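/*
 * Illustrative usage sketch (hypothetical bitmap; BITS_TO_LONGS comes
 * from linux/bitops.h): a small in-use map driven by the operations
 * above.
 *
 *   static unsigned long inuse[BITS_TO_LONGS(128)];
 *
 *   set_bit(5, inuse);
 *   if (test_bit(5, inuse))
 *           clear_bit(5, inuse);
 */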
#undef ADDR
#if defined(__VMKLNX__)
/**
* find_first_zero_bit - find the first zero bit in a memory region
* @addr: The address to start the search at
* @size: The maximum bitnumber to search
*
* Finds the first zero bit in a specified memory region
*
* RETURN VALUE:
* Returns the bit-number of the first zero bit, not the number of the byte
* containing a bit.
* A result equal to or greater than size means no zero bit was found.
*/
/* _VMKLNX_CODECHECK_: find_first_zero_bit */
static __inline__ long
find_first_zero_bit(const unsigned long * addr, unsigned long size)
{
long d0, d1, d2;
long res;
/*
* We must test the size in words, not in bits, because
* otherwise incoming sizes in the range -63..-1 will not run
* any scasq instructions, and then the flags used by the je
* instruction will have whatever random value was in place
* before. Nobody should call us like that, but
* find_next_zero_bit() does when offset and size are at the
* same word and it fails to find a zero itself.
*/
size += 63;
size >>= 6;
if (!size)
return 0;
vmk_CPUEnsureClearDF();
asm volatile(
" repe; scasq\n"
" je 1f\n"
" xorq -8(%%rdi),%%rax\n"
" subq $8,%%rdi\n"
" bsfq %%rax,%%rdx\n"
"1: subq %[addr],%%rdi\n"
" shlq $3,%%rdi\n"
" addq %%rdi,%%rdx"
:"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
:"0" (0ULL), "1" (size), "2" (addr), "3" (-1ULL),
[addr] "S" (addr) : "memory");
/*
* Any register would do for [addr] above, but GCC tends to
* prefer rbx over rsi, even though rsi is readily available
* and doesn't have to be saved.
*/
return res;
}
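/*
 * Illustrative usage sketch (hypothetical map, locking omitted): the
 * classic ID-allocator pattern built on find_first_zero_bit().
 *
 *   long id = find_first_zero_bit(map, MAX_IDS);
 *   if (id >= MAX_IDS)
 *           return -ENOSPC;   // no free IDs
 *   __set_bit(id, map);
 */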
/**
* find_next_zero_bit - find the next zero bit in a memory region
* @addr: The address to base the search on
* @size: The maximum size to search
* @offset: The bitnumber to start searching at
*
* Finds the next zero bit in a specified memory region, starting at the
* given offset
*
* RETURN VALUE:
* Returns the bit-number of the first zero bit in the memory region at or
* after the specified offset.
* A result equal to or greater than size means no zero bit was found.
*/
/* _VMKLNX_CODECHECK_: find_next_zero_bit */
static __inline__ long
find_next_zero_bit (const unsigned long * addr, long size, long offset)
{
const unsigned long * p = addr + (offset >> 6);
unsigned long set = 0;
unsigned long res, bit = offset&63;
if (bit) {
/*
* Look for zero in first word
*/
asm("bsfq %1,%0\n\t"
"cmoveq %2,%0"
: "=r" (set)
: "r" (~(*p >> bit)), "r"(64L));
if (set < (64 - bit))
return set + offset;
set = 64 - bit;
p++;
}
/*
* No zero yet, search remaining full words for a zero
*/
res = find_first_zero_bit (p, size - 64 * (p - addr));
return (offset + set + res);
}
/**
* find_first_bit - find the first set bit in a memory region
* @addr: The address to start the search at
* @size: The maximum size to search
*
* Finds the first set bit in a specified memory region
*
* RETURN VALUE:
* Returns the bit-number of the first set bit, not the number of the byte
* containing a bit.
*
*/
/* _VMKLNX_CODECHECK_: find_first_bit */
static __inline__ long find_first_bit(const unsigned long * addr, unsigned long size)
{
long d0, d1;
long res;
/*
* We must test the size in words, not in bits, because
* otherwise incoming sizes in the range -63..-1 will not run
* any scasq instructions, and then the flags used by the jz
* instruction will have whatever random value was in place
* before. Nobody should call us like that, but
* find_next_bit() does when offset and size are at the same
* word and it fails to find a one itself.
*/
size += 63;
size >>= 6;
if (!size)
return 0;
vmk_CPUEnsureClearDF();
asm volatile(
" repe; scasq\n"
" jz 1f\n"
" subq $8,%%rdi\n"
" bsfq (%%rdi),%%rax\n"
"1: subq %[addr],%%rdi\n"
" shlq $3,%%rdi\n"
" addq %%rdi,%%rax"
:"=a" (res), "=&c" (d0), "=&D" (d1)
:"0" (0ULL), "1" (size), "2" (addr),
[addr] "r" (addr) : "memory");
return res;
}
/**
* find_next_bit - find the next set bit in a memory region
* @addr: The address to base the search on
* @size: The maximum size to search
* @offset: The bitnumber to start searching at
*
* Finds the next set bit in a specified memory region, starting at the
* given offset
*
* RETURN VALUE:
* Position of the first set bit in the memory region at or after the
* specified offset.
* A result equal to or greater than size means no set bit was found.
*/
/* _VMKLNX_CODECHECK_: find_next_bit */
static __inline__ long find_next_bit(const unsigned long * addr, long size, long offset)
{
const unsigned long * p = addr + (offset >> 6);
unsigned long set = 0, bit = offset & 63, res;
if (bit) {
/*
* Look for nonzero in the first 64 bits:
*/
asm("bsfq %1,%0\n\t"
"cmoveq %2,%0\n\t"
: "=r" (set)
: "r" (*p >> bit), "r" (64L));
if (set < (64 - bit))
return set + offset;
set = 64 - bit;
p++;
}
/*
* No set bit yet, search remaining full words for a bit
*/
res = find_first_bit (p, size - 64 * (p - addr));
return (offset + set + res);
}
#else /* !defined(__VMKLNX__) */
extern long find_first_zero_bit(const unsigned long * addr, unsigned long size);
extern long find_next_zero_bit (const unsigned long * addr, long size, long offset);
extern long find_first_bit(const unsigned long * addr, unsigned long size);
extern long find_next_bit(const unsigned long * addr, long size, long offset);
#endif /* defined(__VMKLNX__) */
/**
* __scanbit - searches unsigned long value for the least significant set bit
* @val: The unsigned long value to scan for the set bit
* @max: The value to return if no set bit found
*
* Finds the least significant set bit in specified unsigned long value
*
* RETURN VALUE:
* Index of the first set bit in val, or max when no bit is set
*/
/* _VMKLNX_CODECHECK_: __scanbit */
static inline unsigned long __scanbit(unsigned long val, unsigned long max)
{
asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max));
return val;
}
#define find_first_bit(addr,size) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
(__scanbit(*(unsigned long *)addr,(size))) : \
find_first_bit(addr,size)))
#define find_next_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \
find_next_bit(addr,size,off)))
#define find_first_zero_bit(addr,size) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
(__scanbit(~*(unsigned long *)addr,(size))) : \
find_first_zero_bit(addr,size)))
#define find_next_zero_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
find_next_zero_bit(addr,size,off)))
/*
* Find string of zero bits in a bitmap. -1 when not found.
*/
extern unsigned long
find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len);
static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
int len)
{
unsigned long end = i + len;
while (i < end) {
__set_bit(i, bitmap);
i++;
}
}
static inline void __clear_bit_string(unsigned long *bitmap, unsigned long i,
int len)
{
unsigned long end = i + len;
while (i < end) {
__clear_bit(i, bitmap);
i++;
}
}
/**
* ffz - find first zero in word.
* @word: The word to search
*
* Undefined if no zero exists, so code should check against ~0UL first.
*
* RETURN VALUE:
* Position of the first zero in the word
*/
/* _VMKLNX_CODECHECK_: ffz */
static __inline__ unsigned long ffz(unsigned long word)
{
__asm__("bsfq %1,%0"
:"=r" (word)
:"r" (~word));
return word;
}
/**
* __ffs - find first set bit in word
* @word: The word to search
*
* Undefined if no bit is set, so code should check against 0 first.
*
* RETURN VALUE:
* Position of the first set bit in the word
*/
/* _VMKLNX_CODECHECK_: __ffs */
static __inline__ unsigned long __ffs(unsigned long word)
{
__asm__("bsfq %1,%0"
:"=r" (word)
:"rm" (word));
return word;
}
/*
* __fls: find last bit set.
* @word: The word to search
*
* Undefined if no bit is set, so code should check against 0 first.
*/
static __inline__ unsigned long __fls(unsigned long word)
{
__asm__("bsrq %1,%0"
:"=r" (word)
:"rm" (word));
return word;
}
#ifdef __KERNEL__
#include <asm-generic/bitops/sched.h>
/**
* ffs - find first bit set
* @x: the word to search
*
* Finds the first bit set in the referenced word
*
* RETURN VALUE:
* Position of the first set bit
*/
/* _VMKLNX_CODECHECK_: ffs */
static __inline__ int ffs(int x)
{
int r;
__asm__("bsfl %1,%0\n\t"
"cmovzl %2,%0"
: "=r" (r) : "rm" (x), "r" (-1));
return r+1;
}
/**
* fls64 - find last bit set in 64 bit word
* @x: the word to search
*
* This is defined the same way as fls.
*/
static __inline__ int fls64(__u64 x)
{
if (x == 0)
return 0;
return __fls(x) + 1;
}
/**
* fls - find last bit set
* @x: the word to search
*
* Finds last bit set in the specified word
*
* RETURN VALUE:
* The last set bit in specified word
*/
/* _VMKLNX_CODECHECK_: fls */
static __inline__ int fls(int x)
{
int r;
__asm__("bsrl %1,%0\n\t"
"cmovzl %2,%0"
: "=&r" (r) : "rm" (x), "rm" (-1));
return r+1;
}
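/*
 * Worked examples for the bit-scan helpers above:
 *
 *   __ffs(0x10UL) == 4    (0-based index of the lowest set bit)
 *   ffz(0x0fUL)   == 4    (0-based index of the lowest zero bit)
 *   ffs(0x10)     == 5    (1-based; ffs(0) == 0)
 *   fls(0x10)     == 5    (1-based; fls(0) == 0)
 */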
#include <asm-generic/bitops/hweight.h>
#endif /* __KERNEL__ */
#ifdef __KERNEL__
#include <asm-generic/bitops/ext2-non-atomic.h>
#define ext2_set_bit_atomic(lock,nr,addr) \
test_and_set_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit_atomic(lock,nr,addr) \
test_and_clear_bit((nr),(unsigned long*)addr)
#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
#endif /* _X86_64_BITOPS_H */

View file

@@ -0,0 +1,15 @@
#ifndef _LINUX_BOOT_H
#define _LINUX_BOOT_H
/* Don't touch these, unless you really know what you're doing. */
#define DEF_INITSEG 0x9000
#define DEF_SYSSEG 0x1000
#define DEF_SETUPSEG 0x9020
#define DEF_SYSSIZE 0x7F00
/* Internal svga startup constants */
#define NORMAL_VGA 0xffff /* 80x25 mode */
#define EXTENDED_VGA 0xfffe /* 80x50 mode */
#define ASK_VGA 0xfffd /* ask for it at bootup */
#endif

View file

@@ -0,0 +1,40 @@
#ifndef _X86_64_BOOTSETUP_H
#define _X86_64_BOOTSETUP_H 1
#define BOOT_PARAM_SIZE 4096
extern char x86_boot_params[BOOT_PARAM_SIZE];
/*
* This is set up by the setup-routine at boot-time
*/
#define PARAM ((unsigned char *)x86_boot_params)
#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
#define ALT_MEM_K (*(unsigned int *) (PARAM+0x1e0))
#define E820_MAP_NR (*(char*) (PARAM+E820NR))
#define E820_MAP ((struct e820entry *) (PARAM+E820MAP))
#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
#define SAVED_VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
#define KERNEL_START (*(unsigned int *) (PARAM+0x214))
#define INITRD_START (*(unsigned int *) (PARAM+0x218))
#define INITRD_SIZE (*(unsigned int *) (PARAM+0x21c))
#define EDID_INFO (*(struct edid_info *) (PARAM+0x140))
#define EDD_NR (*(unsigned char *) (PARAM+EDDNR))
#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
#define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF))
#define COMMAND_LINE saved_command_line
#define RAMDISK_IMAGE_START_MASK 0x07FF
#define RAMDISK_PROMPT_FLAG 0x8000
#define RAMDISK_LOAD_FLAG 0x4000
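/*
 * Usage sketch (illustrative, not part of the original header): the
 * accessors above are plain overlays on the x86_boot_params block, so
 * e.g. the initrd bounds read back as
 *
 *	unsigned long initrd_begin = INITRD_START;
 *	unsigned long initrd_end   = INITRD_START + INITRD_SIZE;
 */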
#endif

View file

@ -0,0 +1,51 @@
#ifndef __ASM_X8664_BUG_H
#define __ASM_X8664_BUG_H 1
#include <linux/stringify.h>
/*
* Tell the user there is some problem. The exception handler decodes
* this frame.
*/
struct bug_frame {
unsigned char ud2[2];
unsigned char push;
signed int filename;
unsigned char ret;
unsigned short line;
} __attribute__((packed));
#ifdef CONFIG_BUG
#define HAVE_ARCH_BUG
/* We turn the bug frame into valid instructions to not confuse
the disassembler. Thanks to Jan Beulich & Suresh Siddha
for nice instruction selection.
The magic numbers generate mov $64bitimm,%eax ; ret $offset. */
/**
 * BUG - Reports a fatal error and stops the system
 *
 * Prints a log message identifying the failing file and line
 *
 * SYNOPSIS:
 * #define BUG()
 *
 * ESX Deviation Notes:
 * As on x86_64 in most variants of Linux, generates a
 * system stop (panic).
*
* RETURN VALUE:
* None
*
*/
/* _VMKLNX_CODECHECK_: BUG */
#define BUG() \
asm volatile( \
"ud2 ; pushq $%c1 ; ret $%c0" :: \
"i"(__LINE__), "i" (__FILE__))
void out_of_line_bug(void);
#else
static inline void out_of_line_bug(void) { }
#endif
#include <asm-generic/bug.h>
#endif

View file

@ -0,0 +1,28 @@
/*
* include/asm-x86_64/bugs.h
*
* Copyright (C) 1994 Linus Torvalds
* Copyright (C) 2000 SuSE
*
* This is included by init/main.c to check for architecture-dependent bugs.
*
* Needs:
* void check_bugs(void);
*/
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/pda.h>
extern void alternative_instructions(void);
static void __init check_bugs(void)
{
identify_cpu(&boot_cpu_data);
#if !defined(CONFIG_SMP)
printk("CPU: ");
print_cpu_info(&boot_cpu_data);
#endif
alternative_instructions();
}

View file

@ -0,0 +1,33 @@
#ifndef _X86_64_BYTEORDER_H
#define _X86_64_BYTEORDER_H
#include <asm/types.h>
#include <linux/compiler.h>
#ifdef __GNUC__
static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x)
{
__asm__("bswapq %0" : "=r" (x) : "0" (x));
return x;
}
static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
{
__asm__("bswapl %0" : "=r" (x) : "0" (x));
return x;
}
/* Do not define swab16. Gcc is smart enough to recognize the "C" version
   and convert it into a rotation or exchange. */
#define __arch__swab32(x) ___arch__swab32(x)
#define __arch__swab64(x) ___arch__swab64(x)
#endif /* __GNUC__ */
#define __BYTEORDER_HAS_U64__
#include <linux/byteorder/little_endian.h>
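/*
 * Illustrative sketch (hypothetical helper, not in the original file):
 * with the arch swab routines above wired in, the generic macros from
 * little_endian.h compile down to a single bswap, e.g. when emitting a
 * big-endian length field:
 */
static __inline__ __be32 __wire_len_sketch(__u32 host_len)
{
	return cpu_to_be32(host_len);	/* one bswapl on x86-64 */
}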
#endif /* _X86_64_BYTEORDER_H */

View file

@ -0,0 +1,26 @@
/*
* include/asm-x8664/cache.h
*/
#ifndef __ARCH_X8664_CACHE_H
#define __ARCH_X8664_CACHE_H
/* L1 cache line size */
#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#ifdef CONFIG_X86_VSMP
/* vSMP Internode cacheline shift */
#define INTERNODE_CACHE_SHIFT (12)
#ifdef CONFIG_SMP
#define __cacheline_aligned_in_smp \
__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT)))) \
__attribute__((__section__(".data.page_aligned")))
#endif
#endif
#define __read_mostly __attribute__((__section__(".data.read_mostly")))
#endif

View file

@ -0,0 +1,34 @@
#ifndef _X8664_CACHEFLUSH_H
#define _X8664_CACHEFLUSH_H
/* Keep includes the same across arches. */
#include <linux/mm.h>
/* Caches aren't brain-dead on the intel. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
void global_flush_tlb(void);
int change_page_attr(struct page *page, int numpages, pgprot_t prot);
int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot);
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif
#endif /* _X8664_CACHEFLUSH_H */

View file

@ -0,0 +1,64 @@
/*
* Derived from include/asm-powerpc/iommu.h
*
* Copyright (C) IBM Corporation, 2006
*
* Author: Jon Mason <jdmason@us.ibm.com>
* Author: Muli Ben-Yehuda <muli@il.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_X86_64_CALGARY_H
#define _ASM_X86_64_CALGARY_H
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/types.h>
struct iommu_table {
unsigned long it_base; /* mapped address of tce table */
unsigned long it_hint; /* Hint for next alloc */
unsigned long *it_map; /* A simple allocation bitmap for now */
spinlock_t it_lock; /* Protects it_map */
unsigned int it_size; /* Size of iommu table in entries */
unsigned char it_busno; /* Bus number this table belongs to */
void __iomem *bbar;
u64 tar_val;
struct timer_list watchdog_timer;
};
#define TCE_TABLE_SIZE_UNSPECIFIED ~0
#define TCE_TABLE_SIZE_64K 0
#define TCE_TABLE_SIZE_128K 1
#define TCE_TABLE_SIZE_256K 2
#define TCE_TABLE_SIZE_512K 3
#define TCE_TABLE_SIZE_1M 4
#define TCE_TABLE_SIZE_2M 5
#define TCE_TABLE_SIZE_4M 6
#define TCE_TABLE_SIZE_8M 7
extern int use_calgary;
#ifdef CONFIG_CALGARY_IOMMU
extern int calgary_iommu_init(void);
extern void detect_calgary(void);
#else
static inline int calgary_iommu_init(void) { return 1; }
static inline void detect_calgary(void) { return; }
#endif
#endif /* _ASM_X86_64_CALGARY_H */

View file

@ -0,0 +1,162 @@
/*
* Some macros to handle stack frames in assembly.
*/
#define R15 0
#define R14 8
#define R13 16
#define R12 24
#define RBP 32
#define RBX 40
/* arguments: interrupts/non-tracing syscalls only save up to here */
#define R11 48
#define R10 56
#define R9 64
#define R8 72
#define RAX 80
#define RCX 88
#define RDX 96
#define RSI 104
#define RDI 112
#define ORIG_RAX 120 /* + error_code */
/* end of arguments */
/* cpu exception frame or undefined in case of fast syscall. */
#define RIP 128
#define CS 136
#define EFLAGS 144
#define RSP 152
#define SS 160
#define ARGOFFSET R11
#define SWFRAME ORIG_RAX
.macro SAVE_ARGS addskip=0,norcx=0,nor891011=0
subq $9*8+\addskip,%rsp
CFI_ADJUST_CFA_OFFSET 9*8+\addskip
movq %rdi,8*8(%rsp)
CFI_REL_OFFSET rdi,8*8
movq %rsi,7*8(%rsp)
CFI_REL_OFFSET rsi,7*8
movq %rdx,6*8(%rsp)
CFI_REL_OFFSET rdx,6*8
.if \norcx
.else
movq %rcx,5*8(%rsp)
CFI_REL_OFFSET rcx,5*8
.endif
movq %rax,4*8(%rsp)
CFI_REL_OFFSET rax,4*8
.if \nor891011
.else
movq %r8,3*8(%rsp)
CFI_REL_OFFSET r8,3*8
movq %r9,2*8(%rsp)
CFI_REL_OFFSET r9,2*8
movq %r10,1*8(%rsp)
CFI_REL_OFFSET r10,1*8
movq %r11,(%rsp)
CFI_REL_OFFSET r11,0*8
.endif
.endm
#define ARG_SKIP 9*8
.macro RESTORE_ARGS skiprax=0,addskip=0,skiprcx=0,skipr11=0,skipr8910=0,skiprdx=0
.if \skipr11
.else
movq (%rsp),%r11
CFI_RESTORE r11
.endif
.if \skipr8910
.else
movq 1*8(%rsp),%r10
CFI_RESTORE r10
movq 2*8(%rsp),%r9
CFI_RESTORE r9
movq 3*8(%rsp),%r8
CFI_RESTORE r8
.endif
.if \skiprax
.else
movq 4*8(%rsp),%rax
CFI_RESTORE rax
.endif
.if \skiprcx
.else
movq 5*8(%rsp),%rcx
CFI_RESTORE rcx
.endif
.if \skiprdx
.else
movq 6*8(%rsp),%rdx
CFI_RESTORE rdx
.endif
movq 7*8(%rsp),%rsi
CFI_RESTORE rsi
movq 8*8(%rsp),%rdi
CFI_RESTORE rdi
.if ARG_SKIP+\addskip > 0
addq $ARG_SKIP+\addskip,%rsp
CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
.endif
.endm
.macro LOAD_ARGS offset
movq \offset(%rsp),%r11
movq \offset+8(%rsp),%r10
movq \offset+16(%rsp),%r9
movq \offset+24(%rsp),%r8
movq \offset+40(%rsp),%rcx
movq \offset+48(%rsp),%rdx
movq \offset+56(%rsp),%rsi
movq \offset+64(%rsp),%rdi
movq \offset+72(%rsp),%rax
.endm
#define REST_SKIP 6*8
.macro SAVE_REST
subq $REST_SKIP,%rsp
CFI_ADJUST_CFA_OFFSET REST_SKIP
movq %rbx,5*8(%rsp)
CFI_REL_OFFSET rbx,5*8
movq %rbp,4*8(%rsp)
CFI_REL_OFFSET rbp,4*8
movq %r12,3*8(%rsp)
CFI_REL_OFFSET r12,3*8
movq %r13,2*8(%rsp)
CFI_REL_OFFSET r13,2*8
movq %r14,1*8(%rsp)
CFI_REL_OFFSET r14,1*8
movq %r15,(%rsp)
CFI_REL_OFFSET r15,0*8
.endm
.macro RESTORE_REST
movq (%rsp),%r15
CFI_RESTORE r15
movq 1*8(%rsp),%r14
CFI_RESTORE r14
movq 2*8(%rsp),%r13
CFI_RESTORE r13
movq 3*8(%rsp),%r12
CFI_RESTORE r12
movq 4*8(%rsp),%rbp
CFI_RESTORE rbp
movq 5*8(%rsp),%rbx
CFI_RESTORE rbx
addq $REST_SKIP,%rsp
CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
.endm
.macro SAVE_ALL
SAVE_ARGS
SAVE_REST
.endm
.macro RESTORE_ALL addskip=0
RESTORE_REST
RESTORE_ARGS 0,\addskip
.endm
.macro icebp
.byte 0xf1
.endm
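/*
 * Usage sketch (illustrative, not part of the original file; the labels
 * are hypothetical): an entry stub brackets its C helper with the
 * save/restore pairs above, e.g.
 *
 *	some_stub:
 *		SAVE_ALL
 *		movq %rsp,%rdi		# pt_regs-style argument
 *		call some_c_handler
 *		RESTORE_ALL
 *		ret
 */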

View file

@ -0,0 +1,206 @@
#ifndef _X86_64_CHECKSUM_H
#define _X86_64_CHECKSUM_H
/*
* Checksums for x86-64
* Copyright 2002 by Andi Kleen, SuSE Labs
* with some code from asm-i386/checksum.h
*/
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
/**
* csum_fold - Fold and invert a 32bit checksum.
* @sum: 32bit unfolded sum
*
* Fold a 32bit running checksum to 16bit and invert it. This is usually
* the last step before putting a checksum into a packet.
* Make sure not to mix with 64bit checksums.
*
* RETURN VALUE:
 * The folded and inverted 16-bit checksum, in the low 16 bits of an unsigned int
*/
/* _VMKLNX_CODECHECK_: csum_fold */
static inline unsigned int csum_fold(unsigned int sum)
{
__asm__(
" addl %1,%0\n"
" adcl $0xffff,%0"
: "=r" (sum)
: "r" (sum << 16), "0" (sum & 0xffff0000)
);
return (~sum) >> 16;
}
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*
* By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
* Arnt Gulbrandsen.
*/
#if !defined(__VMKLNX_NO_IP_FAST_CSUM__)
/**
* ip_fast_csum - Compute the IPv4 header checksum efficiently.
* @iph: ipv4 header
* @ihl: length of header / 4
*
 * Calculates the 16 bit IPv4 header checksum for the given IPv4 header.
*
* RETURN VALUE:
* IPv4 header checksum
*/
/* _VMKLNX_CODECHECK_: ip_fast_csum */
static inline unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl)
{
unsigned int sum;
asm( " movl (%1), %0\n"
" subl $4, %2\n"
" jbe 2f\n"
" addl 4(%1), %0\n"
" adcl 8(%1), %0\n"
" adcl 12(%1), %0\n"
"1: adcl 16(%1), %0\n"
" lea 4(%1), %1\n"
" decl %2\n"
" jne 1b\n"
" adcl $0, %0\n"
" movl %0, %2\n"
" shrl $16, %0\n"
" addw %w2, %w0\n"
" adcl $0, %0\n"
" notl %0\n"
"2:"
/* Since the input registers which are loaded with iph and ihl
are modified, we must also specify them as outputs, or gcc
will assume they contain their original values. */
: "=r" (sum), "=r" (iph), "=r" (ihl)
: "1" (iph), "2" (ihl)
: "memory");
return(sum);
}
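/*
 * Usage sketch (illustrative): for a struct iphdr *iph the checksum
 * field is recomputed as
 *
 *	iph->check = 0;
 *	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
 */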
#endif /* !defined(__VMKLNX_NO_IP_FAST_CSUM__) */
/**
 * csum_tcpudp_nofold - Compute an IPv4 pseudo header checksum.
* @saddr: source address
* @daddr: destination address
* @len: length of packet
* @proto: ip protocol of packet
* @sum: initial sum to be added in (32bit unfolded)
*
 * Returns the pseudo header checksum for the input data. Result is
 * 32bit unfolded.
*/
static inline unsigned long
csum_tcpudp_nofold(unsigned saddr, unsigned daddr, unsigned short len,
unsigned short proto, unsigned int sum)
{
asm(" addl %1, %0\n"
" adcl %2, %0\n"
" adcl %3, %0\n"
" adcl $0, %0\n"
: "=r" (sum)
: "g" (daddr), "g" (saddr), "g" ((ntohs(len)<<16)+proto*256), "0" (sum));
return sum;
}
/**
 * csum_tcpudp_magic - Compute an IPv4 pseudo header checksum.
* @saddr: source address
* @daddr: destination address
* @len: length of packet
* @proto: ip protocol of packet
* @sum: initial sum to be added in (32bit unfolded)
*
 * Returns the 16bit pseudo header checksum for the input data, already
 * complemented and ready to be filled in.
*/
/* _VMKLNX_CODECHECK_: csum_tcpudp_magic */
static inline unsigned short int
csum_tcpudp_magic(unsigned long saddr, unsigned long daddr,
unsigned short len, unsigned short proto, unsigned int sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
/**
* csum_partial - Compute an internet checksum.
* @buff: buffer to be checksummed
* @len: length of buffer.
* @sum: initial sum to be added in (32bit unfolded)
*
* Returns the 32bit unfolded internet checksum of the buffer.
* Before filling it in it needs to be csum_fold()'ed.
* buff should be aligned to a 64bit boundary if possible.
*/
extern unsigned int csum_partial(const unsigned char *buff, unsigned len, unsigned int sum);
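/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): a complete UDP checksum combines csum_partial() over the
 * UDP header plus payload with the folded pseudo header sum:
 */
static inline unsigned short int
__udp_csum_sketch(unsigned saddr, unsigned daddr, unsigned short ulen,
		  const unsigned char *udp_packet)
{
	unsigned int sum = csum_partial(udp_packet, ulen, 0);
	/* 17 == IPPROTO_UDP, spelled out to keep the sketch self-contained */
	return csum_tcpudp_magic(saddr, daddr, ulen, 17, sum);
}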
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
#define HAVE_CSUM_COPY_USER 1
/* Do not call this directly. Use the wrappers below */
extern unsigned long csum_partial_copy_generic(const unsigned char *src, const unsigned char *dst,
unsigned len,
unsigned sum,
int *src_err_ptr, int *dst_err_ptr);
extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst,
int len, unsigned int isum, int *errp);
extern unsigned int csum_partial_copy_to_user(const unsigned char *src, unsigned char __user *dst,
int len, unsigned int isum, int *errp);
extern unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, int len,
unsigned int sum);
/* Old names. To be removed. */
#define csum_and_copy_to_user csum_partial_copy_to_user
#define csum_and_copy_from_user csum_partial_copy_from_user
/**
 * ip_compute_csum - Compute a 16bit IP checksum.
* @buff: buffer address.
* @len: length of buffer.
*
* Returns the 16bit folded/inverted checksum of the passed buffer.
* Ready to fill in.
*/
extern unsigned short ip_compute_csum(unsigned char * buff, int len);
/**
* csum_ipv6_magic - Compute checksum of an IPv6 pseudo header.
* @saddr: source address
* @daddr: destination address
* @len: length of packet
* @proto: protocol of packet
* @sum: initial sum (32bit unfolded) to be added in
*
 * Computes an IPv6 pseudo header checksum. This sum is added to the
 * checksum of UDP/TCP packets and covers some network layer information.
* Returns the unfolded 32bit checksum.
*/
struct in6_addr;
#define _HAVE_ARCH_IPV6_CSUM 1
extern unsigned short
csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
__u32 len, unsigned short proto, unsigned int sum);
static inline unsigned add32_with_carry(unsigned a, unsigned b)
{
asm("addl %2,%0\n\t"
"adcl $0,%0"
: "=r" (a)
: "0" (a), "r" (b));
return a;
}
#endif

View file

@ -0,0 +1,241 @@
#ifndef _ASM_X86_64_COMPAT_H
#define _ASM_X86_64_COMPAT_H
/*
* Architecture specific compatibility types
*/
#include <linux/types.h>
#include <linux/sched.h>
#define COMPAT_USER_HZ 100
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
typedef s32 compat_time_t;
typedef s32 compat_clock_t;
typedef s32 compat_pid_t;
typedef u16 __compat_uid_t;
typedef u16 __compat_gid_t;
typedef u32 __compat_uid32_t;
typedef u32 __compat_gid32_t;
typedef u16 compat_mode_t;
typedef u32 compat_ino_t;
typedef u16 compat_dev_t;
typedef s32 compat_off_t;
typedef s64 compat_loff_t;
typedef u16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
typedef s32 compat_timer_t;
typedef s32 compat_key_t;
typedef s32 compat_int_t;
typedef s32 compat_long_t;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
struct compat_timespec {
compat_time_t tv_sec;
s32 tv_nsec;
};
struct compat_timeval {
compat_time_t tv_sec;
s32 tv_usec;
};
struct compat_stat {
compat_dev_t st_dev;
u16 __pad1;
compat_ino_t st_ino;
compat_mode_t st_mode;
compat_nlink_t st_nlink;
__compat_uid_t st_uid;
__compat_gid_t st_gid;
compat_dev_t st_rdev;
u16 __pad2;
u32 st_size;
u32 st_blksize;
u32 st_blocks;
u32 st_atime;
u32 st_atime_nsec;
u32 st_mtime;
u32 st_mtime_nsec;
u32 st_ctime;
u32 st_ctime_nsec;
u32 __unused4;
u32 __unused5;
};
struct compat_flock {
short l_type;
short l_whence;
compat_off_t l_start;
compat_off_t l_len;
compat_pid_t l_pid;
};
#define F_GETLK64 12 /* using 'struct flock64' */
#define F_SETLK64 13
#define F_SETLKW64 14
/*
* IA32 uses 4 byte alignment for 64 bit quantities,
* so we need to pack this structure.
*/
struct compat_flock64 {
short l_type;
short l_whence;
compat_loff_t l_start;
compat_loff_t l_len;
compat_pid_t l_pid;
} __attribute__((packed));
struct compat_statfs {
int f_type;
int f_bsize;
int f_blocks;
int f_bfree;
int f_bavail;
int f_files;
int f_ffree;
compat_fsid_t f_fsid;
int f_namelen; /* SunOS ignores this field. */
int f_frsize;
int f_spare[5];
};
#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
#define COMPAT_RLIM_INFINITY 0xffffffff
typedef u32 compat_old_sigset_t; /* at least 32 bits */
#define _COMPAT_NSIG 64
#define _COMPAT_NSIG_BPW 32
typedef u32 compat_sigset_word;
#define COMPAT_OFF_T_MAX 0x7fffffff
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
struct compat_ipc64_perm {
compat_key_t key;
__compat_uid32_t uid;
__compat_gid32_t gid;
__compat_uid32_t cuid;
__compat_gid32_t cgid;
unsigned short mode;
unsigned short __pad1;
unsigned short seq;
unsigned short __pad2;
compat_ulong_t unused1;
compat_ulong_t unused2;
};
struct compat_semid64_ds {
struct compat_ipc64_perm sem_perm;
compat_time_t sem_otime;
compat_ulong_t __unused1;
compat_time_t sem_ctime;
compat_ulong_t __unused2;
compat_ulong_t sem_nsems;
compat_ulong_t __unused3;
compat_ulong_t __unused4;
};
struct compat_msqid64_ds {
struct compat_ipc64_perm msg_perm;
compat_time_t msg_stime;
compat_ulong_t __unused1;
compat_time_t msg_rtime;
compat_ulong_t __unused2;
compat_time_t msg_ctime;
compat_ulong_t __unused3;
compat_ulong_t msg_cbytes;
compat_ulong_t msg_qnum;
compat_ulong_t msg_qbytes;
compat_pid_t msg_lspid;
compat_pid_t msg_lrpid;
compat_ulong_t __unused4;
compat_ulong_t __unused5;
};
struct compat_shmid64_ds {
struct compat_ipc64_perm shm_perm;
compat_size_t shm_segsz;
compat_time_t shm_atime;
compat_ulong_t __unused1;
compat_time_t shm_dtime;
compat_ulong_t __unused2;
compat_time_t shm_ctime;
compat_ulong_t __unused3;
compat_pid_t shm_cpid;
compat_pid_t shm_lpid;
compat_ulong_t shm_nattch;
compat_ulong_t __unused4;
compat_ulong_t __unused5;
};
/*
 * A pointer passed in from user mode. This should not
 * be used for syscall parameters; just declare them
 * as pointers, because the syscall entry code will have
 * appropriately converted them already.
 */
typedef u32 compat_uptr_t;
/**
* compat_ptr - Convert a user provided pointer to a pointer suitable for user access functions
* @uptr: Pointer in user mode
*
* Convert a user provided pointer to a pointer suitable
* for use with user access functions (e.g. copy_from_user,
* copy_to_user, etc.).
*
* RETURN VALUE:
* A (void * ) pointer in user space.
*
*/
/* _VMKLNX_CODECHECK_: compat_ptr */
static inline void __user *compat_ptr(compat_uptr_t uptr)
{
return (void __user *)(unsigned long)uptr;
}
static inline compat_uptr_t ptr_to_compat(void __user *uptr)
{
return (u32)(unsigned long)uptr;
}
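/*
 * Usage sketch (illustrative): a 32-bit pointer received from a compat
 * syscall is widened with compat_ptr() before the normal user-access
 * helpers are applied, e.g.
 *
 *	if (copy_from_user(buf, compat_ptr(up), len))
 *		return -EFAULT;
 */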
/**
 * compat_alloc_user_space - Allocate space on the user-mode stack
 * @len: number of bytes to allocate
 *
 * Reserves len bytes below the current user stack pointer and returns
 * a user-space pointer to the reserved block, typically used to build
 * a native argument block on behalf of a 32-bit (compat) system call.
 *
 * ESX Deviation Notes:
 * Provided as an out-of-line vmklinux function rather than an inline
 * computed from the task's saved register state.
 *
 */
/* _VMKLNX_CODECHECK_: compat_alloc_user_space */
#if !defined(__VMKLNX__)
static __inline__ void __user *compat_alloc_user_space(long len)
{
struct pt_regs *regs = task_pt_regs(current);
return (void __user *)regs->rsp - len;
}
#else
extern void *compat_alloc_user_space(long len);
#endif
#if !defined(__VMKLNX__)
static inline int is_compat_task(void)
{
return current_thread_info()->status & TS_COMPAT;
}
#endif /* !defined(__VMKLNX__) */
#endif /* _ASM_X86_64_COMPAT_H */

View file

@ -0,0 +1,20 @@
/* const.h: Macros for dealing with constants. */
#ifndef _X86_64_CONST_H
#define _X86_64_CONST_H
/* Some constant macros are used in both assembler and
 * C code. Therefore we cannot always annotate them with
 * 'UL' and other type specifiers unilaterally. We
* use the following macros to deal with this.
*/
#ifdef __ASSEMBLY__
#define _AC(X,Y) X
#else
#define __AC(X,Y) (X##Y)
#define _AC(X,Y) __AC(X,Y)
#endif
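/*
 * Usage sketch (illustrative): a constant shared between C and
 * assembler is written once as, say,
 *
 *	#define EXAMPLE_SIZE _AC(4096,UL)
 *
 * which reads back as 4096 in .S files and as 4096UL in C.
 */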
#endif /* !(_X86_64_CONST_H) */

View file

@ -0,0 +1 @@
#include <asm-i386/cpu.h>

View file

@ -0,0 +1,116 @@
/*
* cpufeature.h
*
* Defines x86 CPU feature bits
*/
#ifndef __ASM_X8664_CPUFEATURE_H
#define __ASM_X8664_CPUFEATURE_H
#define NCAPINTS 7 /* N 32-bit words worth of info */
/* Intel-defined CPU features, CPUID level 0x00000001, word 0 */
#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */
#define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */
#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */
#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */
#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */
#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */
#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */
#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */
#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */
#define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */
#define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */
#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */
#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */
#define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */
#define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */
#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */
#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */
#define X86_FEATURE_PN (0*32+18) /* Processor serial number */
#define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */
#define X86_FEATURE_DTES (0*32+21) /* Debug Trace Store */
#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */
#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */
/* of FPU context), and CR4.OSFXSR available */
#define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */
#define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */
#define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */
#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */
#define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */
#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */
/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
/* Don't duplicate feature flags which are redundant with Intel! */
#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */
#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
#define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSR optimizations */
#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */
#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */
#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */
#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */
/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
#define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */
#define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */
#define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */
/* Other features, Linux-defined mapping, word 3 */
/* This range is used for feature bits which conflict or are synthesized */
#define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */
#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */
#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */
#define X86_FEATURE_REP_GOOD (3*32+ 4) /* rep microcode works well on this CPU */
#define X86_FEATURE_CONSTANT_TSC (3*32+5) /* TSC runs at constant rate */
#define X86_FEATURE_SYNC_RDTSC (3*32+6) /* RDTSC syncs CPU core */
#define X86_FEATURE_FXSAVE_LEAK (3*32+7) /* FIP/FOP/FDP leaks through FXSAVE */
#define X86_FEATURE_UP (3*32+8) /* SMP kernel running on UP */
#define X86_FEATURE_ARCH_PERFMON (3*32+9) /* Intel Architectural PerfMon */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
#define X86_FEATURE_MWAIT (4*32+ 3) /* Monitor/Mwait support */
#define X86_FEATURE_DSCPL (4*32+ 4) /* CPL Qualified Debug Store */
#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */
#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */
#define X86_FEATURE_CID (4*32+10) /* Context ID */
#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */
#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
#define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */
#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */
#define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */
#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */
/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */
#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */
#define cpu_has(c, bit) test_bit(bit, (c)->x86_capability)
#define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability)
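/*
 * Usage sketch (illustrative): optional features are probed at runtime
 * through the accessors above, e.g.
 *
 *	if (boot_cpu_has(X86_FEATURE_CX16))
 *		... use cmpxchg16b ...
 */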
#define cpu_has_fpu 1
#define cpu_has_vme 0
#define cpu_has_de 1
#define cpu_has_pse 1
#define cpu_has_tsc 1
#define cpu_has_pae ___BUG___
#define cpu_has_pge 1
#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC)
#define cpu_has_mtrr 1
#define cpu_has_mmx 1
#define cpu_has_fxsr 1
#define cpu_has_xmm 1
#define cpu_has_xmm2 1
#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3)
#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
#define cpu_has_mp 1 /* XXX */
#define cpu_has_k6_mtrr 0
#define cpu_has_cyrix_arr 0
#define cpu_has_centaur_mcr 0
#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH)
#endif /* __ASM_X8664_CPUFEATURE_H */

View file

@ -0,0 +1,6 @@
#ifndef __X86_64_CPUTIME_H
#define __X86_64_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __X86_64_CPUTIME_H */

View file

@ -0,0 +1,75 @@
#ifndef _ASM_X86_64_CRASH_H
#define _ASM_X86_64_CRASH_H
/*
* linux/include/asm-x86_64/crash.h
*
* Copyright (c) 2004 Red Hat, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#ifdef __KERNEL__
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/mmzone.h>
extern int page_is_ram(unsigned long);
static inline void *
map_virtual(u64 offset, struct page **pp)
{
struct page *page;
unsigned long pfn;
void *vaddr;
pfn = (unsigned long)(offset >> PAGE_SHIFT);
if (!page_is_ram(pfn)) {
printk(KERN_INFO
"crash memory driver: !page_is_ram(pfn: %lx)\n", pfn);
return NULL;
}
if (!pfn_valid(pfn)) {
printk(KERN_INFO
"crash memory driver: invalid pfn: %lx )\n", pfn);
return NULL;
}
page = pfn_to_page(pfn);
vaddr = kmap(page);
if (!vaddr) {
printk(KERN_INFO
"crash memory driver: pfn: %lx kmap(page: %lx) failed\n",
pfn, (unsigned long)page);
return NULL;
}
*pp = page;
return (vaddr + (offset & (PAGE_SIZE-1)));
}
static inline void unmap_virtual(struct page *page)
{
kunmap(page);
}
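/*
 * Usage sketch (illustrative): a crash-memory reader brackets each
 * access (which must stay within one page) with the pair above:
 *
 *	struct page *page;
 *	void *vaddr = map_virtual(offset, &page);
 *	if (vaddr) {
 *		memcpy(buf, vaddr, count);
 *		unmap_virtual(page);
 *	}
 */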
#endif /* __KERNEL__ */
#endif /* _ASM_X86_64_CRASH_H */

View file

@ -0,0 +1,86 @@
/*
* include/asm-x86_64/crashdump.h
*
* Copyright (C) Hitachi, Ltd. 2004
* Written by Satoshi Oshima (oshima@sdl.hitachi.co.jp)
*
* Derived from include/asm-i386/diskdump.h
* Copyright (c) 2004 FUJITSU LIMITED
* Copyright (c) 2003 Red Hat, Inc. All rights reserved.
*
*/
#ifndef _ASM_X86_64_CRASHDUMP_H
#define _ASM_X86_64_CRASHDUMP_H
#ifdef __KERNEL__
#include <linux/elf.h>
extern int page_is_ram(unsigned long);
extern unsigned long next_ram_page(unsigned long);
#define platform_fix_regs() \
{ \
unsigned long rsp; \
unsigned short ss; \
rsp = (unsigned long) ((char *)regs + sizeof (struct pt_regs)); \
ss = __KERNEL_DS; \
if (regs->cs & 3) { \
rsp = regs->rsp; \
ss = regs->ss & 0xffff; \
} \
myregs = *regs; \
myregs.rsp = rsp; \
myregs.ss = (myregs.ss & (~0xffff)) | ss; \
}
#define platform_timestamp(x) rdtscll(x)
#define platform_freeze_cpu() \
{ \
for (;;) local_irq_disable(); \
}
static inline void platform_init_stack(void **stackptr)
{
struct page *page;
if ((page = alloc_page(GFP_KERNEL)))
*stackptr = (void *)page_address(page);
if (*stackptr)
memset(*stackptr, 0, PAGE_SIZE);
else
printk(KERN_WARNING
"crashdump: unable to allocate separate stack\n");
}
#define platform_cleanup_stack(stackptr) \
do { \
if (stackptr) \
free_page((unsigned long)stackptr); \
} while (0)
typedef asmlinkage void (*crashdump_func_t)(struct pt_regs *, void *);
static inline void platform_start_crashdump(void *stackptr,
crashdump_func_t dumpfunc,
struct pt_regs *regs)
{
static unsigned long old_rsp;
unsigned long new_rsp;
if (stackptr) {
asm volatile("movq %%rsp,%0" : "=r" (old_rsp));
new_rsp = (unsigned long)stackptr + PAGE_SIZE;
asm volatile("movq %0,%%rsp" :: "r" (new_rsp));
dumpfunc(regs, NULL);
asm volatile("movq %0,%%rsp" :: "r" (old_rsp));
} else
dumpfunc(regs, NULL);
}
#endif /* __KERNEL__ */
#endif /* _ASM_X86_64_CRASHDUMP_H */

View file

@ -0,0 +1,57 @@
/*
* Portions Copyright 2008, 2009 VMware, Inc.
*/
#ifndef _X86_64_CURRENT_H
#define _X86_64_CURRENT_H
#if !defined(__ASSEMBLY__)
struct task_struct;
#include <asm/pda.h>
/**
* get_current - Gets current task pointer for the current world.
*
* Gets current task pointer for the current world.
*
* RETURN VALUE:
* Pointer to the task struct of the running process.
*/
/* _VMKLNX_CODECHECK_: get_current */
static inline struct task_struct *get_current(void)
{
#if defined(__VMKLNX__)
extern struct task_struct *vmklnx_GetCurrent(void);
return vmklnx_GetCurrent();
#else /* !defined(__VMKLNX__) */
struct task_struct *t = read_pda(pcurrent);
return t;
#endif /* defined(__VMKLNX__) */
}
/**
* current - Get current task pointer of current task
*
* Returns a pointer to the task struct of the running task
*
* SYNOPSIS:
* #define current
*
* RETURN VALUE:
* Pointer to current task of type task_struct
*
*/
/* _VMKLNX_CODECHECK_: current */
#define current get_current()
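/*
 * Usage sketch (illustrative): with the macro above, driver code simply
 * dereferences current, e.g. current->pid or current->comm.
 */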
#else
#ifndef ASM_OFFSET_H
#include <asm/asm-offsets.h>
#endif
#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
#endif
#endif /* !(_X86_64_CURRENT_H) */

View file

@ -0,0 +1,65 @@
#ifndef _X86_64_DEBUGREG_H
#define _X86_64_DEBUGREG_H
/* Indicate the register numbers for a number of the specific
debug registers. Registers 0-3 contain the addresses we wish to trap on */
#define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */
#define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */
#define DR_STATUS 6 /* u_debugreg[DR_STATUS] */
#define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */
/* Define a few things for the status register. We can use this to determine
which debugging register was responsible for the trap. The other bits
are either reserved or not of interest to us. */
#define DR_TRAP0 (0x1) /* db0 */
#define DR_TRAP1 (0x2) /* db1 */
#define DR_TRAP2 (0x4) /* db2 */
#define DR_TRAP3 (0x8) /* db3 */
#define DR_STEP (0x4000) /* single-step */
#define DR_SWITCH (0x8000) /* task switch */
/* Now define a bunch of things for manipulating the control register.
The top two bytes of the control register consist of 4 fields of 4
bits - each field corresponds to one of the four debug registers,
and indicates what types of access we trap on, and how large the data
field is that we are looking at */
#define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */
#define DR_CONTROL_SIZE 4 /* 4 control bits per register */
#define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */
#define DR_RW_WRITE (0x1)
#define DR_RW_READ (0x3)
#define DR_LEN_1 (0x0) /* Settings for data length to trap on */
#define DR_LEN_2 (0x4)
#define DR_LEN_4 (0xC)
#define DR_LEN_8 (0x8)
/* The low byte of the control register determines which registers are
enabled. There are 4 fields of two bits. One bit is "local", meaning
that the processor will reset the bit after a task switch and the other
is global meaning that we have to explicitly reset the bit. With linux,
you can use either one, since we explicitly zero the register when we enter
kernel mode. */
#define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */
#define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */
#define DR_ENABLE_SIZE 2 /* 2 enable bits per register */
#define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */
#define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */
/* The second byte of the control register has a few special things.
We can slow the instruction pipeline for instructions coming via the
gdt or the ldt if we want to. I am not sure why this is an advantage */
#define DR_CONTROL_RESERVED (0xFFFFFFFF0000FC00UL) /* Reserved */
#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
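/*
 * Illustrative sketch (not part of the original header): arming debug
 * register 0 as a 4-byte write watchpoint composes the DR7 value from
 * the fields above:
 *
 *	unsigned long dr7 = (1UL << DR_GLOBAL_ENABLE_SHIFT)
 *		| ((DR_RW_WRITE | DR_LEN_4)
 *		   << (DR_CONTROL_SHIFT + 0 * DR_CONTROL_SIZE));
 */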
#endif

View file

@ -0,0 +1,49 @@
/*
* Portions Copyright 2008 VMware, Inc.
*/
#ifndef _X8664_DELAY_H
#define _X8664_DELAY_H
/*
* Copyright (C) 1993 Linus Torvalds
*
* Delay routines calling functions in arch/x86_64/lib/delay.c
*/
#if defined(__VMKLNX__)
#include "vmkapi.h"
#define udelay(n) vmk_DelayUsecs((vmk_uint32) (n))
#else /* !defined(__VMKLNX__) */
extern void __bad_udelay(void);
extern void __bad_ndelay(void);
extern void __udelay(unsigned long usecs);
extern void __ndelay(unsigned long nsecs);
extern void __const_udelay(unsigned long usecs);
extern void __delay(unsigned long loops);
/**
* udelay - delay for n microseconds
* @n: number of microseconds to delay
*
* SYNOPSIS:
* #define udelay(n)
*
* RETURN VALUE:
* none
*
*/
/* _VMKLNX_CODECHECK_: udelay */
#define udelay(n) (__builtin_constant_p(n) ? \
((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \
__udelay(n))
#define ndelay(n) (__builtin_constant_p(n) ? \
((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
__ndelay(n))
#endif /* defined(__VMKLNX__) */
#endif /* defined(_X8664_DELAY_H) */

View file

@ -0,0 +1,238 @@
/* Written 2000 by Andi Kleen */
#ifndef __ARCH_DESC_H
#define __ARCH_DESC_H
#include <linux/threads.h>
#include <asm/ldt.h>
#ifndef __ASSEMBLY__
#include <linux/string.h>
#include <linux/smp.h>
#include <asm/segment.h>
#include <asm/mmu.h>
// 8 byte segment descriptor
struct desc_struct {
u16 limit0;
u16 base0;
unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
} __attribute__((packed));
struct n_desc_struct {
unsigned int a,b;
};
extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
enum {
GATE_INTERRUPT = 0xE,
GATE_TRAP = 0xF,
GATE_CALL = 0xC,
};
// 16byte gate
struct gate_struct {
u16 offset_low;
u16 segment;
unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
u16 offset_middle;
u32 offset_high;
u32 zero1;
} __attribute__((packed));
#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF)
#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
enum {
DESC_TSS = 0x9,
DESC_LDT = 0x2,
};
// LDT or TSS descriptor in the GDT. 16 bytes.
struct ldttss_desc {
u16 limit0;
u16 base0;
unsigned base1 : 8, type : 5, dpl : 2, p : 1;
unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
u32 base3;
u32 zero1;
} __attribute__((packed));
struct desc_ptr {
unsigned short size;
unsigned long address;
} __attribute__((packed)) ;
#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
#define clear_LDT() asm volatile("lldt %w0"::"r" (0))
/*
* This is the ldt that every process will get unless we need
* something other than this.
*/
extern struct desc_struct default_ldt[];
extern struct gate_struct idt_table[];
extern struct desc_ptr cpu_gdt_descr[];
/* the cpu gdt accessor */
#define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address)
static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)
{
struct gate_struct s;
s.offset_low = PTR_LOW(func);
s.segment = __KERNEL_CS;
s.ist = ist;
s.p = 1;
s.dpl = dpl;
s.zero0 = 0;
s.zero1 = 0;
s.type = type;
s.offset_middle = PTR_MIDDLE(func);
s.offset_high = PTR_HIGH(func);
/* does not need to be atomic because it is only done once at setup time */
memcpy(adr, &s, 16);
}
static inline void set_intr_gate(int nr, void *func)
{
BUG_ON((unsigned)nr > 0xFF);
_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0);
}
static inline void set_intr_gate_ist(int nr, void *func, unsigned ist)
{
BUG_ON((unsigned)nr > 0xFF);
_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist);
}
static inline void set_system_gate(int nr, void *func)
{
BUG_ON((unsigned)nr > 0xFF);
_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0);
}
static inline void set_system_gate_ist(int nr, void *func, unsigned ist)
{
_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist);
}
static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type,
unsigned size)
{
struct ldttss_desc d;
memset(&d,0,sizeof(d));
d.limit0 = size & 0xFFFF;
d.base0 = PTR_LOW(tss);
d.base1 = PTR_MIDDLE(tss) & 0xFF;
d.type = type;
d.p = 1;
d.limit1 = (size >> 16) & 0xF;
d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF;
d.base3 = PTR_HIGH(tss);
memcpy(ptr, &d, 16);
}
static inline void set_tss_desc(unsigned cpu, void *addr)
{
/*
* sizeof(unsigned long) coming from an extra "long" at the end
* of the iobitmap. See tss_struct definition in processor.h
*
* -1? seg base+limit should be pointing to the address of the
* last valid byte
*/
set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_TSS],
(unsigned long)addr, DESC_TSS,
IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
}
static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
{
set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_LDT], (unsigned long)addr,
DESC_LDT, size * 8 - 1);
}
static inline void set_seg_base(unsigned cpu, int entry, void *base)
{
struct desc_struct *d = &cpu_gdt(cpu)[entry];
u32 addr = (u32)(u64)base;
BUG_ON((u64)base >> 32);
d->base0 = addr & 0xffff;
d->base1 = (addr >> 16) & 0xff;
d->base2 = (addr >> 24) & 0xff;
}
#define LDT_entry_a(info) \
((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
/* Don't allow setting of the lm bit. It is useless anyways because
64bit system calls require __USER_CS. */
#define LDT_entry_b(info) \
(((info)->base_addr & 0xff000000) | \
(((info)->base_addr & 0x00ff0000) >> 16) | \
((info)->limit & 0xf0000) | \
(((info)->read_exec_only ^ 1) << 9) | \
((info)->contents << 10) | \
(((info)->seg_not_present ^ 1) << 15) | \
((info)->seg_32bit << 22) | \
((info)->limit_in_pages << 23) | \
((info)->useable << 20) | \
/* ((info)->lm << 21) | */ \
0x7000)
#define LDT_empty(info) (\
(info)->base_addr == 0 && \
(info)->limit == 0 && \
(info)->contents == 0 && \
(info)->read_exec_only == 1 && \
(info)->seg_32bit == 0 && \
(info)->limit_in_pages == 0 && \
(info)->seg_not_present == 1 && \
(info)->useable == 0 && \
(info)->lm == 0)
#if TLS_SIZE != 24
# error update this code.
#endif
static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
{
u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN);
gdt[0] = t->tls_array[0];
gdt[1] = t->tls_array[1];
gdt[2] = t->tls_array[2];
}
/*
* load one particular LDT into the current CPU
*/
static inline void load_LDT_nolock (mm_context_t *pc, int cpu)
{
int count = pc->size;
if (likely(!count)) {
clear_LDT();
return;
}
set_ldt_desc(cpu, pc->ldt, count);
load_LDT_desc();
}
static inline void load_LDT(mm_context_t *pc)
{
int cpu = get_cpu();
load_LDT_nolock(pc, cpu);
put_cpu();
}
extern struct desc_ptr idt_descr;
#endif /* !__ASSEMBLY__ */
#endif

View file

@ -0,0 +1,13 @@
#ifndef _ASM_X86_DEVICE_H
#define _ASM_X86_DEVICE_H
struct dev_archdata {
#ifdef CONFIG_ACPI
void *acpi_handle;
#endif
#ifdef CONFIG_DMAR
void *iommu; /* hook for IOMMU specific extension */
#endif
};
#endif /* _ASM_X86_DEVICE_H */

View file

@ -0,0 +1,44 @@
/*
* include/asm-x86_64/diskdump.h
*
* Copyright (C) Hitachi, Ltd. 2004
* Written by Satoshi Oshima (oshima@sdl.hitachi.co.jp)
*
* Derived from include/asm-i386/diskdump.h
* Copyright (c) 2004 FUJITSU LIMITED
* Copyright (c) 2003 Red Hat, Inc. All rights reserved.
*
*/
#ifndef _ASM_X86_64_DISKDUMP_H
#define _ASM_X86_64_DISKDUMP_H
#ifdef __KERNEL__
#include <linux/elf.h>
#include <asm/crashdump.h>
static const int platform_supports_diskdump = 1;
struct disk_dump_sub_header {
elf_gregset_t elf_regs;
};
#define size_of_sub_header() ((sizeof(struct disk_dump_sub_header) + PAGE_SIZE - 1) / DUMP_BLOCK_SIZE)
#define write_sub_header() \
({ \
int ret; \
\
ELF_CORE_COPY_REGS(dump_sub_header.elf_regs, (&myregs)); \
clear_page(scratch); \
memcpy(scratch, &dump_sub_header, sizeof(dump_sub_header)); \
\
if ((ret = write_blocks(dump_part, 2, scratch, 1)) >= 0) \
ret = 1; /* size of sub header in page */; \
ret; \
})
#endif /* __KERNEL__ */
#endif /* _ASM_X86_64_DISKDUMP_H */

View file

@ -0,0 +1 @@
#include <asm-generic/div64.h>

View file

@ -0,0 +1,428 @@
/*
* Portions Copyright 2008 VMware, Inc.
*/
#ifndef _X8664_DMA_MAPPING_H
#define _X8664_DMA_MAPPING_H 1
/*
* IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
* documentation.
*/
#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
struct dma_mapping_ops {
int (*mapping_error)(dma_addr_t dma_addr);
void* (*alloc_coherent)(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp);
void (*free_coherent)(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
dma_addr_t (*map_single)(struct device *hwdev, void *ptr,
size_t size, int direction);
/* like map_single, but doesn't check the device mask */
dma_addr_t (*map_simple)(struct device *hwdev, char *ptr,
size_t size, int direction);
void (*unmap_single)(struct device *dev, dma_addr_t addr,
size_t size, int direction);
void (*sync_single_for_cpu)(struct device *hwdev,
dma_addr_t dma_handle, size_t size,
int direction);
void (*sync_single_for_device)(struct device *hwdev,
dma_addr_t dma_handle, size_t size,
int direction);
void (*sync_single_range_for_cpu)(struct device *hwdev,
dma_addr_t dma_handle, unsigned long offset,
size_t size, int direction);
void (*sync_single_range_for_device)(struct device *hwdev,
dma_addr_t dma_handle, unsigned long offset,
size_t size, int direction);
void (*sync_sg_for_cpu)(struct device *hwdev,
struct scatterlist *sg, int nelems,
int direction);
void (*sync_sg_for_device)(struct device *hwdev,
struct scatterlist *sg, int nelems,
int direction);
int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
int nents, int direction);
void (*unmap_sg)(struct device *hwdev,
struct scatterlist *sg, int nents,
int direction);
int (*dma_supported)(struct device *hwdev, u64 mask);
int is_phys;
};
extern dma_addr_t bad_dma_address;
extern struct dma_mapping_ops* dma_ops;
extern int iommu_merge;
static inline int valid_dma_direction(int dma_direction)
{
return ((dma_direction == DMA_BIDIRECTIONAL) ||
(dma_direction == DMA_TO_DEVICE) ||
(dma_direction == DMA_FROM_DEVICE));
}
extern void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp);
extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle);
#if !defined(__VMKLNX__)
/**
* dma_mapping_error - Check a bus address for a mapping error
* @dma_addr: bus address previously returned by dma_map_single or dma_map_page
*
* Performs a platform-specific check to determine if the
* mapped bus address is valid for use with DMA
*
* RETURN VALUE:
* TRUE if the bus address incurred a mapping error, FALSE otherwise
*
* SEE ALSO:
* dma_map_single
*
*/
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
if (dma_ops->mapping_error)
return dma_ops->mapping_error(dma_addr);
return (dma_addr == bad_dma_address);
}
/**
* dma_map_single - Map a buffer for streaming DMA use with a given device
* @hwdev: device to be used in the DMA operation
* @ptr: virtual address of the buffer
* @size: length of the buffer, in bytes
* @direction: direction of the DMA to set up
*
* Sets up any platform-specific bus connectivity required to
* make a buffer usable for a DMA operation and returns a mapped bus address
* for the buffer. The mapped address should be checked for an error using
* dma_mapping_error. When the buffer will no longer be used for DMA, the
* buffer should be unmapped using dma_unmap_single.
* 'direction' can be any one of
* DMA_BIDIRECTIONAL (the device either reads or writes the buffer),
* DMA_TO_DEVICE (the device reads the buffer),
* DMA_FROM_DEVICE (the device writes the buffer), or
* DMA_NONE (neither reads nor writes should be allowed - may not be supported
* on all platforms)
*
* RETURN VALUE:
* A bus address accessible by the device
*
* SEE ALSO:
* dma_unmap_single, dma_mapping_error
*
*/
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
int direction)
{
BUG_ON(!valid_dma_direction(direction));
return dma_ops->map_single(hwdev, ptr, size, direction);
}
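/*
 * Usage sketch (illustrative): the canonical streaming-DMA cycle pairs
 * the calls documented here:
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(bus))
 *		return -ENOMEM;
 *	... let the device read from 'bus' ...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */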
#define dma_map_page(dev,page,offset,size,dir) \
dma_map_single((dev), page_address(page)+(offset), (size), (dir))
#define dma_unmap_page dma_unmap_single
/**
* dma_unmap_single - Tear down a streaming DMA mapping for a buffer
* @dev: device that had been used in the DMA operation
* @addr: mapped bus address for the buffer, previously returned by dma_map_single
* @size: length of the buffer, in bytes
* @direction: direction of the DMA that was set up by dma_map_single
*
* Tears down the platform-specific bus connectivity that was needed to make a
* buffer usable for DMA.
* 'direction' can be any one of
* DMA_BIDIRECTIONAL (the device either reads or writes the buffer),
* DMA_TO_DEVICE (the device reads the buffer),
* DMA_FROM_DEVICE (the device writes the buffer), or
* DMA_NONE (neither reads nor writes should be allowed)
*
* RETURN VALUE:
* Does not return any value
*
* SEE ALSO:
* dma_map_single
*
*/
static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
int direction)
{
BUG_ON(!valid_dma_direction(direction));
dma_ops->unmap_single(dev, addr, size, direction);
}
/**
* dma_sync_single_for_cpu - Allow the CPU to access a buffer that is currently DMA-mapped
* @hwdev: device to which the buffer is mapped
* @dma_handle: bus address of the buffer
* @size: length of the buffer, in bytes
* @direction: direction of the existing DMA mapping
*
* Transfers access ownership for a buffer that has been set up for DMA back to
* the CPU and synchronizes any changes that have been made by the device with
* the CPU. The bus mapping that was created with dma_map_single is not
* destroyed. Afterward, the CPU can safely read and write the buffer. The
* device should not access the buffer until access rights have been
* transferred back to the device using dma_sync_single_for_device.
*
* RETURN VALUE:
* Does not return any value
*
* SEE ALSO:
* dma_sync_single_for_device, dma_map_single
*
*/
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
size_t size, int direction)
{
BUG_ON(!valid_dma_direction(direction));
if (dma_ops->sync_single_for_cpu)
dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
direction);
flush_write_buffers();
}
/**
* dma_sync_single_for_device - Re-enable device access to a DMA-mapped buffer
* @hwdev: device to which the buffer is mapped
* @dma_handle: bus address of the buffer
* @size: length of the buffer, in bytes
* @direction: direction of the existing DMA mapping
*
* Transfers access ownership back to a device from the CPU and synchronizes
* any changes that the CPU has made so that they will be visible by the device.
*
* RETURN VALUE:
* Does not return any value
*
* SEE ALSO:
* dma_sync_single_for_cpu, dma_map_single
*
*/
static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
size_t size, int direction)
{
BUG_ON(!valid_dma_direction(direction));
if (dma_ops->sync_single_for_device)
dma_ops->sync_single_for_device(hwdev, dma_handle, size,
direction);
flush_write_buffers();
}
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
unsigned long offset, size_t size, int direction)
{
BUG_ON(!valid_dma_direction(direction));
if (dma_ops->sync_single_range_for_cpu) {
dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
}
flush_write_buffers();
}
static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
unsigned long offset, size_t size, int direction)
{
BUG_ON(!valid_dma_direction(direction));
if (dma_ops->sync_single_range_for_device)
dma_ops->sync_single_range_for_device(hwdev, dma_handle,
offset, size, direction);
flush_write_buffers();
}
static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
int nelems, int direction)
{
BUG_ON(!valid_dma_direction(direction));
if (dma_ops->sync_sg_for_cpu)
dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
flush_write_buffers();
}
static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
int nelems, int direction)
{
BUG_ON(!valid_dma_direction(direction));
if (dma_ops->sync_sg_for_device) {
dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
}
flush_write_buffers();
}
/**
* dma_map_sg - Map scatter/gather buffers for DMA use with a hardware device
* @hwdev: device to be used in the DMA operations
* @sg: start of the scatter/gather list of entries to be mapped
* @nents: number of elements in the list to be mapped
* @direction: direction of the DMA, with values as in dma_map_single
*
* Sets up the platform-specific bus connectivity for each of the buffers in a
* scatterlist so that they may be used in DMA with the given hardware device.
* dma_unmap_sg should be used on these scatterlist elements when they will no
* longer be used with DMA.
*
* RETURN VALUE:
* 0 if a failure was encountered, nents if the mappings succeeded
*
* SEE ALSO:
* dma_map_single, dma_unmap_sg
*
*/
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
{
BUG_ON(!valid_dma_direction(direction));
return dma_ops->map_sg(hwdev, sg, nents, direction);
}
/**
* dma_unmap_sg - Unmap scatter/gather buffers that were previously mapped for DMA
* @hwdev: device to which these buffers have been mapped
* @sg: start of the scatter/gather list of entries to be unmapped
* @nents: number of elements in the list to be unmapped
* @direction: direction of the existing DMA mapping
*
* Tears down the platform-specific bus connectivity for each of the buffers in
* a scatterlist that had been previously set up for DMA using
* dma_map_sg.
*
* RETURN VALUE:
* Does not return any value
*
* SEE ALSO:
* dma_map_sg, dma_map_single
*
*/
static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
int direction)
{
BUG_ON(!valid_dma_direction(direction));
/* take out this ifdef block when we have iommu support */
dma_ops->unmap_sg(hwdev, sg, nents, direction);
}
extern int dma_supported(struct device *hwdev, u64 mask);
#else /* defined(__VMKLNX__) */
extern int dma_mapping_error(dma_addr_t dma_addr);
/* In vmklinux, virt_to_phys & phys_to_virt are real function calls, so
* until support for IOMMU is added, it's good to avoid call to
* dma_map_single() here since nommu_map_single() calls virt_to_phys().
*/
#define dma_map_page(dev,page,offset,size,dir) \
(((dma_addr_t) page_to_phys(page)) + (offset))
#define dma_unmap_page(dev,dma_address,size,dir) do { } while(0)
extern dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
int direction);
extern void
dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
int direction);
extern void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
size_t size, int direction);
extern void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
size_t size, int direction);
extern void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
unsigned long offset, size_t size, int direction);
extern void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
unsigned long offset, size_t size, int direction);
extern void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
int nelems, int direction);
extern void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
int nelems, int direction);
extern int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction);
extern void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
int direction);
struct vmklnx_codma;
extern struct vmklnx_codma vmklnx_codma;
extern int vmklnx_dma_supported(struct vmklnx_codma *codma,
struct device *hwdev, u64 mask);
static inline int dma_supported(struct device *hwdev, u64 mask)
{
return vmklnx_dma_supported(&vmklnx_codma, hwdev, mask);
}
#endif /* defined(__VMKLNX__) */
/* same for gart, swiotlb, and nommu */
static inline int dma_get_cache_alignment(void)
{
return boot_cpu_data.x86_clflush_size;
}
#define dma_is_consistent(h) 1
#if !defined(__VMKLNX__)
extern int dma_set_mask(struct device *dev, u64 mask);
#else /* defined(__VMKLNX__) */
/**
* dma_set_mask - Set a device's allowable DMA mask
* @dev: device whose mask is being set
* @mask: address bitmask
*
* Sets the range in which the device can perform DMA operations.
* The mask, when bitwise-ANDed with an arbitrary machine address, expresses
* the DMA addressability of the device. This mask is used by dma_mapping
* functions (ie, dma_alloc_coherent, dma_pool_alloc) to guarantee that the
* memory allocated is usable by the device.
*
* RETURN VALUE:
* 0 on success
* -EIO if the given mask cannot be used for DMA on the system, or if the
* dma_mask has not been previously initialized.
*
*/
/* _VMKLNX_CODECHECK_: dma_set_mask */
static inline int dma_set_mask(struct device *dev, u64 mask)
{
if (!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
*dev->dma_mask = mask;
return 0;
}
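/*
 * Usage sketch (illustrative): a driver probe path typically narrows
 * the mask until dma_set_mask() accepts it:
 *
 *	if (dma_set_mask(dev, DMA_64BIT_MASK) &&
 *	    dma_set_mask(dev, DMA_32BIT_MASK))
 *		goto fail;
 */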
#endif /* !defined(__VMKLNX__) */
static inline void
dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
{
flush_write_buffers();
}
extern struct device fallback_dev;
extern int panic_on_overflow;
#endif /* _X8664_DMA_MAPPING_H */

View file

@ -0,0 +1,304 @@
/*
* linux/include/asm/dma.h: Defines for using and allocating dma channels.
* Written by Hennus Bergman, 1992.
* High DMA channel support & info by Hannu Savolainen
* and John Boyd, Nov. 1992.
*/
#ifndef _ASM_DMA_H
#define _ASM_DMA_H
#include <linux/spinlock.h> /* And spinlocks */
#include <asm/io.h> /* need byte IO */
#include <linux/delay.h>
#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
#define dma_outb outb_p
#else
#define dma_outb outb
#endif
#define dma_inb inb
/*
* NOTES about DMA transfers:
*
* controller 1: channels 0-3, byte operations, ports 00-1F
* controller 2: channels 4-7, word operations, ports C0-DF
*
* - ALL registers are 8 bits only, regardless of transfer size
* - channel 4 is not used - cascades 1 into 2.
* - channels 0-3 are byte - addresses/counts are for physical bytes
* - channels 5-7 are word - addresses/counts are for physical words
* - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
* - transfer count loaded to registers is 1 less than actual count
* - controller 2 offsets are all even (2x offsets for controller 1)
* - page registers for 5-7 don't use data bit 0, represent 128K pages
* - page registers for 0-3 use bit 0, represent 64K pages
*
* DMA transfers are limited to the lower 16MB of _physical_ memory.
* Note that addresses loaded into registers must be _physical_ addresses,
* not logical addresses (which may differ if paging is active).
*
* Address mapping for channels 0-3:
*
* A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
* | ... | | ... | | ... |
* | ... | | ... | | ... |
* | ... | | ... | | ... |
* P7 ... P0 A7 ... A0 A7 ... A0
* | Page | Addr MSB | Addr LSB | (DMA registers)
*
* Address mapping for channels 5-7:
*
* A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
* | ... | \ \ ... \ \ \ ... \ \
* | ... | \ \ ... \ \ \ ... \ (not used)
* | ... | \ \ ... \ \ \ ... \
* P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
* | Page | Addr MSB | Addr LSB | (DMA registers)
*
* Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
* and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
* the hardware level, so odd-byte transfers aren't possible).
*
* Transfer count (_not # bytes_) is limited to 64K, represented as actual
* count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
* and up to 128K bytes may be transferred on channels 5-7 in one operation.
*
*/
#define MAX_DMA_CHANNELS 8
/* 16MB ISA DMA zone */
#define MAX_DMA_PFN ((16*1024*1024) >> PAGE_SHIFT)
/* 4GB broken PCI/AGP hardware bus master zone */
#define MAX_DMA32_PFN ((4UL*1024*1024*1024) >> PAGE_SHIFT)
/* Compat define for old dma zone */
#define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT))
/* 8237 DMA controllers */
#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
/* DMA controller registers */
#define DMA1_CMD_REG 0x08 /* command register (w) */
#define DMA1_STAT_REG 0x08 /* status register (r) */
#define DMA1_REQ_REG 0x09 /* request register (w) */
#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
#define DMA1_MODE_REG 0x0B /* mode register (w) */
#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
#define DMA2_CMD_REG 0xD0 /* command register (w) */
#define DMA2_STAT_REG 0xD0 /* status register (r) */
#define DMA2_REQ_REG 0xD2 /* request register (w) */
#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
#define DMA2_MODE_REG 0xD6 /* mode register (w) */
#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
#define DMA_ADDR_0 0x00 /* DMA address registers */
#define DMA_ADDR_1 0x02
#define DMA_ADDR_2 0x04
#define DMA_ADDR_3 0x06
#define DMA_ADDR_4 0xC0
#define DMA_ADDR_5 0xC4
#define DMA_ADDR_6 0xC8
#define DMA_ADDR_7 0xCC
#define DMA_CNT_0 0x01 /* DMA count registers */
#define DMA_CNT_1 0x03
#define DMA_CNT_2 0x05
#define DMA_CNT_3 0x07
#define DMA_CNT_4 0xC2
#define DMA_CNT_5 0xC6
#define DMA_CNT_6 0xCA
#define DMA_CNT_7 0xCE
#define DMA_PAGE_0 0x87 /* DMA page registers */
#define DMA_PAGE_1 0x83
#define DMA_PAGE_2 0x81
#define DMA_PAGE_3 0x82
#define DMA_PAGE_5 0x8B
#define DMA_PAGE_6 0x89
#define DMA_PAGE_7 0x8A
#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
#define DMA_AUTOINIT 0x10
extern spinlock_t dma_spin_lock;
static __inline__ unsigned long claim_dma_lock(void)
{
unsigned long flags;
spin_lock_irqsave(&dma_spin_lock, flags);
return flags;
}
static __inline__ void release_dma_lock(unsigned long flags)
{
spin_unlock_irqrestore(&dma_spin_lock, flags);
}
/* enable/disable a specific DMA channel */
static __inline__ void enable_dma(unsigned int dmanr)
{
if (dmanr<=3)
dma_outb(dmanr, DMA1_MASK_REG);
else
dma_outb(dmanr & 3, DMA2_MASK_REG);
}
static __inline__ void disable_dma(unsigned int dmanr)
{
if (dmanr<=3)
dma_outb(dmanr | 4, DMA1_MASK_REG);
else
dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
}
/* Clear the 'DMA Pointer Flip Flop'.
* Write 0 for LSB/MSB, 1 for MSB/LSB access.
* Use this once to initialize the FF to a known state.
* After that, keep track of it. :-)
* --- In order to do that, the DMA routines below should ---
* --- only be used while holding the DMA lock ! ---
*/
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
if (dmanr<=3)
dma_outb(0, DMA1_CLEAR_FF_REG);
else
dma_outb(0, DMA2_CLEAR_FF_REG);
}
/* set mode (above) for a specific DMA channel */
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{
if (dmanr<=3)
dma_outb(mode | dmanr, DMA1_MODE_REG);
else
dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
}
/* Set only the page register bits of the transfer address.
* This is used for successive transfers when we know the contents of
* the lower 16 bits of the DMA current address register, but a 64k boundary
* may have been crossed.
*/
static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
{
switch(dmanr) {
case 0:
dma_outb(pagenr, DMA_PAGE_0);
break;
case 1:
dma_outb(pagenr, DMA_PAGE_1);
break;
case 2:
dma_outb(pagenr, DMA_PAGE_2);
break;
case 3:
dma_outb(pagenr, DMA_PAGE_3);
break;
case 5:
dma_outb(pagenr & 0xfe, DMA_PAGE_5);
break;
case 6:
dma_outb(pagenr & 0xfe, DMA_PAGE_6);
break;
case 7:
dma_outb(pagenr & 0xfe, DMA_PAGE_7);
break;
}
}
/* Set transfer address & page bits for specific DMA channel.
* Assumes dma flipflop is clear.
*/
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
set_dma_page(dmanr, a>>16);
if (dmanr <= 3) {
dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
} else {
dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
}
}
/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
* a specific DMA channel.
* You must ensure the parameters are valid.
* NOTE: from a manual: "the number of transfers is one more
* than the initial word count"! This is taken into account.
* Assumes dma flip-flop is clear.
* NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
*/
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
count--;
if (dmanr <= 3) {
dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
} else {
dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
}
}
/* Get DMA residue count. After a DMA transfer, this
* should return zero. Reading this while a DMA transfer is
* still in progress will return unpredictable results.
* If called before the channel has been used, it may return 1.
* Otherwise, it returns the number of _bytes_ left to transfer.
*
* Assumes DMA flip-flop is clear.
*/
static __inline__ int get_dma_residue(unsigned int dmanr)
{
unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
: ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
/* using short to get 16-bit wrap around */
unsigned short count;
count = 1 + dma_inb(io_port);
count += dma_inb(io_port) << 8;
return (dmanr<=3)? count : (count<<1);
}
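/*
 * Example (hedged sketch): the canonical sequence for programming a
 * byte channel with the helpers above, under the DMA lock as required
 * by the comments. "chan", "buf" and "len" are placeholders; buf must
 * lie below 16MB and must not cross a 64K boundary. isa_virt_to_bus()
 * comes from <asm/io.h>.
 *
 *      unsigned long flags = claim_dma_lock();
 *      disable_dma(chan);
 *      clear_dma_ff(chan);
 *      set_dma_mode(chan, DMA_MODE_READ);
 *      set_dma_addr(chan, isa_virt_to_bus(buf));
 *      set_dma_count(chan, len);
 *      enable_dma(chan);
 *      release_dma_lock(flags);
 */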
/* These are in kernel/dma.c: */
extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
extern void free_dma(unsigned int dmanr); /* release it again */
/* From PCI */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* _ASM_DMA_H */

View file

@@ -0,0 +1,34 @@
#ifndef _ASM_DMI_H
#define _ASM_DMI_H 1
#include <asm/io.h>
#if !defined(__VMKLNX__)
extern void *dmi_ioremap(unsigned long addr, unsigned long size);
extern void dmi_iounmap(void *addr, unsigned long size);
#endif
#define DMI_MAX_DATA 2048
extern int dmi_alloc_index;
extern char dmi_alloc_data[DMI_MAX_DATA];
/* This is so early that there is no good way to allocate dynamic memory.
   Allocate data in a BSS array. */
static inline void *dmi_alloc(unsigned len)
{
int idx = dmi_alloc_index;
if ((dmi_alloc_index += len) > DMI_MAX_DATA)
return NULL;
return dmi_alloc_data + idx;
}
#if defined(__VMKLNX__)
#define dmi_ioremap(addr, size) ioremap(addr, size)
#define dmi_iounmap(addr, size) iounmap(addr)
#else
#define dmi_ioremap early_ioremap
#define dmi_iounmap early_iounmap
#endif /* #if defined(__VMKLNX__) */
#endif

View file

@@ -0,0 +1,51 @@
#ifndef _DWARF2_H
#define _DWARF2_H 1
#ifndef __ASSEMBLY__
#warning "asm/dwarf2.h should be only included in pure assembly files"
#endif
/*
Macros for dwarf2 CFI unwind table entries.
See "as.info" for details on these pseudo ops. Unfortunately
   they are only supported in very new binutils, so define them
   away for older versions.
*/
#ifdef CONFIG_UNWIND_INFO
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
#define CFI_DEF_CFA .cfi_def_cfa
#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
#define CFI_OFFSET .cfi_offset
#define CFI_REL_OFFSET .cfi_rel_offset
#define CFI_REGISTER .cfi_register
#define CFI_RESTORE .cfi_restore
#define CFI_REMEMBER_STATE .cfi_remember_state
#define CFI_RESTORE_STATE .cfi_restore_state
#define CFI_UNDEFINED .cfi_undefined
#else
/* use assembler line comment character # to ignore the arguments. */
#define CFI_STARTPROC #
#define CFI_ENDPROC #
#define CFI_DEF_CFA #
#define CFI_DEF_CFA_REGISTER #
#define CFI_DEF_CFA_OFFSET #
#define CFI_ADJUST_CFA_OFFSET #
#define CFI_OFFSET #
#define CFI_REL_OFFSET #
#define CFI_REGISTER #
#define CFI_RESTORE #
#define CFI_REMEMBER_STATE #
#define CFI_RESTORE_STATE #
#define CFI_UNDEFINED #
#endif
#endif

View file

@@ -0,0 +1,67 @@
/*
* structures and definitions for the int 15, ax=e820 memory map
* scheme.
*
* In a nutshell, setup.S populates a scratch table in the
* empty_zero_block that contains a list of usable address/size
 * pairs. In setup.c, this information is transferred into the e820map,
* and in init.c/numa.c, that new information is used to mark pages
* reserved or not.
*/
#ifndef __E820_HEADER
#define __E820_HEADER
#include <linux/mmzone.h>
#define E820MAP 0x2d0 /* our map */
#define E820MAX 128 /* number of entries in E820MAP */
#define E820NR 0x1e8 /* # entries in E820MAP */
#define E820_RAM 1
#define E820_RESERVED 2
#define E820_ACPI 3 /* usable as RAM once ACPI tables have been read */
#define E820_NVS 4
#define HIGH_MEMORY (1024*1024)
#define LOWMEMSIZE() (0x9f000)
#ifndef __ASSEMBLY__
struct e820entry {
u64 addr; /* start of memory segment */
u64 size; /* size of memory segment */
u32 type; /* type of memory segment */
} __attribute__((packed));
struct e820map {
int nr_map;
struct e820entry map[E820MAX];
};
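/*
 * Example (sketch only): consumers walk the parsed map through the
 * "e820" object declared below.
 *
 *      int i;
 *      for (i = 0; i < e820.nr_map; i++)
 *              printk("%Lx-%Lx type %u\n", e820.map[i].addr,
 *                     e820.map[i].addr + e820.map[i].size,
 *                     e820.map[i].type);
 */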
extern unsigned long find_e820_area(unsigned long start, unsigned long end,
unsigned size);
extern void add_memory_region(unsigned long start, unsigned long size,
int type);
extern void setup_memory_region(void);
extern void contig_e820_setup(void);
extern unsigned long e820_end_of_ram(void);
extern void e820_reserve_resources(void);
extern void e820_mark_nosave_regions(void);
extern void e820_print_map(char *who);
extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type);
extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type);
extern void e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end);
extern void e820_setup_gap(void);
extern unsigned long e820_hole_size(unsigned long start_pfn,
unsigned long end_pfn);
extern void __init parse_memopt(char *p, char **end);
extern void __init parse_memmapopt(char *p, char **end);
extern struct e820map e820;
extern unsigned ebda_addr, ebda_size;
#endif/*!__ASSEMBLY__*/
#endif/*__E820_HEADER*/

View file

@@ -0,0 +1,18 @@
#ifndef ASM_EDAC_H
#define ASM_EDAC_H
/* ECC atomic, DMA, SMP and interrupt safe scrub function */
static __inline__ void atomic_scrub(void *va, u32 size)
{
unsigned int *virt_addr = va;
u32 i;
for (i = 0; i < size / 4; i++, virt_addr++)
/* Very carefully read and write to memory atomically
* so we are interrupt, DMA and SMP safe.
*/
__asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
}
#endif

View file

@@ -0,0 +1,171 @@
#ifndef __ASM_X86_64_ELF_H
#define __ASM_X86_64_ELF_H
/*
* ELF register definitions..
*/
#include <asm/ptrace.h>
#include <asm/user.h>
/* x86-64 relocation types */
#define R_X86_64_NONE 0 /* No reloc */
#define R_X86_64_64 1 /* Direct 64 bit */
#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
#define R_X86_64_PLT32 4 /* 32 bit PLT address */
#define R_X86_64_COPY 5 /* Copy symbol at runtime */
#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
#define R_X86_64_RELATIVE 8 /* Adjust by program base */
#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
offset to GOT */
#define R_X86_64_32 10 /* Direct 32 bit zero extended */
#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
#define R_X86_64_16 12 /* Direct 16 bit zero extended */
#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
#define R_X86_64_8 14 /* Direct 8 bit sign extended */
#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
#define R_X86_64_NUM 16
typedef unsigned long elf_greg_t;
#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_i387_struct elf_fpregset_t;
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS64
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_X86_64
#ifdef __KERNEL__
#include <asm/processor.h>
#include <asm/compat.h>
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) \
((x)->e_machine == EM_X86_64)
/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
contains a pointer to a function which might be registered using `atexit'.
   This provides a means for the dynamic linker to call DT_FINI functions for
   shared libraries that have been loaded before the code runs.
   A value of 0 tells us we have no such handler.
We might as well make sure everything else is cleared too (except for %esp),
just to make things more deterministic.
*/
#define ELF_PLAT_INIT(_r, load_addr) do { \
struct task_struct *cur = current; \
(_r)->rbx = 0; (_r)->rcx = 0; (_r)->rdx = 0; \
(_r)->rsi = 0; (_r)->rdi = 0; (_r)->rbp = 0; \
(_r)->rax = 0; \
(_r)->r8 = 0; \
(_r)->r9 = 0; \
(_r)->r10 = 0; \
(_r)->r11 = 0; \
(_r)->r12 = 0; \
(_r)->r13 = 0; \
(_r)->r14 = 0; \
(_r)->r15 = 0; \
cur->thread.fs = 0; cur->thread.gs = 0; \
cur->thread.fsindex = 0; cur->thread.gsindex = 0; \
cur->thread.ds = 0; cur->thread.es = 0; \
clear_thread_flag(TIF_IA32); \
} while (0)
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
now struct_user_regs, they are different). Assumes current is the process
getting dumped. */
#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \
unsigned v; \
(pr_reg)[0] = (regs)->r15; \
(pr_reg)[1] = (regs)->r14; \
(pr_reg)[2] = (regs)->r13; \
(pr_reg)[3] = (regs)->r12; \
(pr_reg)[4] = (regs)->rbp; \
(pr_reg)[5] = (regs)->rbx; \
(pr_reg)[6] = (regs)->r11; \
(pr_reg)[7] = (regs)->r10; \
(pr_reg)[8] = (regs)->r9; \
(pr_reg)[9] = (regs)->r8; \
(pr_reg)[10] = (regs)->rax; \
(pr_reg)[11] = (regs)->rcx; \
(pr_reg)[12] = (regs)->rdx; \
(pr_reg)[13] = (regs)->rsi; \
(pr_reg)[14] = (regs)->rdi; \
(pr_reg)[15] = (regs)->orig_rax; \
(pr_reg)[16] = (regs)->rip; \
(pr_reg)[17] = (regs)->cs; \
(pr_reg)[18] = (regs)->eflags; \
(pr_reg)[19] = (regs)->rsp; \
(pr_reg)[20] = (regs)->ss; \
(pr_reg)[21] = current->thread.fs; \
(pr_reg)[22] = current->thread.gs; \
asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \
asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \
asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \
asm("movl %%gs,%0" : "=r" (v)); (pr_reg)[26] = v; \
} while(0);
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
but it's not easy, and we've already done it here. */
#define ELF_HWCAP (boot_cpu_data.x86_capability[0])
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo.
For the moment, we have only optimizations for the Intel generations,
but that could change... */
/* I'm not sure if we can use '-' here */
#define ELF_PLATFORM ("x86_64")
extern void set_personality_64bit(void);
#define SET_PERSONALITY(ex, ibcs2) set_personality_64bit()
/*
* An executable for which elf_read_implies_exec() returns TRUE will
* have the READ_IMPLIES_EXEC personality flag set automatically.
*/
#define elf_read_implies_exec(ex, executable_stack) (executable_stack != EXSTACK_DISABLE_X)
struct task_struct;
extern int dump_task_regs (struct task_struct *, elf_gregset_t *);
extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
/* 1GB for 64bit, 8MB for 32bit */
#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)
#endif
#define __HAVE_ARCH_RANDOMIZE_BRK
extern void randomize_brk(unsigned long old_brk);
#endif

View file

@@ -0,0 +1,6 @@
#ifndef _ASM_EMERGENCY_RESTART_H
#define _ASM_EMERGENCY_RESTART_H
extern void machine_emergency_restart(void);
#endif /* _ASM_EMERGENCY_RESTART_H */

View file

@@ -0,0 +1,6 @@
#ifndef _X8664_ERRNO_H
#define _X8664_ERRNO_H
#include <asm-generic/errno.h>
#endif

View file

@@ -0,0 +1 @@
#include <asm-generic/fcntl.h>

View file

@@ -0,0 +1,95 @@
/*
* fixmap.h: compile-time virtual memory allocation
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1998 Ingo Molnar
*/
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
#include <linux/kernel.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <asm/vsyscall.h>
#include <asm/vsyscall32.h>
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
* compile time, but to set the physical address only
* in the boot process.
*
 * These 'compile-time allocated' memory buffers are
 * fixed-size 4k pages (or larger if used with an increment
 * higher than 1). Use set_fixmap(idx, phys) to associate
 * physical memory with fixmap indices.
*
* TLB entries of such buffers will not be flushed across
* task switches.
*/
enum fixed_addresses {
VSYSCALL_LAST_PAGE,
VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
VSYSCALL_HPET,
FIX_HPET_BASE,
#ifdef CONFIG_X86_LOCAL_APIC
FIX_APIC_BASE, /* local (CPU) APIC -- required for SMP or not */
#endif
#ifdef CONFIG_X86_IO_APIC
FIX_IO_APIC_BASE_0,
FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
#endif
__end_of_fixed_addresses
};
extern void __set_fixmap (enum fixed_addresses idx,
unsigned long phys, pgprot_t flags);
#define set_fixmap(idx, phys) \
__set_fixmap(idx, phys, PAGE_KERNEL)
/*
* Some hardware wants to get fixmapped without caching.
*/
#define set_fixmap_nocache(idx, phys) \
__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
extern void __this_fixmap_does_not_exist(void);
/*
* 'index to address' translation. If anyone tries to use the idx
 * directly without translation, we catch the bug with a NULL-dereference
* kernel oops. Illegal ranges of incoming indices are caught too.
*/
static __always_inline unsigned long fix_to_virt(const unsigned int idx)
{
/*
* this branch gets completely eliminated after inlining,
* except when someone tries to use fixaddr indices in an
* illegal way. (such as mixing up address types or using
* out-of-range indices).
*
* If it doesn't get removed, the linker will complain
* loudly with a reasonably clear error message..
*/
if (idx >= __end_of_fixed_addresses)
__this_fixmap_does_not_exist();
return __fix_to_virt(idx);
}
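/*
 * Usage sketch (everything except the fixmap API itself is a
 * placeholder): map a device page at its compile-time slot, then
 * translate the index back to a virtual address.
 *
 *      set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
 *      apic_va = (void *)fix_to_virt(FIX_APIC_BASE);
 */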
#endif

View file

@@ -0,0 +1,283 @@
/*
* Architecture specific parts of the Floppy driver
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995
*/
#ifndef __ASM_X86_64_FLOPPY_H
#define __ASM_X86_64_FLOPPY_H
#include <linux/vmalloc.h>
/*
* The DMA channel used by the floppy controller cannot access data at
* addresses >= 16MB
*
* Went back to the 1MB limit, as some people had problems with the floppy
* driver otherwise. It doesn't matter much for performance anyway, as most
* floppy accesses go through the track buffer.
*/
#define _CROSS_64KB(a,s,vdma) \
(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
#define SW fd_routine[use_virtual_dma&1]
#define CSW fd_routine[can_use_virtual_dma & 1]
#define fd_inb(port) inb_p(port)
#define fd_outb(value,port) outb_p(value,port)
#define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy")
#define fd_free_dma() CSW._free_dma(FLOPPY_DMA)
#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
#define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA)
#define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size)
#define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io)
#define FLOPPY_CAN_FALLBACK_ON_NODMA
static int virtual_dma_count;
static int virtual_dma_residue;
static char *virtual_dma_addr;
static int virtual_dma_mode;
static int doing_pdma;
static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
{
register unsigned char st;
#undef TRACE_FLPY_INT
#ifdef TRACE_FLPY_INT
static int calls=0;
static int bytes=0;
static int dma_wait=0;
#endif
if (!doing_pdma)
return floppy_interrupt(irq, dev_id, regs);
#ifdef TRACE_FLPY_INT
if(!calls)
bytes = virtual_dma_count;
#endif
{
register int lcount;
register char *lptr;
st = 1;
for(lcount=virtual_dma_count, lptr=virtual_dma_addr;
lcount; lcount--, lptr++) {
st=inb(virtual_dma_port+4) & 0xa0 ;
if(st != 0xa0)
break;
if(virtual_dma_mode)
outb_p(*lptr, virtual_dma_port+5);
else
*lptr = inb_p(virtual_dma_port+5);
}
virtual_dma_count = lcount;
virtual_dma_addr = lptr;
st = inb(virtual_dma_port+4);
}
#ifdef TRACE_FLPY_INT
calls++;
#endif
if(st == 0x20)
return IRQ_HANDLED;
if(!(st & 0x20)) {
virtual_dma_residue += virtual_dma_count;
virtual_dma_count=0;
#ifdef TRACE_FLPY_INT
printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
virtual_dma_count, virtual_dma_residue, calls, bytes,
dma_wait);
calls = 0;
dma_wait=0;
#endif
doing_pdma = 0;
floppy_interrupt(irq, dev_id, regs);
return IRQ_HANDLED;
}
#ifdef TRACE_FLPY_INT
if(!virtual_dma_count)
dma_wait++;
#endif
return IRQ_HANDLED;
}
static void fd_disable_dma(void)
{
if(! (can_use_virtual_dma & 1))
disable_dma(FLOPPY_DMA);
doing_pdma = 0;
virtual_dma_residue += virtual_dma_count;
virtual_dma_count=0;
}
static int vdma_request_dma(unsigned int dmanr, const char * device_id)
{
return 0;
}
static void vdma_nop(unsigned int dummy)
{
}
static int vdma_get_dma_residue(unsigned int dummy)
{
return virtual_dma_count + virtual_dma_residue;
}
static int fd_request_irq(void)
{
if(can_use_virtual_dma)
return request_irq(FLOPPY_IRQ, floppy_hardint,
IRQF_DISABLED, "floppy", NULL);
else
return request_irq(FLOPPY_IRQ, floppy_interrupt,
IRQF_DISABLED, "floppy", NULL);
}
static unsigned long dma_mem_alloc(unsigned long size)
{
return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY,get_order(size));
}
static unsigned long vdma_mem_alloc(unsigned long size)
{
return (unsigned long) vmalloc(size);
}
#define nodma_mem_alloc(size) vdma_mem_alloc(size)
static void _fd_dma_mem_free(unsigned long addr, unsigned long size)
{
if((unsigned long) addr >= (unsigned long) high_memory)
vfree((void *)addr);
else
free_pages(addr, get_order(size));
}
#define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size)
static void _fd_chose_dma_mode(char *addr, unsigned long size)
{
if(can_use_virtual_dma == 2) {
if((unsigned long) addr >= (unsigned long) high_memory ||
isa_virt_to_bus(addr) >= 0x1000000 ||
_CROSS_64KB(addr, size, 0))
use_virtual_dma = 1;
else
use_virtual_dma = 0;
} else {
use_virtual_dma = can_use_virtual_dma & 1;
}
}
#define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size)
static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
{
doing_pdma = 1;
virtual_dma_port = io;
virtual_dma_mode = (mode == DMA_MODE_WRITE);
virtual_dma_addr = addr;
virtual_dma_count = size;
virtual_dma_residue = 0;
return 0;
}
static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
{
#ifdef FLOPPY_SANITY_CHECK
if (CROSS_64KB(addr, size)) {
printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size);
return -1;
}
#endif
/* actual, physical DMA */
doing_pdma = 0;
clear_dma_ff(FLOPPY_DMA);
set_dma_mode(FLOPPY_DMA,mode);
set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr));
set_dma_count(FLOPPY_DMA,size);
enable_dma(FLOPPY_DMA);
return 0;
}
static struct fd_routine_l {
int (*_request_dma)(unsigned int dmanr, const char * device_id);
void (*_free_dma)(unsigned int dmanr);
int (*_get_dma_residue)(unsigned int dummy);
unsigned long (*_dma_mem_alloc) (unsigned long size);
int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
} fd_routine[] = {
{
request_dma,
free_dma,
get_dma_residue,
dma_mem_alloc,
hard_dma_setup
},
{
vdma_request_dma,
vdma_nop,
vdma_get_dma_residue,
vdma_mem_alloc,
vdma_dma_setup
}
};
static int FDC1 = 0x3f0;
static int FDC2 = -1;
/*
* Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
* is needed to prevent corrupted CMOS RAM in case "insmod floppy"
* coincides with another rtc CMOS user. Paul G.
*/
#define FLOPPY0_TYPE ({ \
unsigned long flags; \
unsigned char val; \
spin_lock_irqsave(&rtc_lock, flags); \
val = (CMOS_READ(0x10) >> 4) & 15; \
spin_unlock_irqrestore(&rtc_lock, flags); \
val; \
})
#define FLOPPY1_TYPE ({ \
unsigned long flags; \
unsigned char val; \
spin_lock_irqsave(&rtc_lock, flags); \
val = CMOS_READ(0x10) & 15; \
spin_unlock_irqrestore(&rtc_lock, flags); \
val; \
})
#define N_FDC 2
#define N_DRIVE 8
#define FLOPPY_MOTOR_MASK 0xf0
#define AUTO_DMA
#define EXTRA_FLOPPY_PARAMS
#endif /* __ASM_X86_64_FLOPPY_H */

View file

@@ -0,0 +1,13 @@
#ifndef _FPU32_H
#define _FPU32_H 1
struct _fpstate_ia32;
int restore_i387_ia32(struct task_struct *tsk, struct _fpstate_ia32 __user *buf, int fsave);
int save_i387_ia32(struct task_struct *tsk, struct _fpstate_ia32 __user *buf,
struct pt_regs *regs, int fsave);
int get_fpregs32(struct user_i387_ia32_struct *, struct task_struct *);
int set_fpregs32(struct task_struct *, const struct user_i387_ia32_struct *);
#endif

View file

@@ -0,0 +1,125 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
#ifdef __KERNEL__
#include <linux/futex.h>
#include <asm/errno.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
__asm__ __volatile ( \
"1: " insn "\n" \
"2: .section .fixup,\"ax\"\n\
3: mov %3, %1\n\
jmp 2b\n\
.previous\n\
.section __ex_table,\"a\"\n\
.align 8\n\
.quad 1b,3b\n\
.previous" \
: "=r" (oldval), "=r" (ret), "=m" (*uaddr) \
: "i" (-EFAULT), "m" (*uaddr), "0" (oparg), "1" (0))
#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
__asm__ __volatile ( \
"1: movl %2, %0\n\
movl %0, %3\n" \
insn "\n" \
"2: " LOCK_PREFIX "cmpxchgl %3, %2\n\
jnz 1b\n\
3: .section .fixup,\"ax\"\n\
4: mov %5, %1\n\
jmp 3b\n\
.previous\n\
.section __ex_table,\"a\"\n\
.align 8\n\
.quad 1b,4b,2b,4b\n\
.previous" \
: "=&a" (oldval), "=&r" (ret), "=m" (*uaddr), \
"=&r" (tem) \
: "r" (oparg), "i" (-EFAULT), "m" (*uaddr), "1" (0))
static inline int
futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret, tem;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
inc_preempt_count();
switch (op) {
case FUTEX_OP_SET:
__futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ADD:
__futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
uaddr, oparg);
break;
case FUTEX_OP_OR:
__futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ANDN:
__futex_atomic_op2("andl %4, %3", ret, oldval, uaddr, ~oparg);
break;
case FUTEX_OP_XOR:
__futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr, oparg);
break;
default:
ret = -ENOSYS;
}
dec_preempt_count();
if (!ret) {
switch (cmp) {
case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
default: ret = -ENOSYS;
}
}
return ret;
}
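/*
 * Worked example (not from the original source): encoded_op 0x10001000
 * decodes to op = FUTEX_OP_ADD (bits 28-31), cmp = FUTEX_OP_CMP_EQ
 * (bits 24-27), oparg = 1 (bits 12-23) and cmparg = 0 (bits 0-11), so
 * the routine atomically adds 1 to *uaddr and returns whether the old
 * value equalled 0.
 */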
static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
{
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
__asm__ __volatile__(
"1: " LOCK_PREFIX "cmpxchgl %3, %1 \n"
"2: .section .fixup, \"ax\" \n"
"3: mov %2, %0 \n"
" jmp 2b \n"
" .previous \n"
" .section __ex_table, \"a\" \n"
" .align 8 \n"
" .quad 1b,3b \n"
" .previous \n"
: "=a" (oldval), "=m" (*uaddr)
: "i" (-EFAULT), "r" (newval), "0" (oldval)
: "memory"
);
return oldval;
}
#endif
#endif

View file

@@ -0,0 +1,35 @@
#ifndef _ASM_GENAPIC_H
#define _ASM_GENAPIC_H 1
/*
* Copyright 2004 James Cleverdon, IBM.
* Subject to the GNU Public License, v.2
*
* Generic APIC sub-arch data struct.
*
* Hacked for x86-64 by James Cleverdon from i386 architecture code by
* Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
* James Cleverdon.
*/
struct genapic {
char *name;
u32 int_delivery_mode;
u32 int_dest_mode;
u32 int_delivery_dest; /* for quick IPIs */
int (*apic_id_registered)(void);
cpumask_t (*target_cpus)(void);
void (*init_apic_ldr)(void);
/* ipi */
void (*send_IPI_mask)(cpumask_t mask, int vector);
void (*send_IPI_allbutself)(int vector);
void (*send_IPI_all)(int vector);
/* */
unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
unsigned int (*phys_pkg_id)(int index_msb);
};
extern struct genapic *genapic;
#endif

View file

@@ -0,0 +1,20 @@
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H
#include <linux/threads.h>
#include <linux/irq.h>
#include <asm/pda.h>
#include <asm/apic.h>
#define __ARCH_IRQ_STAT 1
#define local_softirq_pending() read_pda(__softirq_pending)
#define __ARCH_SET_SOFTIRQ_PENDING 1
#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
#define or_softirq_pending(x) or_pda(__softirq_pending, (x))
extern void ack_bad_irq(unsigned int irq);
#endif /* __ASM_HARDIRQ_H */

View file

@@ -0,0 +1 @@
#warning this file is obsolete, please do not use it

View file

@@ -0,0 +1,71 @@
#ifndef _ASM_X8664_HPET_H
#define _ASM_X8664_HPET_H 1
/*
* Documentation on HPET can be found at:
* http://www.intel.com/ial/home/sp/pcmmspec.htm
* ftp://download.intel.com/ial/home/sp/mmts098.pdf
*/
#define HPET_MMAP_SIZE 1024
#define HPET_ID 0x000
#define HPET_PERIOD 0x004
#define HPET_CFG 0x010
#define HPET_STATUS 0x020
#define HPET_COUNTER 0x0f0
#define HPET_Tn_OFFSET 0x20
#define HPET_Tn_CFG(n) (0x100 + (n) * HPET_Tn_OFFSET)
#define HPET_Tn_ROUTE(n) (0x104 + (n) * HPET_Tn_OFFSET)
#define HPET_Tn_CMP(n) (0x108 + (n) * HPET_Tn_OFFSET)
#define HPET_T0_CFG HPET_Tn_CFG(0)
#define HPET_T0_CMP HPET_Tn_CMP(0)
#define HPET_T1_CFG HPET_Tn_CFG(1)
#define HPET_T1_CMP HPET_Tn_CMP(1)
#define HPET_ID_VENDOR 0xffff0000
#define HPET_ID_LEGSUP 0x00008000
#define HPET_ID_64BIT 0x00002000
#define HPET_ID_NUMBER 0x00001f00
#define HPET_ID_REV 0x000000ff
#define HPET_ID_NUMBER_SHIFT 8
#define HPET_ID_VENDOR_SHIFT 16
#define HPET_ID_VENDOR_8086 0x8086
#define HPET_CFG_ENABLE 0x001
#define HPET_CFG_LEGACY 0x002
#define HPET_LEGACY_8254 2
#define HPET_LEGACY_RTC 8
#define HPET_TN_LEVEL 0x0002
#define HPET_TN_ENABLE 0x0004
#define HPET_TN_PERIODIC 0x0008
#define HPET_TN_PERIODIC_CAP 0x0010
#define HPET_TN_64BIT_CAP 0x0020
#define HPET_TN_SETVAL 0x0040
#define HPET_TN_32BIT 0x0100
#define HPET_TN_ROUTE 0x3e00
#define HPET_TN_FSB 0x4000
#define HPET_TN_FSB_CAP 0x8000
#define HPET_TN_ROUTE_SHIFT 9
#define HPET_TICK_RATE (HZ * 100000UL)
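/*
 * Example (hedged sketch): decoding the capability word read from the
 * HPET_ID register with the masks above; "id" stands for a 32-bit
 * register read.
 *
 *      vendor = (id & HPET_ID_VENDOR) >> HPET_ID_VENDOR_SHIFT;
 *      timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
 *      legacy = !!(id & HPET_ID_LEGSUP);
 */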
extern int is_hpet_enabled(void);
extern int hpet_rtc_timer_init(void);
extern int apic_is_clustered_box(void);
extern int hpet_use_timer;
#ifdef CONFIG_HPET_EMULATE_RTC
extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask);
extern int hpet_set_rtc_irq_bit(unsigned long bit_mask);
extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec);
extern int hpet_set_periodic_freq(unsigned long freq);
extern int hpet_rtc_dropped_irq(void);
extern int hpet_rtc_timer_init(void);
#endif /* CONFIG_HPET_EMULATE_RTC */
#endif

View file

@@ -0,0 +1,135 @@
#ifndef _ASM_HW_IRQ_H
#define _ASM_HW_IRQ_H
/*
* linux/include/asm/hw_irq.h
*
* (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
*
* moved some of the old arch/i386/kernel/irq.h to here. VY
*
* IRQ/IPI changes taken from work by Thomas Radke
* <tomsoft@informatik.tu-chemnitz.de>
*
* hacked by Andi Kleen for x86-64.
*/
#ifndef __ASSEMBLY__
#include <asm/atomic.h>
#include <asm/irq.h>
#include <linux/profile.h>
#include <linux/smp.h>
struct hw_interrupt_type;
#endif
#define NMI_VECTOR 0x02
/*
* IDT vectors usable for external interrupt sources start
* at 0x20:
*/
#define FIRST_EXTERNAL_VECTOR 0x20
#define IA32_SYSCALL_VECTOR 0x80
/*
* Vectors 0x20-0x2f are used for ISA interrupts.
*/
/*
* Special IRQ vectors used by the SMP architecture, 0xf0-0xff
*
* some of the following vectors are 'rare', they are merged
* into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
* TLB, reschedule and local APIC vectors are performance-critical.
*/
#define SPURIOUS_APIC_VECTOR 0xff
#define ERROR_APIC_VECTOR 0xfe
#define RESCHEDULE_VECTOR 0xfd
#define CALL_FUNCTION_VECTOR 0xfc
/* fb free - please don't re-add KDB here because it's useless
   (hint - think what an NMI bit does to a vector) */
#define THERMAL_APIC_VECTOR 0xfa
#define THRESHOLD_APIC_VECTOR 0xf9
/* f8 free */
#define INVALIDATE_TLB_VECTOR_END 0xf7
#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
#define NUM_INVALIDATE_TLB_VECTORS 8
/*
* Local APIC timer IRQ vector is on a different priority level,
* to work around the 'lost local interrupt if more than 2 IRQ
* sources per level' errata.
*/
#define LOCAL_TIMER_VECTOR 0xef
/*
* First APIC vector available to drivers: (vectors 0x30-0xee)
* we start at 0x31 to spread out vectors evenly between priority
* levels. (0x80 is the syscall vector)
*/
#define FIRST_DEVICE_VECTOR 0x31
/* duplicated in irq.h */
#define FIRST_SYSTEM_VECTOR 0xef
#ifndef __ASSEMBLY__
extern u8 irq_vector[NR_IRQ_VECTORS];
#define IO_APIC_VECTOR(irq) (irq_vector[irq])
#define AUTO_ASSIGN -1
/*
* Various low-level irq details needed by irq.c, process.c,
* time.c, io_apic.c and smp.c
*
* Interrupt entry/exit code at both C and assembly level
*/
extern void disable_8259A_irq(unsigned int irq);
extern void enable_8259A_irq(unsigned int irq);
extern int i8259A_irq_pending(unsigned int irq);
extern void make_8259A_irq(unsigned int irq);
extern void init_8259A(int aeoi);
extern void FASTCALL(send_IPI_self(int vector));
extern void init_VISWS_APIC_irqs(void);
extern void setup_IO_APIC(void);
extern void disable_IO_APIC(void);
extern void print_IO_APIC(void);
extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
extern void send_IPI(int dest, int vector);
extern void setup_ioapic_dest(void);
extern unsigned long io_apic_irqs;
extern atomic_t irq_err_count;
extern atomic_t irq_mis_count;
#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
#define __STR(x) #x
#define STR(x) __STR(x)
#include <asm/ptrace.h>
#define IRQ_NAME2(nr) nr##_interrupt(void)
#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
/*
* SMP has a few special interrupts for IPI messages
*/
#define BUILD_IRQ(nr) \
asmlinkage void IRQ_NAME(nr); \
__asm__( \
"\n.p2align\n" \
"IRQ" #nr "_interrupt:\n\t" \
"push $~(" #nr ") ; " \
"jmp common_interrupt");
#define platform_legacy_irq(irq) ((irq) < 16)
#endif
#endif /* _ASM_HW_IRQ_H */

View file

@@ -0,0 +1,210 @@
/*
* include/asm-x86_64/i387.h
*
* Copyright (C) 1994 Linus Torvalds
*
* Pentium III FXSR, SSE support
* General FPU state handling cleanups
* Gareth Hughes <gareth@valinux.com>, May 2000
* x86-64 work by Andi Kleen 2002
*/
#ifndef __ASM_X86_64_I387_H
#define __ASM_X86_64_I387_H
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>
#if !defined(__VMKLNX__)
extern void fpu_init(void);
extern unsigned int mxcsr_feature_mask;
extern void mxcsr_feature_mask_init(void);
extern void init_fpu(struct task_struct *child);
extern int save_i387(struct _fpstate __user *buf);
/*
* FPU lazy state save handling...
*/
#define unlazy_fpu(tsk) do { \
if (task_thread_info(tsk)->status & TS_USEDFPU) \
save_init_fpu(tsk); \
} while (0)
/* Ignore delayed exceptions from user space */
static inline void tolerant_fwait(void)
{
asm volatile("1: fwait\n"
"2:\n"
" .section __ex_table,\"a\"\n"
" .align 8\n"
" .quad 1b,2b\n"
" .previous\n");
}
#define clear_fpu(tsk) do { \
if (task_thread_info(tsk)->status & TS_USEDFPU) { \
tolerant_fwait(); \
task_thread_info(tsk)->status &= ~TS_USEDFPU; \
stts(); \
} \
} while (0)
/*
* ptrace request handers...
*/
extern int get_fpregs(struct user_i387_struct __user *buf,
struct task_struct *tsk);
extern int set_fpregs(struct task_struct *tsk,
struct user_i387_struct __user *buf);
/*
* i387 state interaction
*/
#define get_fpu_mxcsr(t) ((t)->thread.i387.fxsave.mxcsr)
#define get_fpu_cwd(t) ((t)->thread.i387.fxsave.cwd)
#define get_fpu_fxsr_twd(t) ((t)->thread.i387.fxsave.twd)
#define get_fpu_swd(t) ((t)->thread.i387.fxsave.swd)
#define set_fpu_cwd(t,val) ((t)->thread.i387.fxsave.cwd = (val))
#define set_fpu_swd(t,val) ((t)->thread.i387.fxsave.swd = (val))
#define set_fpu_fxsr_twd(t,val) ((t)->thread.i387.fxsave.twd = (val))
#define X87_FSW_ES (1 << 7) /* Exception Summary */
/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
   is pending. Clear the x87 state here by setting it to fixed
   values. The kernel data segment can sometimes be 0 and sometimes
   the new user value. Both should be OK.
   Use the PDA as a safe address because it should already be in L1. */
static inline void clear_fpu_state(struct i387_fxsave_struct *fx)
{
if (unlikely(fx->swd & X87_FSW_ES))
asm volatile("fnclex");
alternative_input(ASM_NOP8 ASM_NOP2,
" emms\n" /* clear stack tags */
" fildl %%gs:0", /* load to clear state */
X86_FEATURE_FXSAVE_LEAK);
}
static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
{
int err;
asm volatile("1: rex64/fxrstor (%[fx])\n\t"
"2:\n"
".section .fixup,\"ax\"\n"
"3: movl $-1,%[err]\n"
" jmp 2b\n"
".previous\n"
".section __ex_table,\"a\"\n"
" .align 8\n"
" .quad 1b,3b\n"
".previous"
: [err] "=r" (err)
#if 0 /* See comment in __fxsave_clear() below. */
: [fx] "r" (fx), "m" (*fx), "0" (0));
#else
: [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
#endif
if (unlikely(err))
init_fpu(current);
return err;
}
static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
{
int err;
asm volatile("1: rex64/fxsave (%[fx])\n\t"
"2:\n"
".section .fixup,\"ax\"\n"
"3: movl $-1,%[err]\n"
" jmp 2b\n"
".previous\n"
".section __ex_table,\"a\"\n"
" .align 8\n"
" .quad 1b,3b\n"
".previous"
: [err] "=r" (err), "=m" (*fx)
#if 0 /* See comment in __fxsave_clear() below. */
: [fx] "r" (fx), "0" (0));
#else
: [fx] "cdaSDb" (fx), "0" (0));
#endif
if (unlikely(err))
__clear_user(fx, sizeof(struct i387_fxsave_struct));
/* No need to clear here because the caller clears USED_MATH */
return err;
}
static inline void __fxsave_clear(struct task_struct *tsk)
{
/* Using "rex64; fxsave %0" is broken because, if the memory operand
uses any extended registers for addressing, a second REX prefix
will be generated (to the assembler, rex64 followed by semicolon
is a separate instruction), and hence the 64-bitness is lost. */
#if 0
/* Using "fxsaveq %0" would be the ideal choice, but is only supported
starting with gas 2.16. */
__asm__ __volatile__("fxsaveq %0"
: "=m" (tsk->thread.i387.fxsave));
#elif 0
/* Using, as a workaround, the properly prefixed form below isn't
accepted by any binutils version so far released, complaining that
the same type of prefix is used twice if an extended register is
needed for addressing (fix submitted to mainline 2005-11-21). */
__asm__ __volatile__("rex64/fxsave %0"
: "=m" (tsk->thread.i387.fxsave));
#else
/* This, however, we can work around by forcing the compiler to select
an addressing mode that doesn't require extended registers. */
__asm__ __volatile__("rex64/fxsave %P2(%1)"
: "=m" (tsk->thread.i387.fxsave)
: "cdaSDb" (tsk),
"i" (offsetof(__typeof__(*tsk),
thread.i387.fxsave)));
#endif
clear_fpu_state(&tsk->thread.i387.fxsave);
}
static inline void kernel_fpu_begin(void)
{
struct thread_info *me = current_thread_info();
preempt_disable();
if (me->status & TS_USEDFPU) {
__fxsave_clear(me->task);
me->status &= ~TS_USEDFPU;
return;
}
clts();
}
static inline void kernel_fpu_end(void)
{
stts();
preempt_enable();
}
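/*
 * Usage sketch (assumption, not mandated by this header): kernel code
 * that touches FP/SSE registers brackets the work so the user's lazily
 * saved FPU state survives.
 *
 *      kernel_fpu_begin();
 *      ... FP/SSE instructions ...
 *      kernel_fpu_end();
 */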
static inline void save_init_fpu(struct task_struct *tsk)
{
__fxsave_clear(tsk);
task_thread_info(tsk)->status &= ~TS_USEDFPU;
stts();
}
/*
* This restores directly out of user space. Exceptions are handled.
*/
static inline int restore_i387(struct _fpstate __user *buf)
{
return restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
}
#endif /* !defined(__VMKLNX__) */
#endif /* __ASM_X86_64_I387_H */

View file

@@ -0,0 +1,178 @@
#ifndef _ASM_X86_64_IA32_H
#define _ASM_X86_64_IA32_H
#ifdef CONFIG_IA32_EMULATION
#include <linux/compat.h>
/*
* 32 bit structures for IA32 support.
*/
#include <asm/sigcontext32.h>
/* signal.h */
struct sigaction32 {
unsigned int sa_handler; /* Really a pointer, but need to deal
with 32 bits */
unsigned int sa_flags;
unsigned int sa_restorer; /* Another 32 bit pointer */
compat_sigset_t sa_mask; /* A 32 bit mask */
};
struct old_sigaction32 {
unsigned int sa_handler; /* Really a pointer, but need to deal
with 32 bits */
compat_old_sigset_t sa_mask; /* A 32 bit mask */
unsigned int sa_flags;
unsigned int sa_restorer; /* Another 32 bit pointer */
};
typedef struct sigaltstack_ia32 {
unsigned int ss_sp;
int ss_flags;
unsigned int ss_size;
} stack_ia32_t;
struct ucontext_ia32 {
unsigned int uc_flags;
unsigned int uc_link;
stack_ia32_t uc_stack;
struct sigcontext_ia32 uc_mcontext;
compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
/* This matches struct stat64 in glibc2.2, hence the absolutely
* insane amounts of padding around dev_t's.
*/
struct stat64 {
unsigned long long st_dev;
unsigned char __pad0[4];
#define STAT64_HAS_BROKEN_ST_INO 1
unsigned int __st_ino;
unsigned int st_mode;
unsigned int st_nlink;
unsigned int st_uid;
unsigned int st_gid;
unsigned long long st_rdev;
unsigned char __pad3[4];
long long st_size;
unsigned int st_blksize;
long long st_blocks; /* Number of 512-byte blocks allocated. */
unsigned st_atime;
unsigned st_atime_nsec;
unsigned st_mtime;
unsigned st_mtime_nsec;
unsigned st_ctime;
unsigned st_ctime_nsec;
unsigned long long st_ino;
} __attribute__((packed));
typedef struct compat_siginfo{
int si_signo;
int si_errno;
int si_code;
union {
int _pad[((128/sizeof(int)) - 3)];
/* kill() */
struct {
unsigned int _pid; /* sender's pid */
unsigned int _uid; /* sender's uid */
} _kill;
/* POSIX.1b timers */
struct {
compat_timer_t _tid; /* timer id */
int _overrun; /* overrun count */
compat_sigval_t _sigval; /* same as below */
int _sys_private; /* not to be passed to user */
int _overrun_incr; /* amount to add to overrun */
} _timer;
/* POSIX.1b signals */
struct {
unsigned int _pid; /* sender's pid */
unsigned int _uid; /* sender's uid */
compat_sigval_t _sigval;
} _rt;
/* SIGCHLD */
struct {
unsigned int _pid; /* which child */
unsigned int _uid; /* sender's uid */
int _status; /* exit code */
compat_clock_t _utime;
compat_clock_t _stime;
} _sigchld;
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
struct {
unsigned int _addr; /* faulting insn/memory ref. */
} _sigfault;
/* SIGPOLL */
struct {
int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
int _fd;
} _sigpoll;
} _sifields;
} compat_siginfo_t;
struct sigframe32
{
u32 pretcode;
int sig;
struct sigcontext_ia32 sc;
struct _fpstate_ia32 fpstate;
unsigned int extramask[_COMPAT_NSIG_WORDS-1];
};
struct rt_sigframe32
{
u32 pretcode;
int sig;
u32 pinfo;
u32 puc;
compat_siginfo_t info;
struct ucontext_ia32 uc;
struct _fpstate_ia32 fpstate;
};
struct ustat32 {
__u32 f_tfree;
compat_ino_t f_tinode;
char f_fname[6];
char f_fpack[6];
};
#define IA32_STACK_TOP IA32_PAGE_OFFSET
#ifdef __KERNEL__
struct user_desc;
struct siginfo_t;
int do_get_thread_area(struct thread_struct *t, struct user_desc __user *info);
int do_set_thread_area(struct thread_struct *t, struct user_desc __user *info);
int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs);
struct linux_binprm;
extern int ia32_setup_arg_pages(struct linux_binprm *bprm,
unsigned long stack_top, int exec_stack);
struct mm_struct;
extern void ia32_pick_mmap_layout(struct mm_struct *mm);
#endif
#endif /* CONFIG_IA32_EMULATION */
#endif

View file

@@ -0,0 +1,18 @@
#ifndef _ASM_X86_64_IA32_UNISTD_H_
#define _ASM_X86_64_IA32_UNISTD_H_
/*
* This file contains the system call numbers of the ia32 port,
* this is for the kernel only.
* Only add syscalls here where some part of the kernel needs to know
* the number. This should be otherwise in sync with asm-i386/unistd.h. -AK
*/
#define __NR_ia32_restart_syscall 0
#define __NR_ia32_exit 1
#define __NR_ia32_read 3
#define __NR_ia32_write 4
#define __NR_ia32_sigreturn 119
#define __NR_ia32_rt_sigreturn 173
#endif /* _ASM_X86_64_IA32_UNISTD_H_ */

View file

@@ -0,0 +1 @@
#include <asm-i386/ide.h>

View file

@@ -0,0 +1,14 @@
#ifndef _ASM_X86_64_IDLE_H
#define _ASM_X86_64_IDLE_H 1
#define IDLE_START 1
#define IDLE_END 2
struct notifier_block;
void idle_notifier_register(struct notifier_block *n);
void idle_notifier_unregister(struct notifier_block *n);
void enter_idle(void);
void exit_idle(void);
#endif

View file

@@ -0,0 +1,19 @@
#ifndef X86_64_INTEL_ARCH_PERFMON_H
#define X86_64_INTEL_ARCH_PERFMON_H 1
#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
#define MSR_ARCH_PERFMON_PERFCTR1 0xc2
#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
#define MSR_ARCH_PERFMON_EVENTSEL1 0x187
#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22)
#define ARCH_PERFMON_EVENTSEL_INT (1 << 20)
#define ARCH_PERFMON_EVENTSEL_OS (1 << 17)
#define ARCH_PERFMON_EVENTSEL_USR (1 << 16)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0)
#endif /* X86_64_INTEL_ARCH_PERFMON_H */

View file

@@ -0,0 +1,693 @@
/*
* Portions Copyright 2008, 2009 VMware, Inc.
*/
#ifndef _ASM_IO_H
#define _ASM_IO_H
/*
* This file contains the definitions for the x86 IO instructions
* inb/inw/inl/outb/outw/outl and the "string versions" of the same
* (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
* versions of the single-IO instructions (inb_p/inw_p/..).
*
* This file is not meant to be obfuscating: it's just complicated
* to (a) handle it all in a way that makes gcc able to optimize it
 * as well as possible and (b) to avoid writing the same thing
* over and over again with slight variations and possibly making a
* mistake somewhere.
*/
/*
* Thanks to James van Artsdalen for a better timing-fix than
* the two short jumps: using outb's to a nonexistent port seems
* to guarantee better timings even on fast machines.
*
* On the other hand, I'd like to be sure of a non-existent port:
* I feel a bit unsafe about using 0x80 (should be safe, though)
*
* Linus
*/
/*
* Bit simplified and optimized by Jan Hubicka
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
*
* isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
* isa_read[wl] and isa_write[wl] fixed
* - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*/
#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
#ifdef REALLY_SLOW_IO
#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
#else
#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
#endif
/*
* In case of any code change below, make sure you
* go and update the corresponding documentation.
* The documentation file can be found at
* vmkdrivers/src26/doc/dummyDefs.doc
*
* outb
* outb_p
* outw
* outl
* outsw
*/
/*
* Talk about misusing macros..
*/
#define __OUT1(s,x) \
static inline void out##s(unsigned x value, unsigned short port) {
#if defined(__VMKLNX__)
#define __OUT2(s,s1,s2) \
vmk_CPUEnsureClearDF(); \
__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
#else /* !defined(__VMKLNX__) */
#define __OUT2(s,s1,s2) \
__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
#endif /* defined(__VMKLNX__) */
#define __OUT(s,s1,x) \
__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));}
#define __IN1(s) \
static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
#if defined(__VMKLNX__)
#define __IN2(s,s1,s2) \
vmk_CPUEnsureClearDF(); \
__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
#else /* !defined(__VMKLNX__) */
#define __IN2(s,s1,s2) \
__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
#endif /* defined(__VMKLNX__) */
#define __IN(s,s1,i...) \
__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; }
#if defined(__VMKLNX__)
#define __INS(s) \
static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
{ \
vmk_CPUEnsureClearDF(); \
__asm__ __volatile__ ("rep ; ins" #s \
: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
#else /* !defined(__VMKLNX__) */
#define __INS(s) \
static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
{ __asm__ __volatile__ ("rep ; ins" #s \
: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
#endif /* defined(__VMKLNX__) */
#if defined(__VMKLNX__)
#define __OUTS(s) \
static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
{ \
vmk_CPUEnsureClearDF(); \
__asm__ __volatile__ ("rep ; outs" #s \
: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
#else /* !defined(__VMKLNX__) */
#define __OUTS(s) \
static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
{ __asm__ __volatile__ ("rep ; outs" #s \
: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
#endif /* defined(__VMKLNX__) */
#define RETURN_TYPE unsigned char
__IN(b,"")
#undef RETURN_TYPE
#define RETURN_TYPE unsigned short
__IN(w,"")
#undef RETURN_TYPE
#define RETURN_TYPE unsigned int
__IN(l,"")
#undef RETURN_TYPE
__OUT(b,"b",char)
__OUT(w,"w",short)
__OUT(l,,int)
__INS(b)
__INS(w)
__INS(l)
__OUTS(b)
__OUTS(w)
__OUTS(l)
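/*
 * The instantiations above generate inb/inw/inl and outb/outw/outl,
 * their pausing _p variants, and the string forms insb/insw/insl and
 * outsb/outsw/outsl. Example (sketch only, reading the 8259 PIC's IRR):
 *
 *      outb(0x0a, 0x20);       (OCW3: select IRR)
 *      irr = inb(0x20);
 */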
#define IO_SPACE_LIMIT 0xffff
#if defined(__KERNEL__) && __x86_64__
#include <linux/vmalloc.h>
#ifndef __i386__
/*
* Change virtual addresses to physical addresses and vv.
* These are pretty trivial
*/
static inline unsigned long virt_to_phys(volatile void * address)
{
return __pa(address);
}
static inline void * phys_to_virt(unsigned long address)
{
return __va(address);
}
#endif
/*
* Change "struct page" to page number.
*/
#if defined(__VMKLNX__)
/**
* phys_to_page - machine address to a page handle
* @maddr : machine address
*
* Converts machine address @maddr to a page handle
*
* SYNOPSIS:
* #define phys_to_page(maddr)
*
* ESX Deviation Notes:
 * The resulting page handle cannot be dereferenced.
 * This page handle needs to be handled through the page API only.
*
*/
/* _VMKLNX_CODECHECK_: phys_to_page */
#define phys_to_page(maddr) ((struct page *)pfn_to_page(maddr >> PAGE_SHIFT))
/**
* page_to_phys - page handle to machine address
* @page: pointer to page handle
*
* Converts page handle @page to a machine address.
*
* SYNOPSIS:
* #define page_to_phys(page)
*
* RETURN VALUE:
* Machine address of the page of type dma_addr_t
*
*/
/* _VMKLNX_CODECHECK_: page_to_phys */
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
#else /* !defined(__VMKLNX__) */
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
#endif /* defined(__VMKLNX__) */
#include <asm-generic/iomap.h>
extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags);
/**
* ioremap - perform a cachable mapping of a physically contiguous range
* @offset: physical address to map
* @size: number of bytes to map
*
* Map in a physically contiguous range into kernel virtual memory and
* get a pointer to the mapped region. The region is mapped cacheable.
*
* RETURN VALUE:
 * A pointer to the mapped region, or NULL if the mapping fails.
*
*/
/* _VMKLNX_CODECHECK_: ioremap */
static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
{
return __ioremap(offset, size, 0);
}
extern void *early_ioremap(unsigned long addr, unsigned long size);
extern void early_iounmap(void *addr, unsigned long size);
/*
* This one maps high address device memory and turns off caching for that area.
* it's useful if some control registers are in such an area and write combining
* or read caching is not desirable:
*/
extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
extern void iounmap(volatile void __iomem *addr);
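/*
 * Example (hedged sketch): the usual map/access/unmap pattern for
 * device registers; "bar_phys", "bar_len" and the 0x10 offset are
 * placeholders.
 *
 *      void __iomem *regs = ioremap_nocache(bar_phys, bar_len);
 *      if (regs != NULL) {
 *              u32 status = readl(regs + 0x10);
 *              iounmap(regs);
 *      }
 */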
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt
/*
* However PCI ones are not necessarily 1:1 and therefore these interfaces
* are forbidden in portable PCI drivers.
*
* Allow them on x86 for legacy drivers, though.
*/
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
/*
* readX/writeX() are used to access memory mapped devices. On some
* architectures the memory mapped IO stuff needs to be accessed
* differently. On the x86 architecture, we just read/write the
* memory location directly.
*/
/**
* __readb - Read a byte from specified address
* @addr: memory address to read from
*
* Reads a byte from a memory location that is mapped to a
* device.
*
* RETURN VALUE:
* An 8-bit unsigned byte value
*
* SEE ALSO:
* readb
*/
/* _VMKLNX_CODECHECK_: __readb */
static inline __u8 __readb(const volatile void __iomem *addr)
{
return *(__force volatile __u8 *)addr;
}
/**
* __readw - Read a word (16 bits) from specified address
* @addr: memory address to read from
*
* Reads a word (16 bits) from a memory location
* that is mapped to a device
*
* RETURN VALUE:
* A 16-bit unsigned short word value
*
* SEE ALSO:
* readw
*/
/* _VMKLNX_CODECHECK_: __readw */
static inline __u16 __readw(const volatile void __iomem *addr)
{
return *(__force volatile __u16 *)addr;
}
/**
* __readl - Read a long (32 bits) from specified address
* @addr: memory address to read from
*
* Reads a long (32 bits) from a memory location that is
* mapped to a device
*
* RETURN VALUE:
* A 32-bit unsigned long word value
*
* SEE ALSO:
* readl
*/
/* _VMKLNX_CODECHECK_: __readl */
static __always_inline __u32 __readl(const volatile void __iomem *addr)
{
return *(__force volatile __u32 *)addr;
}
/**
* __readq - Read a quad word (64 bits) from specified address
* @addr: memory address to read from
*
* Reads a quad word (64 bits) from a memory location
* that is mapped to a device
*
* RETURN VALUE:
 * A 64-bit unsigned quad word value
*
* SEE ALSO:
* readq
*/
/* _VMKLNX_CODECHECK_: __readq */
static inline __u64 __readq(const volatile void __iomem *addr)
{
return *(__force volatile __u64 *)addr;
}
/**
* readb - Read a byte from specified address
* @x: memory address to read from
*
* Reads a byte from a memory location that is mapped to a device.
* readb is an alias to __readb.
*
* RETURN VALUE:
* An 8-bit unsigned byte value
*
* SEE ALSO:
* __readb
*/
/* _VMKLNX_CODECHECK_: readb */
#define readb(x) __readb(x)
/**
* readw - Read a word (16 bits) from specified address
* @x: memory address to read from
*
* Reads a word (16 bits) from a memory location that is mapped to a device.
* readw is an alias to __readw.
*
* RETURN VALUE:
* A 16-bit unsigned short word value
*
* SEE ALSO:
* __readw
*/
/* _VMKLNX_CODECHECK_: readw */
#define readw(x) __readw(x)
/**
* readl - Read a long (32 bits) from specified address
* @x: memory address to read from
*
* Reads a long (32 bits) from a memory location that is mapped to a device.
* readl is an alias to __readl.
*
* RETURN VALUE:
* A 32-bit unsigned long word value
*
* SEE ALSO:
* __readl
*/
/* _VMKLNX_CODECHECK_: readl */
#define readl(x) __readl(x)
/**
* readq - Read a quad word (64 bits) from specified address
* @x: memory address to read from
*
* Reads a quad word (64 bits) from a memory location that is mapped to a device.
* readq is an alias to __readq.
*
* RETURN VALUE:
 * A 64-bit unsigned quad word value
*
* SEE ALSO:
* __readq
*/
/* _VMKLNX_CODECHECK_: readq */
#define readq(x) __readq(x)
#define readb_relaxed(a) readb(a)
/**
* readw_relaxed - Read a word (16 bits) from specified address
* @a: memory address to read from
*
* Reads a word (16 bits) from a memory location that is mapped to a device.
* readw_relaxed is an alias to __readw.
*
* RETURN VALUE:
* A 16-bit unsigned short word value
*
* SEE ALSO:
* __readw readw
*/
/* _VMKLNX_CODECHECK_: readw_relaxed */
#define readw_relaxed(a) readw(a)
/**
* readl_relaxed - Read a long (32 bits) from specified address
* @a: memory address to read from
*
* Reads a long (32 bits) from a memory location that is mapped to a device.
 * readl_relaxed is an alias to __readl.
*
* RETURN VALUE:
* A 32-bit unsigned long word value
*
* SEE ALSO:
* __readl readl
*/
/* _VMKLNX_CODECHECK_: readl_relaxed */
#define readl_relaxed(a) readl(a)
#define readq_relaxed(a) readq(a)
#define __raw_readb readb
#define __raw_readw readw
#define __raw_readl readl
#define __raw_readq readq
#define mmiowb()
/**
* __writel - write an u32 value to I/O device memory
* @b: the u32 value to be written
* @addr: the iomapped memory address that is obtained from ioremap() or from ioremap_nocache()
*
* This is an internal function. Please call writel instead.
*
* RETURN VALUE:
* This function does not return a value.
*
* SEE ALSO:
* ioremap(), ioremap_nocache()
*
*/
/* _VMKLNX_CODECHECK_: __writel */
static inline void __writel(__u32 b, volatile void __iomem *addr)
{
*(__force volatile __u32 *)addr = b;
}
static inline void __writeq(__u64 b, volatile void __iomem *addr)
{
*(__force volatile __u64 *)addr = b;
}
/**
* __writeb - write an u8 value to I/O device memory
* @b: the u8 value to be written
* @addr: the iomapped memory address that is obtained from ioremap() or from ioremap_nocache()
*
* This is an internal function. Please call writeb instead.
*
* RETURN VALUE:
* This function does not return a value.
*
* SEE ALSO:
* ioremap(), ioremap_nocache()
*
*/
/* _VMKLNX_CODECHECK_: __writeb */
static inline void __writeb(__u8 b, volatile void __iomem *addr)
{
*(__force volatile __u8 *)addr = b;
}
/**
* __writew - write an u16 value to I/O device memory
* @b: the u16 value to be written
* @addr: the iomapped memory address that is obtained from ioremap() or from ioremap_nocache()
*
* This is an internal function. Please call writew instead.
*
* RETURN VALUE:
* This function does not return a value.
*
* SEE ALSO:
* ioremap(), ioremap_nocache()
*
*/
/* _VMKLNX_CODECHECK_: __writew */
static inline void __writew(__u16 b, volatile void __iomem *addr)
{
*(__force volatile __u16 *)addr = b;
}
#define writeq(val,addr) __writeq((val),(addr))
/**
* writel - write an u32 value to I/O device memory
* @val: the u32 value to be written
* @addr: the iomapped memory address that is generated by ioremap() or ioremap_nocache()
*
* Write an u32 value to I/O device memory.
*
* SYNOPSIS:
* #define writel(val,addr)
*
* RETURN VALUE:
* This function does not return a value.
*
* SEE ALSO:
* ioremap(), ioremap_nocache()
*
*/
/* _VMKLNX_CODECHECK_: writel */
#define writel(val,addr) __writel((val),(addr))
/**
* writew - write an u16 value to I/O device memory
* @val: the u16 value to be written
* @addr: the iomapped memory address that is obtained from ioremap() or from ioremap_nocache()
*
* Write an u16 value to I/O device memory.
*
* SYNOPSIS:
* #define writew(val,addr)
*
* RETURN VALUE:
* This function does not return a value.
*
* SEE ALSO:
* ioremap(), ioremap_nocache()
*
*/
/* _VMKLNX_CODECHECK_: writew */
#define writew(val,addr) __writew((val),(addr))
/**
* writeb - write an u8 value to I/O device memory
* @val: the u8 value to be written
* @addr: the iomapped memory address that is generated by ioremap() or ioremap_nocache()
*
* Write an u8 value to I/O device memory.
*
* SYNOPSIS:
* #define writeb(val,addr)
*
* RETURN VALUE:
* This function does not return a value.
*
* SEE ALSO:
* ioremap(), ioremap_nocache()
*
*/
/* _VMKLNX_CODECHECK_: writeb */
#define writeb(val,addr) __writeb((val),(addr))
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel
#define __raw_writeq writeq
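/*
 * A minimal usage sketch, not part of the original header: it assumes a
 * hypothetical device exposing a 4KB register block at bus address @bar
 * with a 32-bit status register at offset 0x10. Control registers are
 * mapped uncached with ioremap_nocache() and accessed with readl()/writel().
 */
static inline int example_toggle_status(unsigned long bar)
{
	void __iomem *regs = ioremap_nocache(bar, 4096);
	__u32 status;

	if (!regs)
		return -1;			/* mapping failed */
	status = readl(regs + 0x10);		/* read the status register */
	writel(status | 0x1, regs + 0x10);	/* set a hypothetical enable bit */
	iounmap(regs);
	return 0;
}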
#if defined(__VMKLNX__)
/**
* memcpy_fromio - copy from an IO device space to main memory space
* @from: the address for the IO Device in PCI space
* @to: the address to where the copy is to happen
* @len: the number of bytes to be copied
*
* Copy @len bytes from @from address of an IO device in the PCI space to
* @to address in main memory space
*
* RETURN VALUE:
* None
*/
/* _VMKLNX_CODECHECK_: memcpy_fromio */
static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len)
{
vmk_Memcpy(to, (void *)from, len);
}
/**
* memcpy_toio - copy from main memory space to IO device space
* @to: the address for the IO Device in PCI space
* @from: the address from where the copy is to happen
* @len: the number of bytes to be copied
*
 * Copy @len bytes from @from address to @to address of an IO device in PCI
 * space.
 *
 * RETURN VALUE:
* None
*/
/* _VMKLNX_CODECHECK_: memcpy_toio */
static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len)
{
vmk_Memcpy((void *)to, from, len);
}
#else /* !defined(__VMKLNX__) */
void __memcpy_fromio(void*,unsigned long,unsigned);
void __memcpy_toio(unsigned long,const void*,unsigned);
static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len)
{
__memcpy_fromio(to,(unsigned long)from,len);
}
static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len)
{
__memcpy_toio((unsigned long)to,from,len);
}
#endif /* defined(__VMKLNX__) */
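/*
 * Usage sketch (not in the original header), assuming a hypothetical
 * device with a 64-byte receive mailbox at offset 0x100 and a transmit
 * mailbox at offset 0x140 of a region previously mapped with ioremap()
 * or ioremap_nocache().
 */
static inline void example_mailbox_xfer(void __iomem *regs, void *rx, const void *tx)
{
	memcpy_fromio(rx, regs + 0x100, 64);	/* device -> main memory */
	memcpy_toio(regs + 0x140, tx, 64);	/* main memory -> device */
}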
void memset_io(volatile void __iomem *a, int b, size_t c);
/*
* ISA space is 'always mapped' on a typical x86 system, no need to
* explicitly ioremap() it. The fact that the ISA IO space is mapped
* to PAGE_OFFSET is pure coincidence - it does not mean ISA values
* are physical addresses. The following constant pointer can be
* used as the IO-area pointer (it can be iounmapped as well, so the
* analogy with PCI is quite large):
*/
#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
/*
 * Again, x86-64 does not require memory-mapped-IO-specific functions.
*/
#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(b),(c),(d))
/**
* check_signature - find BIOS signatures
* @io_addr: mmio address to check
* @signature: signature block
* @length: length of signature
*
* Perform a signature comparison with the mmio address io_addr. This
* address should have been obtained by ioremap.
* Returns 1 on a match.
*/
static inline int check_signature(void __iomem *io_addr,
const unsigned char *signature, int length)
{
int retval = 0;
do {
if (readb(io_addr) != *signature)
goto out;
io_addr++;
signature++;
length--;
} while (length);
retval = 1;
out:
return retval;
}
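/*
 * Usage sketch (illustrative only, with a hypothetical signature and
 * address): map a legacy BIOS window, look for the marker, and unmap
 * again. check_signature() expects an ioremap()ed address, per the
 * note above.
 */
static inline int example_find_bios_sig(void)
{
	static const unsigned char sig[] = { 'P', 'C', 'I', 'R' };
	void __iomem *p = ioremap(0xc0000, 0x1000);
	int found;

	if (!p)
		return 0;
	found = check_signature(p, sig, sizeof(sig));
	iounmap(p);
	return found;
}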
/* Nothing to do */
#define dma_cache_inv(_start,_size) do { } while (0)
#define dma_cache_wback(_start,_size) do { } while (0)
#define dma_cache_wback_inv(_start,_size) do { } while (0)
#define flush_write_buffers()
extern int iommu_bio_merge;
#define BIO_VMERGE_BOUNDARY iommu_bio_merge
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif /* __KERNEL__ */
#endif

View file

@@ -0,0 +1,222 @@
#ifndef __ASM_IO_APIC_H
#define __ASM_IO_APIC_H
#include <asm/types.h>
#include <asm/mpspec.h>
/*
* Intel IO-APIC support for SMP and UP systems.
*
* Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar
*/
#ifdef CONFIG_X86_IO_APIC
#ifdef CONFIG_PCI_MSI
static inline int use_pci_vector(void) {return 1;}
static inline void disable_edge_ioapic_vector(unsigned int vector) { }
static inline void mask_and_ack_level_ioapic_vector(unsigned int vector) { }
static inline void end_edge_ioapic_vector (unsigned int vector) { }
#define startup_level_ioapic startup_level_ioapic_vector
#define shutdown_level_ioapic mask_IO_APIC_vector
#define enable_level_ioapic unmask_IO_APIC_vector
#define disable_level_ioapic mask_IO_APIC_vector
#define mask_and_ack_level_ioapic mask_and_ack_level_ioapic_vector
#define end_level_ioapic end_level_ioapic_vector
#define set_ioapic_affinity set_ioapic_affinity_vector
#define startup_edge_ioapic startup_edge_ioapic_vector
#define shutdown_edge_ioapic disable_edge_ioapic_vector
#define enable_edge_ioapic unmask_IO_APIC_vector
#define disable_edge_ioapic disable_edge_ioapic_vector
#define ack_edge_ioapic ack_edge_ioapic_vector
#define end_edge_ioapic end_edge_ioapic_vector
#else
static inline int use_pci_vector(void) {return 0;}
static inline void disable_edge_ioapic_irq(unsigned int irq) { }
static inline void mask_and_ack_level_ioapic_irq(unsigned int irq) { }
static inline void end_edge_ioapic_irq (unsigned int irq) { }
#define startup_level_ioapic startup_level_ioapic_irq
#define shutdown_level_ioapic mask_IO_APIC_irq
#define enable_level_ioapic unmask_IO_APIC_irq
#define disable_level_ioapic mask_IO_APIC_irq
#define mask_and_ack_level_ioapic mask_and_ack_level_ioapic_irq
#define end_level_ioapic end_level_ioapic_irq
#define set_ioapic_affinity set_ioapic_affinity_irq
#define startup_edge_ioapic startup_edge_ioapic_irq
#define shutdown_edge_ioapic disable_edge_ioapic_irq
#define enable_edge_ioapic unmask_IO_APIC_irq
#define disable_edge_ioapic disable_edge_ioapic_irq
#define ack_edge_ioapic ack_edge_ioapic_irq
#define end_edge_ioapic end_edge_ioapic_irq
#endif
#define APIC_MISMATCH_DEBUG
#define IO_APIC_BASE(idx) \
((volatile int *)(__fix_to_virt(FIX_IO_APIC_BASE_0 + idx) \
+ (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK)))
/*
* The structure of the IO-APIC:
*/
union IO_APIC_reg_00 {
u32 raw;
struct {
u32 __reserved_2 : 14,
LTS : 1,
delivery_type : 1,
__reserved_1 : 8,
ID : 8;
} __attribute__ ((packed)) bits;
};
union IO_APIC_reg_01 {
u32 raw;
struct {
u32 version : 8,
__reserved_2 : 7,
PRQ : 1,
entries : 8,
__reserved_1 : 8;
} __attribute__ ((packed)) bits;
};
union IO_APIC_reg_02 {
u32 raw;
struct {
u32 __reserved_2 : 24,
arbitration : 4,
__reserved_1 : 4;
} __attribute__ ((packed)) bits;
};
union IO_APIC_reg_03 {
u32 raw;
struct {
u32 boot_DT : 1,
__reserved_1 : 31;
} __attribute__ ((packed)) bits;
};
/*
* # of IO-APICs and # of IRQ routing registers
*/
extern int nr_ioapics;
extern int nr_ioapic_registers[MAX_IO_APICS];
enum ioapic_irq_destination_types {
dest_Fixed = 0,
dest_LowestPrio = 1,
dest_SMI = 2,
dest__reserved_1 = 3,
dest_NMI = 4,
dest_INIT = 5,
dest__reserved_2 = 6,
dest_ExtINT = 7
};
struct IO_APIC_route_entry {
__u32 vector : 8,
delivery_mode : 3, /* 000: FIXED
* 001: lowest prio
* 111: ExtINT
*/
dest_mode : 1, /* 0: physical, 1: logical */
delivery_status : 1,
polarity : 1,
irr : 1,
trigger : 1, /* 0: edge, 1: level */
mask : 1, /* 0: enabled, 1: disabled */
__reserved_2 : 15;
union { struct { __u32
__reserved_1 : 24,
physical_dest : 4,
__reserved_2 : 4;
} physical;
struct { __u32
__reserved_1 : 24,
logical_dest : 8;
} logical;
} dest;
} __attribute__ ((packed));
/*
* MP-BIOS irq configuration table structures:
*/
/* I/O APIC entries */
extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
/* # of MP IRQ source entries */
extern int mp_irq_entries;
/* MP IRQ source entries */
extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
/* non-0 if default (table-less) MP configuration */
extern int mpc_default_type;
static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
*IO_APIC_BASE(apic) = reg;
return *(IO_APIC_BASE(apic)+4);
}
static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
*IO_APIC_BASE(apic) = reg;
*(IO_APIC_BASE(apic)+4) = value;
}
/*
* Re-write a value: to be used for read-modify-write
* cycles where the read already set up the index register.
*/
static inline void io_apic_modify(unsigned int apic, unsigned int value)
{
*(IO_APIC_BASE(apic)+4) = value;
}
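/*
 * Read-modify-write sketch (not from the original file): set the mask bit
 * of redirection entry @pin on IO-APIC @apic. The low dword of entry N is
 * at register 0x10 + 2*N, and bit 16 is the mask bit (see
 * struct IO_APIC_route_entry below). io_apic_read() leaves the index
 * register pointing at the entry, so io_apic_modify() completes the
 * cycle; real callers serialize this with the IO-APIC spinlock.
 */
static inline void example_mask_ioapic_pin(unsigned int apic, unsigned int pin)
{
	unsigned int low = io_apic_read(apic, 0x10 + 2 * pin);

	io_apic_modify(apic, low | (1 << 16));
}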
/*
* Synchronize the IO-APIC and the CPU by doing
* a dummy read from the IO-APIC
*/
static inline void io_apic_sync(unsigned int apic)
{
(void) *(IO_APIC_BASE(apic)+4);
}
/* 1 if "noapic" boot option passed */
extern int skip_ioapic_setup;
/*
* If we use the IO-APIC for IRQ routing, disable automatic
* assignment of PCI IRQ's.
*/
#define io_apic_assign_pci_irqs (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
#ifdef CONFIG_ACPI
extern int io_apic_get_version (int ioapic);
extern int io_apic_get_redir_entries (int ioapic);
extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int, int);
extern int timer_uses_ioapic_pin_0;
#endif
extern int sis_apic_bug; /* dummy */
#else /* !CONFIG_X86_IO_APIC */
#define io_apic_assign_pci_irqs 0
#endif
extern int assign_irq_vector(int irq);
void enable_NMI_through_LVT0 (void * dummy);
extern spinlock_t i8259A_lock;
#endif

View file

@@ -0,0 +1 @@
#include <asm-generic/ioctl.h>

View file

@@ -0,0 +1 @@
#include <linux/ioctl32.h>

View file

@@ -0,0 +1,82 @@
#ifndef __ARCH_X8664_IOCTLS_H__
#define __ARCH_X8664_IOCTLS_H__
#include <asm/ioctl.h>
/* 0x54 is just a magic number to make these relatively unique ('T') */
#define TCGETS 0x5401
#define TCSETS 0x5402
#define TCSETSW 0x5403
#define TCSETSF 0x5404
#define TCGETA 0x5405
#define TCSETA 0x5406
#define TCSETAW 0x5407
#define TCSETAF 0x5408
#define TCSBRK 0x5409
#define TCXONC 0x540A
#define TCFLSH 0x540B
#define TIOCEXCL 0x540C
#define TIOCNXCL 0x540D
#define TIOCSCTTY 0x540E
#define TIOCGPGRP 0x540F
#define TIOCSPGRP 0x5410
#define TIOCOUTQ 0x5411
#define TIOCSTI 0x5412
#define TIOCGWINSZ 0x5413
#define TIOCSWINSZ 0x5414
#define TIOCMGET 0x5415
#define TIOCMBIS 0x5416
#define TIOCMBIC 0x5417
#define TIOCMSET 0x5418
#define TIOCGSOFTCAR 0x5419
#define TIOCSSOFTCAR 0x541A
#define FIONREAD 0x541B
#define TIOCINQ FIONREAD
#define TIOCLINUX 0x541C
#define TIOCCONS 0x541D
#define TIOCGSERIAL 0x541E
#define TIOCSSERIAL 0x541F
#define TIOCPKT 0x5420
#define FIONBIO 0x5421
#define TIOCNOTTY 0x5422
#define TIOCSETD 0x5423
#define TIOCGETD 0x5424
#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
#define TIOCSBRK 0x5427 /* BSD compatibility */
#define TIOCCBRK 0x5428 /* BSD compatibility */
#define TIOCGSID 0x5429 /* Return the session ID of FD */
#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
#define FIOCLEX 0x5451
#define FIOASYNC 0x5452
#define TIOCSERCONFIG 0x5453
#define TIOCSERGWILD 0x5454
#define TIOCSERSWILD 0x5455
#define TIOCGLCKTRMIOS 0x5456
#define TIOCSLCKTRMIOS 0x5457
#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
#define TIOCSERGETLSR 0x5459 /* Get line status register */
#define TIOCSERGETMULTI 0x545A /* Get multiport config */
#define TIOCSERSETMULTI 0x545B /* Set multiport config */
#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
#define FIOQSIZE 0x5460
/* Used for packet mode */
#define TIOCPKT_DATA 0
#define TIOCPKT_FLUSHREAD 1
#define TIOCPKT_FLUSHWRITE 2
#define TIOCPKT_STOP 4
#define TIOCPKT_START 8
#define TIOCPKT_NOSTOP 16
#define TIOCPKT_DOSTOP 32
#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
#endif

View file

@@ -0,0 +1,6 @@
#ifndef __x8664_IPC_H__
#define __x8664_IPC_H__
/* dummy */
#endif

View file

@@ -0,0 +1,29 @@
#ifndef __x86_64_IPCBUF_H__
#define __x86_64_IPCBUF_H__
/*
* The ipc64_perm structure for x86_64 architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 32-bit mode_t and seq
* - 2 miscellaneous 32-bit values
*/
struct ipc64_perm
{
__kernel_key_t key;
__kernel_uid32_t uid;
__kernel_gid32_t gid;
__kernel_uid32_t cuid;
__kernel_gid32_t cgid;
__kernel_mode_t mode;
unsigned short __pad1;
unsigned short seq;
unsigned short __pad2;
unsigned long __unused1;
unsigned long __unused2;
};
#endif /* __x86_64_IPCBUF_H__ */

View file

@@ -0,0 +1,119 @@
#ifndef __ASM_IPI_H
#define __ASM_IPI_H
/*
* Copyright 2004 James Cleverdon, IBM.
* Subject to the GNU Public License, v.2
*
* Generic APIC InterProcessor Interrupt code.
*
* Moved to include file by James Cleverdon from
* arch/x86-64/kernel/smp.c
*
* Copyrights from kernel/smp.c:
*
* (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
* (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
* (c) 2002,2003 Andi Kleen, SuSE Labs.
* Subject to the GNU Public License, v.2
*/
#include <asm/fixmap.h>
#include <asm/hw_irq.h>
#include <asm/apicdef.h>
#include <asm/genapic.h>
/*
* the following functions deal with sending IPIs between CPUs.
*
* We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
*/
static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, unsigned int dest)
{
unsigned int icr = shortcut | dest;
switch (vector) {
default:
icr |= APIC_DM_FIXED | vector;
break;
case NMI_VECTOR:
icr |= APIC_DM_NMI;
break;
}
return icr;
}
static inline int __prepare_ICR2 (unsigned int mask)
{
return SET_APIC_DEST_FIELD(mask);
}
#ifndef CONFIG_XEN_UNPRIVILEGED_GUEST
static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
{
/*
* Subtle. In the case of the 'never do double writes' workaround
 * we have to lock out interrupts to be safe. As we don't care
 * about the value read, we use an atomic rmw access to avoid costly
* cli/sti. Otherwise we use an even cheaper single atomic write
* to the APIC.
*/
unsigned int cfg;
/*
* Wait for idle.
*/
apic_wait_icr_idle();
/*
* No need to touch the target chip field
*/
cfg = __prepare_ICR(shortcut, vector, dest);
/*
* Send the IPI. The write to APIC_ICR fires this off.
*/
apic_write(APIC_ICR, cfg);
}
static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
{
unsigned long cfg, flags;
unsigned long query_cpu;
/*
* Hack. The clustered APIC addressing mode doesn't allow us to send
* to an arbitrary mask, so I do a unicast to each CPU instead.
* - mbligh
*/
local_irq_save(flags);
for_each_cpu_mask(query_cpu, mask) {
/*
* Wait for idle.
*/
apic_wait_icr_idle();
/*
* prepare target chip field
*/
cfg = __prepare_ICR2(x86_cpu_to_apicid[query_cpu]);
apic_write(APIC_ICR2, cfg);
/*
* program the ICR
*/
cfg = __prepare_ICR(0, vector, APIC_DEST_PHYSICAL);
/*
* Send the IPI. The write to APIC_ICR fires this off.
*/
apic_write(APIC_ICR, cfg);
}
local_irq_restore(flags);
}
#endif /* CONFIG_XEN_UNPRIVILEGED_GUEST */
#endif /* __ASM_IPI_H */

View file

@@ -0,0 +1,59 @@
#ifndef _ASM_IRQ_H
#define _ASM_IRQ_H
/*
* linux/include/asm/irq.h
*
* (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
*
* IRQ/IPI changes taken from work by Thomas Radke
* <tomsoft@informatik.tu-chemnitz.de>
*/
#define TIMER_IRQ 0
/*
* 16 8259A IRQ's, 208 potential APIC interrupt sources.
* Right now the APIC is mostly only used for SMP.
* 256 vectors is an architectural limit. (we can have
* more than 256 devices theoretically, but they will
* have to use shared interrupts)
* Since vectors 0x00-0x1f are used/reserved for the CPU,
* the usable vector space is 0x20-0xff (224 vectors)
*/
/*
* The maximum number of vectors supported by x86_64 processors
* is limited to 256. For processors other than x86_64, NR_VECTORS
* should be changed accordingly.
*/
#define NR_VECTORS 256
/* duplicated in hw_irq.h */
#define FIRST_SYSTEM_VECTOR 0xef
#ifdef CONFIG_PCI_MSI
#define NR_IRQS FIRST_SYSTEM_VECTOR
#define NR_IRQ_VECTORS NR_IRQS
#else
#define NR_IRQS 224
#define NR_IRQ_VECTORS (32 * NR_CPUS)
#endif
static __inline__ int irq_canonicalize(int irq)
{
return ((irq == 2) ? 9 : irq);
}
#ifdef CONFIG_X86_LOCAL_APIC
#define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
#endif
#ifdef CONFIG_HOTPLUG_CPU
#include <linux/cpumask.h>
extern void fixup_irqs(cpumask_t map);
#endif
#define __ARCH_HAS_DO_SOFTIRQ 1
#endif /* _ASM_IRQ_H */

View file

@@ -0,0 +1,153 @@
/*
* include/asm-x86_64/irqflags.h
*
* IRQ flags handling
*
* This file gets included from lowlevel asm headers too, to provide
* wrapped versions of the local_irq_*() APIs, based on the
* raw_local_irq_*() functions from the lowlevel headers.
*/
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H
#ifndef __ASSEMBLY__
/*
* Interrupt control:
*/
static inline unsigned long __raw_local_save_flags(void)
{
unsigned long flags;
__asm__ __volatile__(
"# __raw_save_flags\n\t"
"pushfq ; popq %q0"
: "=g" (flags)
: /* no input */
: "memory"
);
return flags;
}
#define raw_local_save_flags(flags) \
do { (flags) = __raw_local_save_flags(); } while (0)
static inline void raw_local_irq_restore(unsigned long flags)
{
__asm__ __volatile__(
"pushq %0 ; popfq"
: /* no output */
:"g" (flags)
:"memory", "cc"
);
}
#ifdef CONFIG_X86_VSMP
/*
* Interrupt control for the VSMP architecture:
*/
static inline void raw_local_irq_disable(void)
{
unsigned long flags = __raw_local_save_flags();
raw_local_irq_restore((flags & ~(1 << 9)) | (1 << 18));
}
static inline void raw_local_irq_enable(void)
{
unsigned long flags = __raw_local_save_flags();
raw_local_irq_restore((flags | (1 << 9)) & ~(1 << 18));
}
/**
 * raw_irqs_disabled_flags - check whether saved flags indicate disabled interrupts
 * @flags: EFLAGS value as returned by __raw_local_save_flags()
 *
 * Returns nonzero if @flags indicate that interrupts are disabled. On
 * VSMP, interrupts count as disabled when the IF flag (bit 9) is clear
 * or the AC flag (bit 18) is set.
*
*/
/* _VMKLNX_CODECHECK_: raw_irqs_disabled_flags */
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
return !(flags & (1<<9)) || (flags & (1 << 18));
}
#else /* CONFIG_X86_VSMP */
static inline void raw_local_irq_disable(void)
{
__asm__ __volatile__("cli" : : : "memory");
}
static inline void raw_local_irq_enable(void)
{
__asm__ __volatile__("sti" : : : "memory");
}
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
return !(flags & (1 << 9));
}
#endif
/*
* For spinlocks, etc.:
*/
static inline unsigned long __raw_local_irq_save(void)
{
unsigned long flags = __raw_local_save_flags();
raw_local_irq_disable();
return flags;
}
#define raw_local_irq_save(flags) \
do { (flags) = __raw_local_irq_save(); } while (0)
static inline int raw_irqs_disabled(void)
{
unsigned long flags = __raw_local_save_flags();
return raw_irqs_disabled_flags(flags);
}
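/*
 * Typical pattern (sketch, not part of the original header): guard a short
 * critical section against local interrupts and restore the previous state
 * instead of unconditionally re-enabling, so nesting stays correct.
 */
static inline void example_critical_section(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* save IF, then disable interrupts */
	/* ... touch state shared with interrupt context here ... */
	raw_local_irq_restore(flags);	/* put IF back the way it was */
}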
/*
* Used in the idle loop; sti takes one instruction cycle
* to complete:
*/
static inline void raw_safe_halt(void)
{
__asm__ __volatile__("sti; hlt" : : : "memory");
}
/*
* Used when interrupts are already enabled or to
* shutdown the processor:
*/
static inline void halt(void)
{
__asm__ __volatile__("hlt": : :"memory");
}
#else /* __ASSEMBLY__: */
# ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON call trace_hardirqs_on_thunk
# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk
# else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
# endif
#endif
#endif

View file

@@ -0,0 +1,14 @@
#ifndef _ASM_K8_H
#define _ASM_K8_H 1
#include <linux/pci.h>
extern struct pci_device_id k8_nb_ids[];
extern int early_is_k8_nb(u32 value);
extern struct pci_dev **k8_northbridges;
extern int num_k8_northbridges;
extern int cache_k8_northbridges(void);
extern void k8_flush_garts(void);
#endif

View file

@@ -0,0 +1,60 @@
#ifndef _X86_64_KDEBUG_H
#define _X86_64_KDEBUG_H 1
#include <linux/notifier.h>
struct pt_regs;
struct die_args {
struct pt_regs *regs;
const char *str;
long err;
int trapnr;
int signr;
};
extern int register_die_notifier(struct notifier_block *);
extern int unregister_die_notifier(struct notifier_block *);
extern int register_page_fault_notifier(struct notifier_block *);
extern int unregister_page_fault_notifier(struct notifier_block *);
extern struct atomic_notifier_head die_chain;
/* Grossly misnamed. */
enum die_val {
DIE_OOPS = 1,
DIE_INT3,
DIE_DEBUG,
DIE_PANIC,
DIE_NMI,
DIE_DIE,
DIE_NMIWATCHDOG,
DIE_KERNELDEBUG,
DIE_TRAP,
DIE_GPF,
DIE_CALL,
DIE_NMI_IPI,
DIE_PAGE_FAULT,
};
static inline int notify_die(enum die_val val, const char *str,
struct pt_regs *regs, long err, int trap, int sig)
{
struct die_args args = {
.regs = regs,
.str = str,
.err = err,
.trapnr = trap,
.signr = sig
};
return atomic_notifier_call_chain(&die_chain, val, &args);
}
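/*
 * Registration sketch (hypothetical names, not part of the original
 * header): modules hook the die chain that notify_die() above walks.
 * The callback receives the die_val in @val and a struct die_args
 * pointer in @data.
 */
static inline int example_die_event(struct notifier_block *self,
				    unsigned long val, void *data)
{
	struct die_args *args = data;

	/* react to oopses only; ignore everything else */
	return (val == DIE_OOPS && args->trapnr) ? NOTIFY_OK : NOTIFY_DONE;
}
/*
 * A module would then register it:
 *	static struct notifier_block nb = { .notifier_call = example_die_event };
 *	register_die_notifier(&nb);
 */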
extern void printk_address(unsigned long address);
extern void die(const char *,struct pt_regs *,long);
extern void __die(const char *,struct pt_regs *,long);
extern void show_registers(struct pt_regs *regs);
extern void dump_pagetable(unsigned long);
extern unsigned long oops_begin(void);
extern void oops_end(unsigned long);
#endif

View file

@@ -0,0 +1,67 @@
#ifndef _X86_64_KEXEC_H
#define _X86_64_KEXEC_H
#include <linux/string.h>
#include <asm/page.h>
#include <asm/ptrace.h>
/*
 * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can return.
* I.e. Maximum page that is mapped directly into kernel memory,
* and kmap is not required.
*
* So far x86_64 is limited to 40 physical address bits.
*/
/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (0xFFFFFFFFFFUL)
/* Maximum address we can reach in physical address mode */
#define KEXEC_DESTINATION_MEMORY_LIMIT (0xFFFFFFFFFFUL)
/* Maximum address we can use for the control pages */
#define KEXEC_CONTROL_MEMORY_LIMIT (0xFFFFFFFFFFUL)
/* Allocate one page for the pdp and the second for the code */
#define KEXEC_CONTROL_CODE_SIZE (4096UL + 4096UL)
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_X86_64
#define MAX_NOTE_BYTES 1024
/*
 * Saving the registers of the cpu on which panic occurred in
 * crash_kexec to save a valid sp. The registers of other cpus
 * will be saved in machine_crash_shutdown while shooting them down.
*/
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs)
{
if (oldregs)
memcpy(newregs, oldregs, sizeof(*newregs));
else {
__asm__ __volatile__("movq %%rbx,%0" : "=m"(newregs->rbx));
__asm__ __volatile__("movq %%rcx,%0" : "=m"(newregs->rcx));
__asm__ __volatile__("movq %%rdx,%0" : "=m"(newregs->rdx));
__asm__ __volatile__("movq %%rsi,%0" : "=m"(newregs->rsi));
__asm__ __volatile__("movq %%rdi,%0" : "=m"(newregs->rdi));
__asm__ __volatile__("movq %%rbp,%0" : "=m"(newregs->rbp));
__asm__ __volatile__("movq %%rax,%0" : "=m"(newregs->rax));
__asm__ __volatile__("movq %%rsp,%0" : "=m"(newregs->rsp));
__asm__ __volatile__("movq %%r8,%0" : "=m"(newregs->r8));
__asm__ __volatile__("movq %%r9,%0" : "=m"(newregs->r9));
__asm__ __volatile__("movq %%r10,%0" : "=m"(newregs->r10));
__asm__ __volatile__("movq %%r11,%0" : "=m"(newregs->r11));
__asm__ __volatile__("movq %%r12,%0" : "=m"(newregs->r12));
__asm__ __volatile__("movq %%r13,%0" : "=m"(newregs->r13));
__asm__ __volatile__("movq %%r14,%0" : "=m"(newregs->r14));
__asm__ __volatile__("movq %%r15,%0" : "=m"(newregs->r15));
__asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss));
__asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs));
__asm__ __volatile__("pushfq; popq %0" :"=m"(newregs->eflags));
newregs->rip = (unsigned long)current_text_addr();
}
}
#endif /* _X86_64_KEXEC_H */

View file

@@ -0,0 +1,19 @@
#ifndef _ASM_KMAP_TYPES_H
#define _ASM_KMAP_TYPES_H
enum km_type {
KM_BOUNCE_READ,
KM_SKB_SUNRPC_DATA,
KM_SKB_DATA_SOFTIRQ,
KM_USER0,
KM_USER1,
KM_BIO_SRC_IRQ,
KM_BIO_DST_IRQ,
KM_IRQ0,
KM_IRQ1,
KM_SOFTIRQ0,
KM_SOFTIRQ1,
KM_TYPE_NR
};
#endif

View file

@@ -0,0 +1,91 @@
#ifndef _ASM_KPROBES_H
#define _ASM_KPROBES_H
/*
* Kernel Probes (KProbes)
* include/asm-x86_64/kprobes.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2002, 2004
*
* 2004-Oct Prasanna S Panchamukhi <prasanna@in.ibm.com> and Jim Keniston
* kenistoj@us.ibm.com adopted from i386.
*/
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#define __ARCH_WANT_KPROBES_INSN_SLOT
struct pt_regs;
struct kprobe;
typedef u8 kprobe_opcode_t;
#define BREAKPOINT_INSTRUCTION 0xcc
#define MAX_INSN_SIZE 15
#define MAX_STACK_SIZE 64
#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
(((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \
? (MAX_STACK_SIZE) \
: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
#define ARCH_SUPPORTS_KRETPROBES
#define ARCH_INACTIVE_KPROBE_COUNT 1
void kretprobe_trampoline(void);
extern void arch_remove_kprobe(struct kprobe *p);
#define flush_insn_slot(p) do { } while (0)
/* Architecture specific copy of original instruction*/
struct arch_specific_insn {
/* copy of the original instruction */
kprobe_opcode_t *insn;
};
struct prev_kprobe {
struct kprobe *kp;
unsigned long status;
unsigned long old_rflags;
unsigned long saved_rflags;
};
/* per-cpu kprobe control block */
struct kprobe_ctlblk {
unsigned long kprobe_status;
unsigned long kprobe_old_rflags;
unsigned long kprobe_saved_rflags;
long *jprobe_saved_rsp;
struct pt_regs jprobe_saved_regs;
kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
struct prev_kprobe prev_kprobe;
};
/* trap3/1 are intr gates for kprobes. So, restore the status of IF,
* if necessary, before executing the original int3/1 (trap) handler.
*/
static inline void restore_interrupts(struct pt_regs *regs)
{
if (regs->eflags & IF_MASK)
local_irq_enable();
}
extern int post_kprobe_handler(struct pt_regs *regs);
extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_handler(struct pt_regs *regs);
extern int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
#endif /* _ASM_KPROBES_H */

View file

@@ -0,0 +1,36 @@
/*
* ldt.h
*
* Definitions of structures used with the modify_ldt system call.
*/
#ifndef _LINUX_LDT_H
#define _LINUX_LDT_H
/* Maximum number of LDT entries supported. */
#define LDT_ENTRIES 8192
/* The size of each LDT entry. */
#define LDT_ENTRY_SIZE 8
#ifndef __ASSEMBLY__
/* Note: on 64-bit, base and limit are ignored and you cannot set
   DS/ES/CS to anything other than the default values if you still want to
   do syscalls. This call is therefore mostly useful for 32-bit mode. */
struct user_desc {
unsigned int entry_number;
unsigned int base_addr;
unsigned int limit;
unsigned int seg_32bit:1;
unsigned int contents:2;
unsigned int read_exec_only:1;
unsigned int limit_in_pages:1;
unsigned int seg_not_present:1;
unsigned int useable:1;
unsigned int lm:1;
};
#define MODIFY_LDT_CONTENTS_DATA 0
#define MODIFY_LDT_CONTENTS_STACK 1
#define MODIFY_LDT_CONTENTS_CODE 2
#endif /* !__ASSEMBLY__ */
#endif

View file

@@ -0,0 +1,6 @@
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
/* Nothing to see here... */
#endif

View file

@@ -0,0 +1,88 @@
#ifndef _ARCH_X8664_LOCAL_H
#define _ARCH_X8664_LOCAL_H
#include <linux/percpu.h>
typedef struct
{
volatile long counter;
} local_t;
#define LOCAL_INIT(i) { (i) }
#define local_read(v) ((v)->counter)
#define local_set(v,i) (((v)->counter) = (i))
static inline void local_inc(local_t *v)
{
__asm__ __volatile__(
"incq %0"
:"=m" (v->counter)
:"m" (v->counter));
}
static inline void local_dec(local_t *v)
{
__asm__ __volatile__(
"decq %0"
:"=m" (v->counter)
:"m" (v->counter));
}
static inline void local_add(long i, local_t *v)
{
__asm__ __volatile__(
"addq %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
}
static inline void local_sub(long i, local_t *v)
{
__asm__ __volatile__(
"subq %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
}
/* On x86-64 these are better than the atomic variants on SMP kernels
   because they don't use a lock prefix. */
#define __local_inc(l) local_inc(l)
#define __local_dec(l) local_dec(l)
#define __local_add(i,l) local_add((i),(l))
#define __local_sub(i,l) local_sub((i),(l))
/* Use these for per-cpu local_t variables: on some archs they are
* much more efficient than these naive implementations. Note they take
* a variable, not an address.
*
* This could be done better if we moved the per cpu data directly
* after GS.
*/
/* Need to disable preemption for the cpu local counters; otherwise we could
   still access a variable of a previous CPU in a non-atomic way. */
#define cpu_local_wrap_v(v) \
({ local_t res__; \
preempt_disable(); \
res__ = (v); \
preempt_enable(); \
res__; })
#define cpu_local_wrap(v) \
({ preempt_disable(); \
v; \
preempt_enable(); })
#define cpu_local_read(v) cpu_local_wrap_v(local_read(&__get_cpu_var(v)))
#define cpu_local_set(v, i) cpu_local_wrap(local_set(&__get_cpu_var(v), (i)))
#define cpu_local_inc(v) cpu_local_wrap(local_inc(&__get_cpu_var(v)))
#define cpu_local_dec(v) cpu_local_wrap(local_dec(&__get_cpu_var(v)))
#define cpu_local_add(i, v) cpu_local_wrap(local_add((i), &__get_cpu_var(v)))
#define cpu_local_sub(i, v) cpu_local_wrap(local_sub((i), &__get_cpu_var(v)))
#define __cpu_local_inc(v) cpu_local_inc(v)
#define __cpu_local_dec(v) cpu_local_dec(v)
#define __cpu_local_add(i, v) cpu_local_add((i), (v))
#define __cpu_local_sub(i, v) cpu_local_sub((i), (v))
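/*
 * Sketch (not part of the original header): a hypothetical per-cpu event
 * counter. cpu_local_inc() wraps the update in preempt_disable()/enable()
 * so the increment always lands on the current CPU's copy.
 */
static DEFINE_PER_CPU(local_t, example_events) = LOCAL_INIT(0);

static inline void example_count_event(void)
{
	cpu_local_inc(example_events);
}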
#endif /* _ARCH_X8664_LOCAL_H */

View file

@@ -0,0 +1,29 @@
#ifndef __ASM_MACH_APIC_H
#define __ASM_MACH_APIC_H
/*
* Copyright 2004 James Cleverdon, IBM.
* Subject to the GNU Public License, v.2
*
* Generic APIC sub-arch defines.
*
* Hacked for x86-64 by James Cleverdon from i386 architecture code by
* Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
* James Cleverdon.
*/
#include <asm/genapic.h>
#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
#define INT_DEST_MODE (genapic->int_dest_mode)
#define INT_DELIVERY_DEST (genapic->int_delivery_dest)
#define TARGET_CPUS (genapic->target_cpus())
#define apic_id_registered (genapic->apic_id_registered)
#define init_apic_ldr (genapic->init_apic_ldr)
#define send_IPI_mask (genapic->send_IPI_mask)
#define send_IPI_allbutself (genapic->send_IPI_allbutself)
#define send_IPI_all (genapic->send_IPI_all)
#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
#define phys_pkg_id (genapic->phys_pkg_id)
#endif /* __ASM_MACH_APIC_H */

View file

@@ -0,0 +1,29 @@
/*
* Machine dependent access functions for RTC registers.
*/
#ifndef _ASM_MC146818RTC_H
#define _ASM_MC146818RTC_H
#include <asm/io.h>
#ifndef RTC_PORT
#define RTC_PORT(x) (0x70 + (x))
#define RTC_ALWAYS_BCD 1 /* RTC values are assumed to be BCD-encoded */
#endif
/*
 * All machines supported so far access the RTC index register via
 * an ISA port access, but the way to access the data register differs ...
*/
#define CMOS_READ(addr) ({ \
outb_p((addr),RTC_PORT(0)); \
inb_p(RTC_PORT(1)); \
})
#define CMOS_WRITE(val, addr) ({ \
outb_p((addr),RTC_PORT(0)); \
outb_p((val),RTC_PORT(1)); \
})
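/*
 * Usage sketch (not in the original header): fetch the seconds register
 * (index 0). With RTC_ALWAYS_BCD the value comes back BCD-encoded, so
 * convert it by hand; real callers also hold rtc_lock around the access.
 */
static inline unsigned int example_rtc_seconds(void)
{
	unsigned int sec = CMOS_READ(0);

	return (sec & 0x0f) + ((sec >> 4) * 10);	/* BCD -> binary */
}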
#define RTC_IRQ 8
#endif /* _ASM_MC146818RTC_H */

View file

@@ -0,0 +1,106 @@
#ifndef _ASM_MCE_H
#define _ASM_MCE_H 1
#include <asm/ioctls.h>
#include <asm/types.h>
/*
* Machine Check support for x86
*/
#define MCG_CTL_P (1UL<<8) /* MCG_CAP register available */
#define MCG_STATUS_RIPV (1UL<<0) /* restart ip valid */
#define MCG_STATUS_EIPV (1UL<<1) /* eip points to correct instruction */
#define MCG_STATUS_MCIP (1UL<<2) /* machine check in progress */
#define MCI_STATUS_VAL (1UL<<63) /* valid error */
#define MCI_STATUS_OVER (1UL<<62) /* previous errors lost */
#define MCI_STATUS_UC (1UL<<61) /* uncorrected error */
#define MCI_STATUS_EN (1UL<<60) /* error enabled */
#define MCI_STATUS_MISCV (1UL<<59) /* misc error reg. valid */
#define MCI_STATUS_ADDRV (1UL<<58) /* addr reg. valid */
#define MCI_STATUS_PCC (1UL<<57) /* processor context corrupt */
/* Fields are zero when not available */
struct mce {
__u64 status;
__u64 misc;
__u64 addr;
__u64 mcgstatus;
__u64 rip;
__u64 tsc; /* cpu time stamp counter */
__u64 res1; /* for future extension */
__u64 res2; /* ditto. */
__u8 cs; /* code segment */
__u8 bank; /* machine check bank */
__u8 cpu; /* cpu that raised the error */
__u8 finished; /* entry is valid */
__u32 pad;
};
/*
* This structure contains all data related to the MCE log.
* Also carries a signature to make it easier to find from external debugging tools.
* Each entry is only valid when its finished flag is set.
*/
#define MCE_LOG_LEN 32
struct mce_log {
char signature[12]; /* "MACHINECHECK" */
unsigned len; /* = MCE_LOG_LEN */
unsigned next;
unsigned flags;
unsigned pad0;
struct mce entry[MCE_LOG_LEN];
};
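/*
 * Scan sketch (illustrative, not part of the original header): count log
 * entries that are both finished (i.e. valid, per the comment above) and
 * flagged as uncorrected. A real consumer also tracks 'next' and clears
 * entries it has harvested.
 */
static inline unsigned int example_count_uncorrected(const struct mce_log *log)
{
	unsigned int i, uc = 0;

	for (i = 0; i < MCE_LOG_LEN; i++)
		if (log->entry[i].finished &&
		    (log->entry[i].status & MCI_STATUS_UC))
			uc++;
	return uc;
}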
#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
#define MCE_LOG_SIGNATURE "MACHINECHECK"
#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
#define MCE_GET_LOG_LEN _IOR('M', 2, int)
#define MCE_GETCLEAR_FLAGS _IOR('M', 3, int)
/* Software defined banks */
#define MCE_EXTENDED_BANK 128
#define MCE_THERMAL_BANK (MCE_EXTENDED_BANK + 0)
#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) /* MCE_AMD */
#define K8_MCE_THRESHOLD_BANK_0 (K8_MCE_THRESHOLD_BASE + 0 * 9)
#define K8_MCE_THRESHOLD_BANK_1 (K8_MCE_THRESHOLD_BASE + 1 * 9)
#define K8_MCE_THRESHOLD_BANK_2 (K8_MCE_THRESHOLD_BASE + 2 * 9)
#define K8_MCE_THRESHOLD_BANK_3 (K8_MCE_THRESHOLD_BASE + 3 * 9)
#define K8_MCE_THRESHOLD_BANK_4 (K8_MCE_THRESHOLD_BASE + 4 * 9)
#define K8_MCE_THRESHOLD_BANK_5 (K8_MCE_THRESHOLD_BASE + 5 * 9)
#define K8_MCE_THRESHOLD_DRAM_ECC (K8_MCE_THRESHOLD_BANK_4 + 0)
#ifdef __KERNEL__
#include <asm/atomic.h>
void mce_log(struct mce *m);
DECLARE_PER_CPU(struct sys_device, device_mce);
#ifdef CONFIG_X86_MCE_INTEL
void mce_intel_feature_init(struct cpuinfo_x86 *c);
#else
static inline void mce_intel_feature_init(struct cpuinfo_x86 *c)
{
}
#endif
#ifdef CONFIG_X86_MCE_AMD
void mce_amd_feature_init(struct cpuinfo_x86 *c);
#else
static inline void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
}
#endif
extern atomic_t mce_entry;
#endif
#endif

View file

@@ -0,0 +1,98 @@
/*
* Routines and structures for building a bitmap of
* dirty pages in a live system. For use in memory mirroring
* or migration applications.
*
* Copyright (C) 2006 Stratus Technologies Bermuda Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __X86_64_MMTRACK_H__
#define __X86_64_MMTRACK_H__
#ifndef CONFIG_TRACK_DIRTY_PAGES
static inline void mm_track_pte(pte_t *ptep) {}
static inline void mm_track_pmd(pmd_t *pmdp) {}
static inline void mm_track_pud(pud_t *pudp) {}
static inline void mm_track_pgd(pgd_t *pgdp) {}
static inline void mm_track_phys(void *physp) {}
#else
#include <asm/page.h>
#include <asm/atomic.h>
/*
* For memory-tracking purposes, if active is true (non-zero), the other
* elements of the structure are available for use. Each time mm_track_pte
* is called, it increments count and sets a bit in the bitvector table.
* Each bit in the bitvector represents a physical page in memory.
*
* This is declared in arch/x86_64/mm/track.c.
*
* The in_use element is used in the code which drives the memory tracking
* environment. When tracking is complete, the vector may be freed, but
* only after the active flag is set to zero and the in_use count goes to
* zero.
*
* The count element indicates how many pages have been stored in the
* bitvector. This is an optimization to avoid counting the bits in the
* vector between harvest operations.
*/
struct mm_tracker {
int active; // non-zero if this structure in use
atomic_t count; // number of pages tracked by mm_track()
unsigned long * vector; // bit vector of modified pages
unsigned long bitcnt; // number of bits in vector
};
extern struct mm_tracker mm_tracking_struct;
extern void do_mm_track_pte(void *);
extern void do_mm_track_pmd(void *);
extern void do_mm_track_pud(void *);
extern void do_mm_track_pgd(void *);
extern void do_mm_track_phys(void *);
/*
* The mm_track routine is needed by macros in pgtable.h
*/
static __inline__ void mm_track_pte(pte_t *ptep)
{
if (unlikely(mm_tracking_struct.active))
do_mm_track_pte(ptep);
}
static __inline__ void mm_track_pmd(pmd_t *pmdp)
{
if (unlikely(mm_tracking_struct.active))
do_mm_track_pmd(pmdp);
}
static __inline__ void mm_track_pud(pud_t *pudp)
{
if (unlikely(mm_tracking_struct.active))
do_mm_track_pud(pudp);
}
static __inline__ void mm_track_pgd(pgd_t *pgdp)
{
if (unlikely(mm_tracking_struct.active))
do_mm_track_pgd(pgdp);
}
static __inline__ void mm_track_phys(void *physp)
{
if (unlikely(mm_tracking_struct.active))
do_mm_track_phys(physp);
}
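/*
 * Harvest sketch (not part of the original file): the 'count' element
 * already gives the number of dirty pages cheaply; walking the bitvector,
 * as below, is how a harvester finds *which* physical pages were touched.
 * A real harvester would clear bits as it consumes them and decrement
 * 'count' to match.
 */
static __inline__ unsigned long example_scan_dirty(void)
{
	unsigned long i, dirty = 0;

	for (i = 0; i < mm_tracking_struct.bitcnt; i++)
		if (mm_tracking_struct.vector[i >> 6] & (1UL << (i & 63)))
			dirty++;	/* physical page i was modified */
	return dirty;
}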
#endif /* CONFIG_TRACK_DIRTY_PAGES */
#endif /* __X86_64_MMTRACK_H__ */

View file

@@ -0,0 +1,19 @@
#ifndef __X8664_MMAN_H__
#define __X8664_MMAN_H__
#include <asm-generic/mman.h>
#define MAP_32BIT 0x40 /* only give out 32bit addresses */
#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
#define MAP_LOCKED 0x2000 /* pages are locked */
#define MAP_NORESERVE 0x4000 /* don't check for reservations */
#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x10000 /* do not block on IO */
#define MCL_CURRENT 1 /* lock all current mappings */
#define MCL_FUTURE 2 /* lock all future mappings */
#endif

View file

@@ -0,0 +1,8 @@
#ifndef _ASM_MMSEGMENT_H
#define _ASM_MMSEGMENT_H 1
typedef struct {
unsigned long seg;
} mm_segment_t;
#endif

View file

@@ -0,0 +1,20 @@
#ifndef __x86_64_MMU_H
#define __x86_64_MMU_H
#include <linux/spinlock.h>
#include <asm/semaphore.h>
/*
 * The x86_64 doesn't have an mmu context, but
* we put the segment information here.
*
* cpu_vm_mask is used to optimize ldt flushing.
*/
typedef struct {
void *ldt;
rwlock_t ldtlock;
int size;
struct semaphore sem;
} mm_context_t;
#endif

View file

@@ -0,0 +1,73 @@
#ifndef __X86_64_MMU_CONTEXT_H
#define __X86_64_MMU_CONTEXT_H
#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/pda.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
/*
* possibly do the LDT unload here?
*/
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
if (read_pda(mmu_state) == TLBSTATE_OK)
write_pda(mmu_state, TLBSTATE_LAZY);
#endif
}
static inline void load_cr3(pgd_t *pgd)
{
asm volatile("movq %0,%%cr3" :: "r" (__pa(pgd)) : "memory");
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned cpu = smp_processor_id();
if (likely(prev != next)) {
/* stop flush ipis for the previous mm */
cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
write_pda(mmu_state, TLBSTATE_OK);
write_pda(active_mm, next);
#endif
cpu_set(cpu, next->cpu_vm_mask);
load_cr3(next->pgd);
if (unlikely(next->context.ldt != prev->context.ldt))
load_LDT_nolock(&next->context, cpu);
}
#ifdef CONFIG_SMP
else {
write_pda(mmu_state, TLBSTATE_OK);
if (read_pda(active_mm) != next)
out_of_line_bug();
if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
/* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
*/
load_cr3(next->pgd);
load_LDT_nolock(&next->context, cpu);
}
}
#endif
}
#define deactivate_mm(tsk,mm) do { \
load_gs_index(0); \
asm volatile("movl %0,%%fs"::"r"(0)); \
} while(0)
#define activate_mm(prev, next) \
switch_mm((prev),(next),NULL)
#endif

View file

@@ -0,0 +1,14 @@
#ifndef _ASM_MMX_H
#define _ASM_MMX_H
/*
* MMX 3Dnow! helper operations
*/
#include <linux/types.h>
extern void *_mmx_memcpy(void *to, const void *from, size_t size);
extern void mmx_clear_page(void *page);
extern void mmx_copy_page(void *to, void *from);
#endif

View file

@@ -0,0 +1,50 @@
/* K8 NUMA support */
/* Copyright 2002,2003 by Andi Kleen, SuSE Labs */
/* 2.5 Version loosely based on the NUMAQ Code by Pat Gaughen. */
#ifndef _ASM_X86_64_MMZONE_H
#define _ASM_X86_64_MMZONE_H 1
#ifdef CONFIG_NUMA
#define VIRTUAL_BUG_ON(x)
#include <asm/smp.h>
/* Should really switch to dynamic allocation at some point */
#define NODEMAPSIZE 0x4fff
/* Simple perfect hash to map physical addresses to node numbers */
struct memnode {
int shift;
u8 map[NODEMAPSIZE];
} ____cacheline_aligned;
extern struct memnode memnode;
#define memnode_shift memnode.shift
#define memnodemap memnode.map
extern struct pglist_data *node_data[];
static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
{
unsigned nid;
VIRTUAL_BUG_ON((addr >> memnode_shift) >= NODEMAPSIZE);
nid = memnodemap[addr >> memnode_shift];
VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]);
return nid;
}
#define NODE_DATA(nid) (node_data[nid])
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \
NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_DISCONTIGMEM
#define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
extern int pfn_valid(unsigned long pfn);
#endif
#endif
#endif

View file

@@ -0,0 +1,15 @@
#ifndef _ASM_X8664_MODULE_H
#define _ASM_X8664_MODULE_H
struct mod_arch_specific {};
#define MODULES_ARE_ELF64
#define Elf_Shdr Elf64_Shdr
#define Elf_Sym Elf64_Sym
#define Elf_Ehdr Elf64_Ehdr
#define Elf_Rel Elf64_Rel
#define Elf_Rela Elf64_Rela
#define ELF_R_TYPE(X) ELF64_R_TYPE(X)
#define ELF_R_SYM(X) ELF64_R_SYM(X)
#endif

View file

@@ -0,0 +1,242 @@
#ifndef __ASM_MPSPEC_H
#define __ASM_MPSPEC_H
/*
* Structure definitions for SMP machines following the
* Intel Multiprocessing Specification 1.1 and 1.4.
*/
/*
* This tag identifies where the SMP configuration
* information is.
*/
#define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_')
/*
* A maximum of 255 APICs with the current APIC ID architecture.
*/
#define MAX_APICS 255
struct intel_mp_floating
{
char mpf_signature[4]; /* "_MP_" */
unsigned int mpf_physptr; /* Configuration table address */
unsigned char mpf_length; /* Our length (paragraphs) */
unsigned char mpf_specification;/* Specification version */
unsigned char mpf_checksum; /* Checksum (makes sum 0) */
unsigned char mpf_feature1; /* Standard or configuration ? */
unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */
unsigned char mpf_feature3; /* Unused (0) */
unsigned char mpf_feature4; /* Unused (0) */
unsigned char mpf_feature5; /* Unused (0) */
};
struct mp_config_table
{
char mpc_signature[4];
#define MPC_SIGNATURE "PCMP"
unsigned short mpc_length; /* Size of table */
char mpc_spec; /* 0x01 */
char mpc_checksum;
char mpc_oem[8];
char mpc_productid[12];
unsigned int mpc_oemptr; /* 0 if not present */
unsigned short mpc_oemsize; /* 0 if not present */
unsigned short mpc_oemcount;
unsigned int mpc_lapic; /* APIC address */
unsigned int reserved;
};
/* Followed by entries */
#define MP_PROCESSOR 0
#define MP_BUS 1
#define MP_IOAPIC 2
#define MP_INTSRC 3
#define MP_LINTSRC 4
struct mpc_config_processor
{
unsigned char mpc_type;
unsigned char mpc_apicid; /* Local APIC number */
unsigned char mpc_apicver; /* Its versions */
unsigned char mpc_cpuflag;
#define CPU_ENABLED 1 /* Processor is available */
#define CPU_BOOTPROCESSOR 2 /* Processor is the BP */
unsigned int mpc_cpufeature;
#define CPU_STEPPING_MASK 0x0F
#define CPU_MODEL_MASK 0xF0
#define CPU_FAMILY_MASK 0xF00
unsigned int mpc_featureflag; /* CPUID feature value */
unsigned int mpc_reserved[2];
};
struct mpc_config_bus
{
unsigned char mpc_type;
unsigned char mpc_busid;
unsigned char mpc_bustype[6];
};
/* List of Bus Type string values, Intel MP Spec. */
#define BUSTYPE_EISA "EISA"
#define BUSTYPE_ISA "ISA"
#define BUSTYPE_INTERN "INTERN" /* Internal BUS */
#define BUSTYPE_MCA "MCA"
#define BUSTYPE_VL "VL" /* Local bus */
#define BUSTYPE_PCI "PCI"
#define BUSTYPE_PCMCIA "PCMCIA"
#define BUSTYPE_CBUS "CBUS"
#define BUSTYPE_CBUSII "CBUSII"
#define BUSTYPE_FUTURE "FUTURE"
#define BUSTYPE_MBI "MBI"
#define BUSTYPE_MBII "MBII"
#define BUSTYPE_MPI "MPI"
#define BUSTYPE_MPSA "MPSA"
#define BUSTYPE_NUBUS "NUBUS"
#define BUSTYPE_TC "TC"
#define BUSTYPE_VME "VME"
#define BUSTYPE_XPRESS "XPRESS"
struct mpc_config_ioapic
{
unsigned char mpc_type;
unsigned char mpc_apicid;
unsigned char mpc_apicver;
unsigned char mpc_flags;
#define MPC_APIC_USABLE 0x01
unsigned int mpc_apicaddr;
};
struct mpc_config_intsrc
{
unsigned char mpc_type;
unsigned char mpc_irqtype;
unsigned short mpc_irqflag;
unsigned char mpc_srcbus;
unsigned char mpc_srcbusirq;
unsigned char mpc_dstapic;
unsigned char mpc_dstirq;
};
enum mp_irq_source_types {
mp_INT = 0,
mp_NMI = 1,
mp_SMI = 2,
mp_ExtINT = 3
};
#define MP_IRQDIR_DEFAULT 0
#define MP_IRQDIR_HIGH 1
#define MP_IRQDIR_LOW 3
struct mpc_config_lintsrc
{
unsigned char mpc_type;
unsigned char mpc_irqtype;
unsigned short mpc_irqflag;
unsigned char mpc_srcbusid;
unsigned char mpc_srcbusirq;
unsigned char mpc_destapic;
#define MP_APIC_ALL 0xFF
unsigned char mpc_destapiclint;
};
/*
* Default configurations
*
* 1 2 CPU ISA 82489DX
* 2 2 CPU EISA 82489DX neither IRQ 0 timer nor IRQ 13 DMA chaining
* 3 2 CPU EISA 82489DX
* 4 2 CPU MCA 82489DX
* 5 2 CPU ISA+PCI
* 6 2 CPU EISA+PCI
* 7 2 CPU MCA+PCI
*/
#define MAX_MP_BUSSES 256
/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */
#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
enum mp_bustype {
MP_BUS_ISA = 1,
MP_BUS_EISA,
MP_BUS_PCI,
MP_BUS_MCA
};
extern unsigned char mp_bus_id_to_type [MAX_MP_BUSSES];
extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
extern unsigned int boot_cpu_physical_apicid;
extern int smp_found_config;
extern void find_smp_config (void);
extern void get_smp_config (void);
extern int nr_ioapics;
extern unsigned char apic_version [MAX_APICS];
extern int mp_irq_entries;
extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES];
extern int mpc_default_type;
extern unsigned long mp_lapic_addr;
extern int pic_mode;
#ifdef CONFIG_ACPI
extern void mp_register_lapic (u8 id, u8 enabled);
extern void mp_register_lapic_address (u64 address);
#ifdef CONFIG_X86_IO_APIC
extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base);
extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi);
extern void mp_config_acpi_legacy_irqs (void);
extern int mp_register_gsi (u32 gsi, int triggering, int polarity);
#endif /*CONFIG_X86_IO_APIC*/
#endif
extern int using_apic_timer;
#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS)
struct physid_mask
{
unsigned long mask[PHYSID_ARRAY_SIZE];
};
typedef struct physid_mask physid_mask_t;
#define physid_set(physid, map) set_bit(physid, (map).mask)
#define physid_clear(physid, map) clear_bit(physid, (map).mask)
#define physid_isset(physid, map) test_bit(physid, (map).mask)
#define physid_test_and_set(physid, map) test_and_set_bit(physid, (map).mask)
#define physids_and(dst, src1, src2) bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
#define physids_or(dst, src1, src2) bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
#define physids_clear(map) bitmap_zero((map).mask, MAX_APICS)
#define physids_complement(dst, src) bitmap_complement((dst).mask, (src).mask, MAX_APICS)
#define physids_empty(map) bitmap_empty((map).mask, MAX_APICS)
#define physids_equal(map1, map2) bitmap_equal((map1).mask, (map2).mask, MAX_APICS)
#define physids_weight(map) bitmap_weight((map).mask, MAX_APICS)
#define physids_shift_right(d, s, n) bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS)
#define physids_shift_left(d, s, n) bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS)
#define physids_coerce(map) ((map).mask[0])
#define physids_promote(physids) \
({ \
physid_mask_t __physid_mask = PHYSID_MASK_NONE; \
__physid_mask.mask[0] = physids; \
__physid_mask; \
})
#define physid_mask_of_physid(physid) \
({ \
physid_mask_t __physid_mask = PHYSID_MASK_NONE; \
physid_set(physid, __physid_mask); \
__physid_mask; \
})
#define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} }
#define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} }
extern physid_mask_t phys_cpu_present_map;
#endif

View file

@@ -0,0 +1,27 @@
#ifndef _X8664_MSGBUF_H
#define _X8664_MSGBUF_H
/*
* The msqid64_ds structure for x86-64 architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 2 miscellaneous 64-bit values
*/
struct msqid64_ds {
struct ipc64_perm msg_perm;
__kernel_time_t msg_stime; /* last msgsnd time */
__kernel_time_t msg_rtime; /* last msgrcv time */
__kernel_time_t msg_ctime; /* last change time */
unsigned long msg_cbytes; /* current number of bytes on queue */
unsigned long msg_qnum; /* number of messages in queue */
unsigned long msg_qbytes; /* max number of bytes on queue */
__kernel_pid_t msg_lspid; /* pid of last msgsnd */
__kernel_pid_t msg_lrpid; /* last receive pid */
unsigned long __unused4;
unsigned long __unused5;
};
#endif

View file

@@ -0,0 +1,24 @@
/*
* Copyright (C) 2003-2004 Intel
* Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
*/
#ifndef ASM_MSI_H
#define ASM_MSI_H
#include <asm/desc.h>
#include <asm/mach_apic.h>
#include <asm/smp.h>
#define LAST_DEVICE_VECTOR (FIRST_SYSTEM_VECTOR - 1)
#define MSI_TARGET_CPU_SHIFT 12
extern struct msi_ops msi_apic_ops;
static inline int msi_arch_init(void)
{
msi_register(&msi_apic_ops);
return 0;
}
#endif /* ASM_MSI_H */

View file

@@ -0,0 +1,410 @@
#ifndef X86_64_MSR_H
#define X86_64_MSR_H 1
#ifndef __ASSEMBLY__
/*
* Access to machine-specific registers (available on 586 and better only)
* Note: the rd* operations modify the parameters directly (without using
 * pointer indirection); this allows gcc to optimize better
*/
#define rdmsr(msr,val1,val2) \
__asm__ __volatile__("rdmsr" \
: "=a" (val1), "=d" (val2) \
: "c" (msr))
#define rdmsrl(msr,val) do { unsigned long a__,b__; \
__asm__ __volatile__("rdmsr" \
: "=a" (a__), "=d" (b__) \
: "c" (msr)); \
val = a__ | (b__<<32); \
} while(0)
#define wrmsr(msr,val1,val2) \
__asm__ __volatile__("wrmsr" \
: /* no outputs */ \
: "c" (msr), "a" (val1), "d" (val2))
#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
/* wrmsr with exception handling */
#define wrmsr_safe(msr,a,b) ({ int ret__; \
asm volatile("2: wrmsr ; xorl %0,%0\n" \
"1:\n\t" \
".section .fixup,\"ax\"\n\t" \
"3: movl %4,%0 ; jmp 1b\n\t" \
".previous\n\t" \
".section __ex_table,\"a\"\n" \
" .align 8\n\t" \
" .quad 2b,3b\n\t" \
".previous" \
: "=a" (ret__) \
: "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
ret__; })
#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
#define rdmsr_safe(msr,a,b) \
({ int ret__; \
asm volatile ("1: rdmsr\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: movl %4,%0\n" \
" jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 8\n" \
" .quad 1b,3b\n" \
".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b))\
:"c"(msr), "i"(-EIO), "0"(0)); \
ret__; })
#define rdtsc(low,high) \
__asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
#define rdtscl(low) \
__asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
#define rdtscp(low,high,aux) \
asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))
#define rdtscll(val) do { \
unsigned int __a,__d; \
asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
(val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
} while(0)
#define rdtscpll(val, aux) do { \
unsigned long __a, __d; \
asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
(val) = (__d << 32) | __a; \
} while (0)
#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)
#define rdpmc(counter,low,high) \
__asm__ __volatile__("rdpmc" \
: "=a" (low), "=d" (high) \
: "c" (counter))
static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
__asm__("cpuid"
: "=a" (*eax),
"=b" (*ebx),
"=c" (*ecx),
"=d" (*edx)
: "0" (op));
}
/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
int *edx)
{
__asm__("cpuid"
: "=a" (*eax),
"=b" (*ebx),
"=c" (*ecx),
"=d" (*edx)
: "0" (op), "c" (count));
}
/*
* CPUID functions returning a single datum
*/
static inline unsigned int cpuid_eax(unsigned int op)
{
unsigned int eax;
__asm__("cpuid"
: "=a" (eax)
: "0" (op)
: "bx", "cx", "dx");
return eax;
}
static inline unsigned int cpuid_ebx(unsigned int op)
{
unsigned int eax, ebx;
__asm__("cpuid"
: "=a" (eax), "=b" (ebx)
: "0" (op)
: "cx", "dx" );
return ebx;
}
static inline unsigned int cpuid_ecx(unsigned int op)
{
unsigned int eax, ecx;
__asm__("cpuid"
: "=a" (eax), "=c" (ecx)
: "0" (op)
: "bx", "dx" );
return ecx;
}
static inline unsigned int cpuid_edx(unsigned int op)
{
unsigned int eax, edx;
__asm__("cpuid"
: "=a" (eax), "=d" (edx)
: "0" (op)
: "bx", "cx");
return edx;
}
#define MSR_IA32_UCODE_WRITE 0x79
#define MSR_IA32_UCODE_REV 0x8b
#endif /* !__ASSEMBLY__ */
/* AMD/K8 specific MSRs */
#define MSR_EFER 0xc0000080 /* extended feature register */
#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */
#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
#define MSR_CSTAR 0xc0000083 /* compatibility mode SYSCALL target */
#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */
#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */
#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */
#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow (or USER_GS from kernel) */
/* EFER bits: */
#define _EFER_SCE 0 /* SYSCALL/SYSRET */
#define _EFER_LME 8 /* Long mode enable */
#define _EFER_LMA 10 /* Long mode active (read-only) */
#define _EFER_NX 11 /* No execute enable */
#define EFER_SCE (1<<_EFER_SCE)
#define EFER_LME (1<<_EFER_LME)
#define EFER_LMA (1<<_EFER_LMA)
#define EFER_NX (1<<_EFER_NX)
/* Intel MSRs. Some also available on other CPUs */
#define MSR_IA32_TSC 0x10
#define MSR_IA32_PLATFORM_ID 0x17
#define MSR_IA32_PERFCTR0 0xc1
#define MSR_IA32_PERFCTR1 0xc2
#define MSR_MTRRcap 0x0fe
#define MSR_IA32_BBL_CR_CTL 0x119
#define MSR_IA32_SYSENTER_CS 0x174
#define MSR_IA32_SYSENTER_ESP 0x175
#define MSR_IA32_SYSENTER_EIP 0x176
#define MSR_IA32_MCG_CAP 0x179
#define MSR_IA32_MCG_STATUS 0x17a
#define MSR_IA32_MCG_CTL 0x17b
#define MSR_IA32_EVNTSEL0 0x186
#define MSR_IA32_EVNTSEL1 0x187
#define MSR_IA32_DEBUGCTLMSR 0x1d9
#define MSR_IA32_LASTBRANCHFROMIP 0x1db
#define MSR_IA32_LASTBRANCHTOIP 0x1dc
#define MSR_IA32_LASTINTFROMIP 0x1dd
#define MSR_IA32_LASTINTTOIP 0x1de
#define MSR_MTRRfix64K_00000 0x250
#define MSR_MTRRfix16K_80000 0x258
#define MSR_MTRRfix16K_A0000 0x259
#define MSR_MTRRfix4K_C0000 0x268
#define MSR_MTRRfix4K_C8000 0x269
#define MSR_MTRRfix4K_D0000 0x26a
#define MSR_MTRRfix4K_D8000 0x26b
#define MSR_MTRRfix4K_E0000 0x26c
#define MSR_MTRRfix4K_E8000 0x26d
#define MSR_MTRRfix4K_F0000 0x26e
#define MSR_MTRRfix4K_F8000 0x26f
#define MSR_MTRRdefType 0x2ff
#define MSR_IA32_MC0_CTL 0x400
#define MSR_IA32_MC0_STATUS 0x401
#define MSR_IA32_MC0_ADDR 0x402
#define MSR_IA32_MC0_MISC 0x403
#define MSR_P6_PERFCTR0 0xc1
#define MSR_P6_PERFCTR1 0xc2
#define MSR_P6_EVNTSEL0 0x186
#define MSR_P6_EVNTSEL1 0x187
/* K7/K8 MSRs. Not complete. See the architecture manual for a more complete list. */
#define MSR_K7_EVNTSEL0 0xC0010000
#define MSR_K7_PERFCTR0 0xC0010004
#define MSR_K7_EVNTSEL1 0xC0010001
#define MSR_K7_PERFCTR1 0xC0010005
#define MSR_K7_EVNTSEL2 0xC0010002
#define MSR_K7_PERFCTR2 0xC0010006
#define MSR_K7_EVNTSEL3 0xC0010003
#define MSR_K7_PERFCTR3 0xC0010007
#define MSR_K8_TOP_MEM1 0xC001001A
#define MSR_K8_TOP_MEM2 0xC001001D
#define MSR_K8_SYSCFG 0xC0010010
#define MSR_K8_HWCR 0xC0010015
/* K6 MSRs */
#define MSR_K6_EFER 0xC0000080
#define MSR_K6_STAR 0xC0000081
#define MSR_K6_WHCR 0xC0000082
#define MSR_K6_UWCCR 0xC0000085
#define MSR_K6_PSOR 0xC0000087
#define MSR_K6_PFIR 0xC0000088
/* Centaur-Hauls/IDT defined MSRs. */
#define MSR_IDT_FCR1 0x107
#define MSR_IDT_FCR2 0x108
#define MSR_IDT_FCR3 0x109
#define MSR_IDT_FCR4 0x10a
#define MSR_IDT_MCR0 0x110
#define MSR_IDT_MCR1 0x111
#define MSR_IDT_MCR2 0x112
#define MSR_IDT_MCR3 0x113
#define MSR_IDT_MCR4 0x114
#define MSR_IDT_MCR5 0x115
#define MSR_IDT_MCR6 0x116
#define MSR_IDT_MCR7 0x117
#define MSR_IDT_MCR_CTRL 0x120
/* VIA Cyrix defined MSRs */
#define MSR_VIA_FCR 0x1107
#define MSR_VIA_LONGHAUL 0x110a
#define MSR_VIA_RNG 0x110b
#define MSR_VIA_BCR2 0x1147
/* Intel defined MSRs. */
#define MSR_IA32_P5_MC_ADDR 0
#define MSR_IA32_P5_MC_TYPE 1
#define MSR_IA32_PLATFORM_ID 0x17
#define MSR_IA32_EBL_CR_POWERON 0x2a
#define MSR_IA32_APICBASE 0x1b
#define MSR_IA32_APICBASE_BSP (1<<8)
#define MSR_IA32_APICBASE_ENABLE (1<<11)
#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
/* P4/Xeon+ specific */
#define MSR_IA32_MCG_EAX 0x180
#define MSR_IA32_MCG_EBX 0x181
#define MSR_IA32_MCG_ECX 0x182
#define MSR_IA32_MCG_EDX 0x183
#define MSR_IA32_MCG_ESI 0x184
#define MSR_IA32_MCG_EDI 0x185
#define MSR_IA32_MCG_EBP 0x186
#define MSR_IA32_MCG_ESP 0x187
#define MSR_IA32_MCG_EFLAGS 0x188
#define MSR_IA32_MCG_EIP 0x189
#define MSR_IA32_MCG_RESERVED 0x18A
#define MSR_P6_EVNTSEL0 0x186
#define MSR_P6_EVNTSEL1 0x187
#define MSR_IA32_PERF_STATUS 0x198
#define MSR_IA32_PERF_CTL 0x199
#define MSR_IA32_THERM_CONTROL 0x19a
#define MSR_IA32_THERM_INTERRUPT 0x19b
#define MSR_IA32_THERM_STATUS 0x19c
#define MSR_IA32_MISC_ENABLE 0x1a0
#define MSR_IA32_DEBUGCTLMSR 0x1d9
#define MSR_IA32_LASTBRANCHFROMIP 0x1db
#define MSR_IA32_LASTBRANCHTOIP 0x1dc
#define MSR_IA32_LASTINTFROMIP 0x1dd
#define MSR_IA32_LASTINTTOIP 0x1de
#define MSR_IA32_MC0_CTL 0x400
#define MSR_IA32_MC0_STATUS 0x401
#define MSR_IA32_MC0_ADDR 0x402
#define MSR_IA32_MC0_MISC 0x403
/* Pentium IV performance counter MSRs */
#define MSR_P4_BPU_PERFCTR0 0x300
#define MSR_P4_BPU_PERFCTR1 0x301
#define MSR_P4_BPU_PERFCTR2 0x302
#define MSR_P4_BPU_PERFCTR3 0x303
#define MSR_P4_MS_PERFCTR0 0x304
#define MSR_P4_MS_PERFCTR1 0x305
#define MSR_P4_MS_PERFCTR2 0x306
#define MSR_P4_MS_PERFCTR3 0x307
#define MSR_P4_FLAME_PERFCTR0 0x308
#define MSR_P4_FLAME_PERFCTR1 0x309
#define MSR_P4_FLAME_PERFCTR2 0x30a
#define MSR_P4_FLAME_PERFCTR3 0x30b
#define MSR_P4_IQ_PERFCTR0 0x30c
#define MSR_P4_IQ_PERFCTR1 0x30d
#define MSR_P4_IQ_PERFCTR2 0x30e
#define MSR_P4_IQ_PERFCTR3 0x30f
#define MSR_P4_IQ_PERFCTR4 0x310
#define MSR_P4_IQ_PERFCTR5 0x311
#define MSR_P4_BPU_CCCR0 0x360
#define MSR_P4_BPU_CCCR1 0x361
#define MSR_P4_BPU_CCCR2 0x362
#define MSR_P4_BPU_CCCR3 0x363
#define MSR_P4_MS_CCCR0 0x364
#define MSR_P4_MS_CCCR1 0x365
#define MSR_P4_MS_CCCR2 0x366
#define MSR_P4_MS_CCCR3 0x367
#define MSR_P4_FLAME_CCCR0 0x368
#define MSR_P4_FLAME_CCCR1 0x369
#define MSR_P4_FLAME_CCCR2 0x36a
#define MSR_P4_FLAME_CCCR3 0x36b
#define MSR_P4_IQ_CCCR0 0x36c
#define MSR_P4_IQ_CCCR1 0x36d
#define MSR_P4_IQ_CCCR2 0x36e
#define MSR_P4_IQ_CCCR3 0x36f
#define MSR_P4_IQ_CCCR4 0x370
#define MSR_P4_IQ_CCCR5 0x371
#define MSR_P4_ALF_ESCR0 0x3ca
#define MSR_P4_ALF_ESCR1 0x3cb
#define MSR_P4_BPU_ESCR0 0x3b2
#define MSR_P4_BPU_ESCR1 0x3b3
#define MSR_P4_BSU_ESCR0 0x3a0
#define MSR_P4_BSU_ESCR1 0x3a1
#define MSR_P4_CRU_ESCR0 0x3b8
#define MSR_P4_CRU_ESCR1 0x3b9
#define MSR_P4_CRU_ESCR2 0x3cc
#define MSR_P4_CRU_ESCR3 0x3cd
#define MSR_P4_CRU_ESCR4 0x3e0
#define MSR_P4_CRU_ESCR5 0x3e1
#define MSR_P4_DAC_ESCR0 0x3a8
#define MSR_P4_DAC_ESCR1 0x3a9
#define MSR_P4_FIRM_ESCR0 0x3a4
#define MSR_P4_FIRM_ESCR1 0x3a5
#define MSR_P4_FLAME_ESCR0 0x3a6
#define MSR_P4_FLAME_ESCR1 0x3a7
#define MSR_P4_FSB_ESCR0 0x3a2
#define MSR_P4_FSB_ESCR1 0x3a3
#define MSR_P4_IQ_ESCR0 0x3ba
#define MSR_P4_IQ_ESCR1 0x3bb
#define MSR_P4_IS_ESCR0 0x3b4
#define MSR_P4_IS_ESCR1 0x3b5
#define MSR_P4_ITLB_ESCR0 0x3b6
#define MSR_P4_ITLB_ESCR1 0x3b7
#define MSR_P4_IX_ESCR0 0x3c8
#define MSR_P4_IX_ESCR1 0x3c9
#define MSR_P4_MOB_ESCR0 0x3aa
#define MSR_P4_MOB_ESCR1 0x3ab
#define MSR_P4_MS_ESCR0 0x3c0
#define MSR_P4_MS_ESCR1 0x3c1
#define MSR_P4_PMH_ESCR0 0x3ac
#define MSR_P4_PMH_ESCR1 0x3ad
#define MSR_P4_RAT_ESCR0 0x3bc
#define MSR_P4_RAT_ESCR1 0x3bd
#define MSR_P4_SAAT_ESCR0 0x3ae
#define MSR_P4_SAAT_ESCR1 0x3af
#define MSR_P4_SSU_ESCR0 0x3be
#define MSR_P4_SSU_ESCR1 0x3bf /* guess: not defined in manual */
#define MSR_P4_TBPU_ESCR0 0x3c2
#define MSR_P4_TBPU_ESCR1 0x3c3
#define MSR_P4_TC_ESCR0 0x3c4
#define MSR_P4_TC_ESCR1 0x3c5
#define MSR_P4_U2L_ESCR0 0x3b0
#define MSR_P4_U2L_ESCR1 0x3b1
#endif /* X86_64_MSR_H */
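
As a usage sketch (kernel-side, illustrative only), the cpuid helpers and the rdmsrl macro above compose like this; the function name is hypothetical:

/* Hypothetical sketch: is the NX (no-execute) feature present and enabled? */
static int example_nx_enabled(void)
{
	unsigned long efer;

	/* CPUID leaf 0x80000001:EDX bit 20 advertises NX support. */
	if (cpuid_eax(0x80000000) < 0x80000001 ||
	    !(cpuid_edx(0x80000001) & (1 << 20)))
		return 0;		/* CPU does not advertise NX */

	rdmsrl(MSR_EFER, efer);		/* read the extended feature register */
	return !!(efer & EFER_NX);	/* NX actually enabled? */
}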

View file

@ -0,0 +1,140 @@
/* Generic MTRR (Memory Type Range Register) ioctls.
Copyright (C) 1997-1999 Richard Gooch
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with this library; if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
Richard Gooch may be reached by email at rgooch@atnf.csiro.au
The postal address is:
Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
*/
#ifndef _LINUX_MTRR_H
#define _LINUX_MTRR_H
#include <linux/ioctl.h>
#define MTRR_IOCTL_BASE 'M'
struct mtrr_sentry
{
unsigned long base; /* Base address */
unsigned int size; /* Size of region */
unsigned int type; /* Type of region */
};
/* Warning: on x86-64 this structure has a different field order
   than on i386. The 32bit emulation code takes care of that, but
   native 64bit programs must use this layout, otherwise the X
   server will break. */
struct mtrr_gentry
{
unsigned long base; /* Base address */
unsigned int size; /* Size of region */
unsigned int regnum; /* Register number */
unsigned int type; /* Type of region */
};
/* These are the various ioctls */
#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry)
#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry)
#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry)
#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry)
#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry)
#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry)
#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry)
/* These are the region types */
#define MTRR_TYPE_UNCACHABLE 0
#define MTRR_TYPE_WRCOMB 1
/*#define MTRR_TYPE_ 2*/
/*#define MTRR_TYPE_ 3*/
#define MTRR_TYPE_WRTHROUGH 4
#define MTRR_TYPE_WRPROT 5
#define MTRR_TYPE_WRBACK 6
#define MTRR_NUM_TYPES 7
#ifdef __KERNEL__
/* The following functions are for use by other drivers */
# ifdef CONFIG_MTRR
extern int mtrr_add (unsigned long base, unsigned long size,
unsigned int type, char increment);
extern int mtrr_add_page (unsigned long base, unsigned long size,
unsigned int type, char increment);
extern int mtrr_del (int reg, unsigned long base, unsigned long size);
extern int mtrr_del_page (int reg, unsigned long base, unsigned long size);
# else
static __inline__ int mtrr_add (unsigned long base, unsigned long size,
unsigned int type, char increment)
{
return -ENODEV;
}
static __inline__ int mtrr_add_page (unsigned long base, unsigned long size,
unsigned int type, char increment)
{
return -ENODEV;
}
static __inline__ int mtrr_del (int reg, unsigned long base,
unsigned long size)
{
return -ENODEV;
}
static __inline__ int mtrr_del_page (int reg, unsigned long base,
unsigned long size)
{
return -ENODEV;
}
#endif /* CONFIG_MTRR */
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
struct mtrr_sentry32
{
compat_ulong_t base; /* Base address */
compat_uint_t size; /* Size of region */
compat_uint_t type; /* Type of region */
};
struct mtrr_gentry32
{
compat_ulong_t regnum; /* Register number */
compat_uint_t base; /* Base address */
compat_uint_t size; /* Size of region */
compat_uint_t type; /* Type of region */
};
#define MTRR_IOCTL_BASE 'M'
#define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32)
#define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32)
#define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32)
#define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32)
#define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32)
#define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32)
#define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32)
#define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32)
#define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32)
#define MTRRIOC32_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32)
#endif /* CONFIG_COMPAT */
#endif /* __KERNEL__ */
#endif /* _LINUX_MTRR_H */
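
For illustration, a hedged user-space sketch of the ioctl interface above, issued against /proc/mtrr (the traditional control file for these ioctls); the base/size values are arbitrary examples:

/* User-space sketch: request a write-combining region. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/mtrr.h>

int main(void)
{
	struct mtrr_sentry s = {
		.base = 0xf8000000UL,	/* example frame buffer base */
		.size = 0x400000,	/* 4 MiB */
		.type = MTRR_TYPE_WRCOMB,
	};
	int fd = open("/proc/mtrr", O_WRONLY);

	if (fd < 0 || ioctl(fd, MTRRIOC_ADD_ENTRY, &s) < 0) {
		perror("mtrr");
		return 1;
	}
	close(fd);
	return 0;
}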

View file

@ -0,0 +1,113 @@
/*
* Assembly implementation of the mutex fastpath, based on atomic
* decrement/increment.
*
* started by Ingo Molnar:
*
* Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*/
#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H
/**
* __mutex_fastpath_lock - decrement and call function if negative
* @v: pointer of type atomic_t
* @fail_fn: function to call if the result is negative
*
* Atomically decrements @v and calls <fail_fn> if the result is negative.
*/
#define __mutex_fastpath_lock(v, fail_fn) \
do { \
unsigned long dummy; \
\
typecheck(atomic_t *, v); \
typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
\
__asm__ __volatile__( \
LOCK_PREFIX " decl (%%rdi) \n" \
" js 2f \n" \
"1: \n" \
\
LOCK_SECTION_START("") \
"2: call "#fail_fn" \n" \
" jmp 1b \n" \
LOCK_SECTION_END \
\
:"=D" (dummy) \
: "D" (v) \
: "rax", "rsi", "rdx", "rcx", \
"r8", "r9", "r10", "r11", "memory"); \
} while (0)
/**
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
* from 1 to a 0 value
* @count: pointer of type atomic_t
* @fail_fn: function to call if the original value was not 1
*
* Change the count from 1 to a value lower than 1, and call <fail_fn> if
* it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
* or anything the slow path function returns.
*/
static inline int
__mutex_fastpath_lock_retval(atomic_t *count,
int fastcall (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_dec_return(count) < 0))
return fail_fn(count);
else
return 0;
}
/**
* __mutex_fastpath_unlock - increment and call function if nonpositive
* @v: pointer of type atomic_t
* @fail_fn: function to call if the result is nonpositive
*
* Atomically increments @v and calls <fail_fn> if the result is nonpositive.
*/
#define __mutex_fastpath_unlock(v, fail_fn) \
do { \
unsigned long dummy; \
\
typecheck(atomic_t *, v); \
typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
\
__asm__ __volatile__( \
LOCK_PREFIX " incl (%%rdi) \n" \
" jle 2f \n" \
"1: \n" \
\
LOCK_SECTION_START("") \
"2: call "#fail_fn" \n" \
" jmp 1b \n" \
LOCK_SECTION_END \
\
:"=D" (dummy) \
: "D" (v) \
: "rax", "rsi", "rdx", "rcx", \
"r8", "r9", "r10", "r11", "memory"); \
} while (0)
#define __mutex_slowpath_needs_to_unlock() 1
/**
* __mutex_fastpath_trylock - try to acquire the mutex, without waiting
*
* @count: pointer of type atomic_t
* @fail_fn: fallback function
*
* Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
* if it wasn't 1 originally. [the fallback function is never used on
* x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
*/
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
if (likely(atomic_cmpxchg(count, 1, 0) == 1))
return 1;
else
return 0;
}
#endif
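
Conceptually, the generic mutex_lock() entry point pairs the fastpath macro above with an out-of-line slowpath roughly as follows; this is a simplified sketch, not the actual kernel/mutex.c code:

/* Simplified sketch: only a contended lock (count went negative
 * after the locked decrement) falls through to the slowpath. */
static void example_mutex_lock(struct mutex *lock)
{
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}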

View file

@ -0,0 +1,11 @@
#ifndef __X8664_NAMEI_H
#define __X8664_NAMEI_H
/* This dummy routine may be changed to something useful
* for /usr/gnemul/ emulation stuff.
* Look at asm-sparc/namei.h for details.
*/
#define __emul_prefix() NULL
#endif

View file

@ -0,0 +1,79 @@
#ifndef _ASM_X86_64_NETDUMP_H_
#define _ASM_X86_64_NETDUMP_H_
/*
* linux/include/asm-x86_64/netdump.h
*
* Copyright (c) 2003, 2004 Red Hat, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifdef __KERNEL__
#include <asm/crashdump.h>
static const int platform_supports_netdump = 1;
#define platform_machine_type() (EM_X86_64)
#define platform_page_is_ram(x) (page_is_ram(x) && \
kern_addr_valid((unsigned long)pfn_to_kaddr(x)))
static inline unsigned char platform_effective_version(req_t *req)
{
if (req->from > 0)
return min_t(unsigned char, req->from, NETDUMP_VERSION_MAX);
else
return 0;
}
#define platform_max_pfn() (num_physpages)
static inline u32 platform_next_available(unsigned long pfn)
{
unsigned long pgnum = next_ram_page(pfn);
if (pgnum < platform_max_pfn()) {
return (u32)pgnum;
}
return 0;
}
static inline void platform_jiffy_cycles(unsigned long long *jcp)
{
unsigned long long t0, t1;
platform_timestamp(t0);
netdump_mdelay(1);
platform_timestamp(t1);
if (t1 > t0)
*jcp = t1 - t0;
}
static inline unsigned int platform_get_regs(char *tmp, struct pt_regs *myregs)
{
elf_gregset_t elf_regs;
char *tmp2;
tmp2 = tmp + sprintf(tmp, "Sending register info.\n");
ELF_CORE_COPY_REGS(elf_regs, myregs);
memcpy(tmp2, &elf_regs, sizeof(elf_regs));
return(strlen(tmp) + sizeof(elf_regs));
}
#endif /* __KERNEL__ */
#endif /* _ASM_X86_64_NETDUMP_H_ */

View file

@ -0,0 +1,77 @@
/*
* linux/include/asm-i386/nmi.h
*/
#ifndef ASM_NMI_H
#define ASM_NMI_H
#include <linux/pm.h>
#include <asm/io.h>
struct pt_regs;
typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
/**
* set_nmi_callback
*
* Set a handler for an NMI. Only one handler may be
* set at a time. The callback should return 1 if it handled the NMI.
*/
void set_nmi_callback(nmi_callback_t callback);
/**
* unset_nmi_callback
*
* Remove the handler previously set.
*/
void unset_nmi_callback(void);
#ifdef CONFIG_PM
/** Replace the PM callback routine for NMI. */
struct pm_dev * set_nmi_pm_callback(pm_callback callback);
/** Unset the PM callback routine back to the default. */
void unset_nmi_pm_callback(struct pm_dev * dev);
#else
static inline struct pm_dev * set_nmi_pm_callback(pm_callback callback)
{
return NULL;
}
static inline void unset_nmi_pm_callback(struct pm_dev * dev)
{
}
#endif /* CONFIG_PM */
extern void default_do_nmi(struct pt_regs *);
extern void die_nmi(char *str, struct pt_regs *regs);
#define get_nmi_reason() inb(0x61)
extern int panic_on_timeout;
extern int unknown_nmi_panic;
extern int check_nmi_watchdog(void);
extern void setup_apic_nmi_watchdog (void);
extern int reserve_lapic_nmi(void);
extern void release_lapic_nmi(void);
extern void disable_timer_nmi_watchdog(void);
extern void enable_timer_nmi_watchdog(void);
extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
extern void nmi_watchdog_default(void);
extern int setup_nmi_watchdog(char *);
extern unsigned int nmi_watchdog;
#define NMI_DEFAULT -1
#define NMI_NONE 0
#define NMI_IO_APIC 1
#define NMI_LOCAL_APIC 2
#define NMI_INVALID 3
#endif /* ASM_NMI_H */
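
A minimal sketch of the callback contract described above; the handler name and its internals are hypothetical:

/* Hypothetical handler: return 1 when the NMI was consumed. */
static int example_nmi_handler(struct pt_regs *regs, int cpu)
{
	if (!(get_nmi_reason() & 0xc0))	/* neither SERR nor IOCHK set */
		return 0;		/* not ours; let others see it */
	/* ... inspect regs / clear the error condition ... */
	return 1;
}

static void example_install(void)
{
	set_nmi_callback(example_nmi_handler);	/* only one at a time */
}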

View file

@ -0,0 +1 @@
#include <asm-i386/node.h>

View file

@ -0,0 +1,38 @@
#ifndef _ASM_X8664_NUMA_H
#define _ASM_X8664_NUMA_H 1
#include <linux/nodemask.h>
struct bootnode {
u64 start,end;
};
extern int compute_hash_shift(struct bootnode *nodes, int numnodes);
#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
extern void numa_add_cpu(int cpu);
extern void numa_init_array(void);
extern int numa_off;
extern void numa_set_node(int cpu, int node);
extern void srat_reserve_add_area(int nodeid);
extern int hotadd_percent;
extern unsigned char apicid_to_node[256];
#ifdef CONFIG_NUMA
extern void __init init_cpu_to_node(void);
static inline void clear_node_cpumask(int cpu)
{
clear_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
}
#else
#define init_cpu_to_node() do {} while (0)
#define clear_node_cpumask(cpu) do {} while (0)
#endif
#define NUMA_NO_NODE 0xff
#endif

View file

@ -0,0 +1,13 @@
#ifndef _ASM_X8664_NUMNODES_H
#define _ASM_X8664_NUMNODES_H 1
#include <linux/config.h>
/* Use a wider node id space only on large-SMP configurations */
#if NR_CPUS > 8
#define NODES_SHIFT 6
#else
#define NODES_SHIFT 3
#endif
#endif
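
NODES_SHIFT bounds the node id space: the generic nodemask code derives the node count from it, so these values allow 64 nodes on large-SMP builds and 8 otherwise. A one-line restatement of that relationship, as defined in the generic headers rather than here:

/* From the generic numa headers: 1 << 6 = 64 nodes, 1 << 3 = 8 nodes. */
#define MAX_NUMNODES	(1 << NODES_SHIFT)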

Some files were not shown because too many files have changed in this diff.