ESXi-5.0-GA

This commit is contained in:
unknown 2015-10-23 15:48:45 -04:00
parent 0d186246d2
commit 763922b583
1764 changed files with 754695 additions and 647927 deletions

View file

@@ -0,0 +1,10 @@
/*
* DO NOT EDIT THIS FILE - IT IS GENERATED BY THE DRIVER BUILD.
*
* If you need to change the driver's name spaces, look in the scons
* files for the driver's defineVmkDriver() rule.
*/
VMK_NAMESPACE_PROVIDES("com.vmware.vmkplexer", "1.0");
#define VMKLNX_MY_NAMESPACE_VERSION "1.0"
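
The provides/define pair above is the producer side of the namespace handshake. For comparison, a sketch of what the consumer side of the same machinery might look like in a dependent driver's generated file, assuming a matching VMK_NAMESPACE_REQUIRED macro (the macro name is an assumption, not confirmed by this diff):

/*
 * Hypothetical consumer-side sketch: a driver that calls into
 * vmkplexer would pin the namespace and version it was built against.
 */
VMK_NAMESPACE_REQUIRED("com.vmware.vmkplexer", "1.0");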

View file

@@ -314,7 +314,7 @@ static __inline__ void atomic64_set(atomic64_t *v, long i)
*/
__asm__ __volatile__(
"movq %1, %0"
: "+m" (v->counter)
: "=m" (v->counter)
: "r" (i)
);
}
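
The two constraint lines above read as the before/after of a one-line change: the output operand appears to move from the read-write "+m" form to the write-only "=m" form, which is all a plain store needs. A standalone sketch of the same idiom (hypothetical my_atomic64_t/my_atomic64_set names; GCC extended asm on x86_64 assumed):

typedef struct { volatile long counter; } my_atomic64_t;

static inline void my_atomic64_set(my_atomic64_t *v, long i)
{
        /* "=m": the asm only writes counter, no prior value is read;
         * "r": the new value arrives in a general-purpose register. */
        __asm__ __volatile__(
                "movq %1, %0"
                : "=m" (v->counter)
                : "r" (i)
        );
}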

View file

@@ -1,6 +1,26 @@
#ifndef __ASM_X8664_BUG_H
#define __ASM_X8664_BUG_H 1
#if defined(__VMKLNX__)
/*
* For vmklinux, the trick below doesn't work. The trick works by raising a
* ud2 exception, and then the exception handler knows the rip is
* pointing to a struct bug_frame. However, pushq can only take a
* 32-bit immediate, so we can't push the 64-bit address of __FILE__ into
* it (this works on linux, because linux is loaded at -2Gb, and when
* 'signed int filename' is cast to long it is sign-extended, the top
* bits are filled to 0xffff... and the address is correct). Besides this,
* the vmkernel ud2 exception handler doesn't know anything about
* this. So the long and the short of it is - we don't do any of this
* arch specific BUG() stuff, and just fall back to the generic
* panic()
*/
#include <asm-generic/bug.h>
#else /* !defined(__VMKLNX__) */
#include <linux/stringify.h>
/*
@@ -21,23 +41,7 @@ struct bug_frame {
the disassembler. Thanks to Jan Beulich & Suresh Siddha
for nice instruction selection.
The magic numbers generate mov $64bitimm,%eax ; ret $offset. */
/**
* BUG - Prints a message
*
* Prints a log message
*
* SYNOPSIS:
* #define BUG()
*
* ESX Deviation Notes:
* As in most x86_64 variants of Linux, generates a
* system stop (panic).
*
* RETURN VALUE:
* None
*
*/
/* _VMKLNX_CODECHECK_: BUG */
#define BUG() \
asm volatile( \
"ud2 ; pushq $%c1 ; ret $%c0" :: \
@@ -49,3 +53,5 @@ static inline void out_of_line_bug(void) { }
#include <asm-generic/bug.h>
#endif
#endif /* defined(__VMKLNX__) */
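
The sign-extension argument in the comment above can be checked in isolation. A small userspace sketch with a hypothetical kernel address:

#include <stdio.h>

int main(void)
{
        long addr = 0xffffffff80123456L; /* an address in the top 2Gb, as on Linux */
        int imm = (int)addr;             /* truncated 32-bit immediate: 0x80123456 */
        long back = (long)imm;           /* pushq sign-extends the immediate       */

        /* Prints "recovered: yes": the top bits come back as 0xffff...,
         * which is why the trick works for a kernel loaded at -2Gb but
         * not for vmklinux, which is not loaded in that range. */
        printf("recovered: %s\n", back == addr ? "yes" : "no");
        return 0;
}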

View file

@@ -209,18 +209,6 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
return (u32)(unsigned long)uptr;
}
/**
* compat_alloc_user_space - <1 Line Description>
* @<arg1>: <first argument description>
* @<arg2>: <second argument description>
*
* <full description>
*
* ESX Deviation Notes:
* <Describes how ESX implementation is different from standard Linux.>
*
*/
/* _VMKLNX_CODECHECK_: compat_alloc_user_space */
#if !defined(__VMKLNX__)
static __inline__ void __user *compat_alloc_user_space(long len)
{
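
ptr_to_compat() above narrows a 64-bit user pointer to a 32-bit compat_uptr_t, and compat_ptr() from the same header is its inverse. A round-trip sketch (hypothetical sketch_store_uptr name):

/* Store a user pointer in a 32-bit slot and verify it is recoverable;
 * 32-bit compat callers only ever pass addresses that fit. */
static int sketch_store_uptr(void __user *uptr, compat_uptr_t *slot)
{
        *slot = ptr_to_compat(uptr);
        return compat_ptr(*slot) == uptr; /* 1 when the round trip is exact */
}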

View file

@@ -78,7 +78,10 @@
#define X86_FEATURE_CID (4*32+10) /* Context ID */
#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */
#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */
#if defined(__VMKLNX__)
/* From Linux 2.6.29 arch/x86/include/asm/cpufeature.h */
#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */
#endif /* defined(__VMKLNX__) */
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
#define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */
#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */
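
Feature numbers such as (4*32+18) encode a word index and a bit index into the CPU capability array. A sketch of the decoding with hypothetical helper names:

#define FEATURE_WORD(f) ((f) / 32) /* which 32-bit capability word */
#define FEATURE_BIT(f)  ((f) % 32) /* which bit inside that word   */

/* X86_FEATURE_DCA == 4*32+18, i.e. word 4, bit 18. */
static inline int sketch_cpu_has(const unsigned int *caps, int feature)
{
        return (caps[FEATURE_WORD(feature)] >> FEATURE_BIT(feature)) & 1;
}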

View file

@@ -1,5 +1,5 @@
/*
* Portions Copyright 2008, 2009 VMware, Inc.
* Portions Copyright 2008-2010 VMware, Inc.
*/
#ifndef _X86_64_CURRENT_H
#define _X86_64_CURRENT_H
@@ -9,6 +9,8 @@ struct task_struct;
#include <asm/pda.h>
#if defined(__VMKLNX__)
#include <vmklinux_dist.h>
/**
* get_current - Gets current task pointer for the current world.
*
@@ -20,14 +22,15 @@ struct task_struct;
/* _VMKLNX_CODECHECK_: get_current */
static inline struct task_struct *get_current(void)
{
#if defined(__VMKLNX__)
extern struct task_struct *vmklnx_GetCurrent(void);
return vmklnx_GetCurrent();
}
#else /* !defined(__VMKLNX__) */
static inline struct task_struct *get_current(void)
{
struct task_struct *t = read_pda(pcurrent);
return t;
#endif /* defined(__VMKLNX__) */
}
#endif /* defined(__VMKLNX__) */
/**
* current - Get current task pointer of current task
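
On vmklinux, get_current() resolves through the vmklnx_GetCurrent() call instead of a PDA read, and driver code normally reaches it through the current macro documented above. A minimal usage sketch, assuming the standard task_struct comm and pid fields:

#include <linux/kernel.h>
#include <linux/sched.h>

/* Hypothetical helper: log the identity of the calling context. */
static void sketch_log_current(void)
{
        printk(KERN_INFO "in %s (pid %d)\n", current->comm, current->pid);
}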

View file

@@ -72,7 +72,66 @@ extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
#if !defined(__VMKLNX__)
#if defined(__VMKLNX__)
/*
* DMA mapping functions on vmklinux are not inlined so as to
* support further revision and improvements to the behavior
* of stable third-party binary drivers using these functions.
*/
extern int dma_mapping_error(dma_addr_t dma_addr);
#define dma_unmap_page(dev,dma_address,size,dir) \
dma_unmap_single(dev,dma_address,size,dir)
extern dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
int direction);
extern dma_addr_t
dma_map_page(struct device *hwdev, struct page *page, unsigned long offset,
size_t size, int direction);
extern void
dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
int direction);
extern void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
size_t size, int direction);
extern void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
size_t size, int direction);
extern void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
unsigned long offset, size_t size, int direction);
extern void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
unsigned long offset, size_t size, int direction);
extern void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
int nelems, int direction);
extern void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
int nelems, int direction);
extern int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction);
extern void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
int direction);
struct vmklnx_codma;
extern struct vmklnx_codma vmklnx_codma;
extern int vmklnx_dma_supported(struct vmklnx_codma *codma,
struct device *hwdev, u64 mask);
static inline int dma_supported(struct device *hwdev, u64 mask)
{
return vmklnx_dma_supported(&vmklnx_codma, hwdev, mask);
}
#else /* !defined(__VMKLNX__) */
/**
* dma_mapping_error - Check a bus address for a mapping error
* @dma_addr: bus address previously returned by dma_map_single or dma_map_page
@@ -325,58 +384,7 @@ dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
extern int dma_supported(struct device *hwdev, u64 mask);
#else /* defined(__VMKLNX__) */
extern int dma_mapping_error(dma_addr_t dma_addr);
/* In vmklinux, virt_to_phys & phys_to_virt are real function calls, so
* until support for IOMMU is added, it's good to avoid a call to
* dma_map_single() here since nommu_map_single() calls virt_to_phys().
*/
#define dma_map_page(dev,page,offset,size,dir) \
(((dma_addr_t) page_to_phys(page)) + (offset))
#define dma_unmap_page(dev,dma_address,size,dir) do { } while(0)
extern dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
int direction);
extern void
dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
int direction);
extern void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
size_t size, int direction);
extern void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
size_t size, int direction);
extern void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
unsigned long offset, size_t size, int direction);
extern void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
unsigned long offset, size_t size, int direction);
extern void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
int nelems, int direction);
extern void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
int nelems, int direction);
extern int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction);
extern void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
int direction);
struct vmklnx_codma;
extern struct vmklnx_codma vmklnx_codma;
extern int vmklnx_dma_supported(struct vmklnx_codma *codma,
struct device *hwdev, u64 mask);
static inline int dma_supported(struct device *hwdev, u64 mask)
{
return vmklnx_dma_supported(&vmklnx_codma, hwdev, mask);
}
#endif /* defined(__VMKLNX__) */
#endif /* !defined(__VMKLNX__) */
/* same for gart, swiotlb, and nommu */
static inline int dma_get_cache_alignment(void)
@@ -386,35 +394,7 @@ static inline int dma_get_cache_alignment(void)
#define dma_is_consistent(h) 1
#if !defined(__VMKLNX__)
extern int dma_set_mask(struct device *dev, u64 mask);
#else /* defined(__VMKLNX__) */
/**
* dma_set_mask - Set a device's allowable DMA mask
* @dev: device whose mask is being set
* @mask: address bitmask
*
* Sets the range in which the device can perform DMA operations.
* The mask, when bitwise-ANDed with an arbitrary machine address, expresses
* the DMA addressability of the device. This mask is used by dma_mapping
* functions (ie, dma_alloc_coherent, dma_pool_alloc) to guarantee that the
* memory allocated is usable by the device.
*
* RETURN VALUE:
* 0 on success
* -EIO if the given mask cannot be used for DMA on the system, or if the
* dma_mask has not been previously initialized.
*
*/
/* _VMKLNX_CODECHECK_: dma_set_mask */
static inline int dma_set_mask(struct device *dev, u64 mask)
{
if (!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
*dev->dma_mask = mask;
return 0;
}
#endif /* !defined(__VMKLNX__) */
static inline void
dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
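
Taken together, the declarations above form the usual probe-time and I/O-path sequence: set the mask, map a buffer, check the mapping, unmap when done. A sketch against these signatures (hypothetical function and variable names; DMA_32BIT_MASK as in 2.6.18-era linux/dma-mapping.h, and the one-argument dma_mapping_error() declared above):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* One DMA round trip using the interfaces declared in this header. */
static int sketch_dma_roundtrip(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        if (dma_set_mask(dev, DMA_32BIT_MASK)) /* device limited to 32 bits */
                return -EIO;

        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(handle))
                return -ENOMEM;

        /* ... hand 'handle' to the hardware and wait for completion ... */

        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}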

View file

@@ -50,7 +50,7 @@
* In case of any code change below, make sure you
* go and update the corresponding documentation.
* The documentation file can be found at
* vmkdrivers/src26/doc/dummyDefs.doc
* vmkdrivers/src_9/doc/dummyDefs.doc
*
* outb
* outb_p
@@ -171,17 +171,13 @@ static inline void * phys_to_virt(unsigned long address)
*/
#if defined(__VMKLNX__)
/**
* phys_to_page - machine address to a page handle
* phys_to_page - machine address to page handle
* @maddr : machine address
*
* Converts machine address @maddr to a page handle
*
* SYNOPSIS:
* #define phys_to_page(maddr)
*
* ESX Deviation Notes:
* The resulting page handle cannot be dereferenced.
* This page handle needs to be handled through the page API only.
* The resulting page handle cannot be dereferenced. The returned value
* doesn't correspond to the address of a page structure but to the actual
* page number. This page handle needs to be handled through the page API only.
*
*/
/* _VMKLNX_CODECHECK_: phys_to_page */
@@ -189,15 +185,10 @@ static inline void * phys_to_virt(unsigned long address)
/**
* page_to_phys - page handle to machine address
* @page: pointer to page handle
* @maddr : machine address
*
* Converts page handle @page to a machine address.
*
* SYNOPSIS:
* #define page_to_phys(page)
*
* RETURN VALUE:
* Machine address of the page of type dma_addr_t
* ESX Deviation Notes:
* None.
*
*/
/* _VMKLNX_CODECHECK_: page_to_phys */
@@ -252,8 +243,10 @@ extern void iounmap(volatile void __iomem *addr);
*
* Allow them on x86 for legacy drivers, though.
*/
#if !defined(__VMKLNX__)
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
#endif
/*
* readX/writeX() are used to access memory mapped devices. On some
@@ -462,6 +455,23 @@ static inline void __writel(__u32 b, volatile void __iomem *addr)
{
*(__force volatile __u32 *)addr = b;
}
/**
* __writeq - write a u64 value to I/O device memory
* @b: the u64 value to be written
* @addr: the iomapped memory address that is obtained from ioremap()
* or from ioremap_nocache()
*
* This is an internal function. Please call writeq instead.
*
* RETURN VALUE:
* This function does not return a value.
*
* SEE ALSO:
* ioremap(), ioremap_nocache()
*
*/
/* _VMKLNX_CODECHECK_: __writeq */
static inline void __writeq(__u64 b, volatile void __iomem *addr)
{
*(__force volatile __u64 *)addr = b;
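
As the comment above says, drivers call writeq() rather than __writeq(). A sketch of the usual map/write/unmap pattern, with a hypothetical BAR base and doorbell offset:

#include <asm/io.h>
#include <linux/errno.h>

/* 64-bit MMIO write through the public wrapper. */
static int sketch_ring_doorbell(unsigned long bar_start, unsigned long bar_len)
{
        void __iomem *regs = ioremap_nocache(bar_start, bar_len);

        if (!regs)
                return -ENOMEM;
        writeq(0x1ULL, regs + 0x10); /* 0x10: hypothetical doorbell register */
        iounmap(regs);
        return 0;
}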

View file

@@ -63,18 +63,6 @@ static inline void raw_local_irq_enable(void)
raw_local_irq_restore((flags | (1 << 9)) & ~(1 << 18));
}
/**
* raw_irqs_disabled_flags - <1 Line Description>
* @<arg1>: <first argument description>
* @<arg2>: <second argument description>
*
* <full description>
*
* ESX Deviation Notes:
* <Describes how ESX implementation is different from standard Linux.>
*
*/
/* _VMKLNX_CODECHECK_: raw_irqs_disabled_flags */
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
return !(flags & (1<<9)) || (flags & (1 << 18));
@@ -92,6 +80,18 @@ static inline void raw_local_irq_enable(void)
__asm__ __volatile__("sti" : : : "memory");
}
/**
* raw_irqs_disabled_flags - Check the irq status in the processor flags
* @flags: processor flags
*
* Tests the processor flags to see if the interrupt flag is disabled.
* The ESX implementation is x86 specific.
*
* RETURN VALUE:
* Returns 1 if the irq flags are disabled
*
*/
/* _VMKLNX_CODECHECK_: raw_irqs_disabled_flags */
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
return !(flags & (1 << 9));
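
Bit 9 of the flags word is the x86 IF (interrupt enable) flag. A usage sketch, assuming the raw_local_save_flags() macro defined earlier in this header:

/* Hypothetical helper: snapshot the flags, then test them as above. */
static inline int sketch_irqs_off(void)
{
        unsigned long flags;

        raw_local_save_flags(flags);
        return raw_irqs_disabled_flags(flags); /* 1 when IF is clear */
}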

Some files were not shown because too many files have changed in this diff.