ESXi-5.0-U3

This commit is contained in:
unknown 2015-10-23 18:10:48 -04:00
parent 1efda0e305
commit 052c0cbc4b
9 changed files with 173 additions and 114 deletions

View file

@@ -1,6 +1,6 @@
#define BUILD_NUMBER "build-920308"
#define BUILD_NUMBER_NUMERIC 920308
#define BUILD_NUMBER_NUMERIC_STRING "920308"
#define PRODUCT_BUILD_NUMBER "product-build-53817"
#define PRODUCT_BUILD_NUMBER_NUMERIC 53817
#define PRODUCT_BUILD_NUMBER_NUMERIC_STRING "53817"
#define BUILD_NUMBER "build-1312038"
#define BUILD_NUMBER_NUMERIC 1312038
#define BUILD_NUMBER_NUMERIC_STRING "1312038"
#define PRODUCT_BUILD_NUMBER "product-build-63466"
#define PRODUCT_BUILD_NUMBER_NUMERIC 63466
#define PRODUCT_BUILD_NUMBER_NUMERIC_STRING "63466"

View file

@@ -11924,8 +11924,8 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
struct bnx2x *bp = netdev_priv(dev);
u8 phy_fw_ver[PHY_FW_VER_LEN];
strcpy(info->driver, DRV_MODULE_NAME);
strcpy(info->version, DRV_MODULE_VERSION);
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
phy_fw_ver[0] = '\0';
if (bp->port.pmf) {
@@ -11936,14 +11936,14 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
bnx2x_release_phy_lock(bp);
}
strncpy(info->fw_version, bp->fw_ver, 32);
strlcpy(info->fw_version, bp->fw_ver, sizeof(info->fw_version));
snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
"bc %d.%d.%d%s%s",
(bp->common.bc_ver & 0xff0000) >> 16,
(bp->common.bc_ver & 0xff00) >> 8,
(bp->common.bc_ver & 0xff),
((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
strcpy(info->bus_info, pci_name(bp->pdev));
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = BNX2X_NUM_STATS;
info->testinfo_len = BNX2X_NUM_TESTS;
info->eedump_len = bp->common.flash_size;

View file

@@ -3733,14 +3733,14 @@ static irqreturn_t e1000_intr(int irq, void *data)
}
#ifdef CONFIG_E1000_NAPI
/* XXX only using ring 0 for napi */
#if !defined(__VMKLNX__)
/* XXX only using ring 0 for napi */
if (likely(netif_rx_schedule_prep(netdev, &adapter->rx_ring[0].napi))) {
#else /* defined(__VMKLNX__) */
if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
/* disable interrupts, without the synchronize_irq bit */
E1000_WRITE_REG(hw, E1000_IMC, ~0);
E1000_WRITE_FLUSH(&adapter->hw);
if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
#endif /* !defined(__VMKLNX__) */
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;

View file

@@ -29,7 +29,7 @@
/*
* Source file for NIC routines to access the Phantom hardware
*
* $Id: //depot/vmkdrivers/esx50u2/src_9/drivers/net/nx_nic/unm_nic_hw.c#1 $
* $Id: //depot/vmkdrivers/esx50u3/src_9/drivers/net/nx_nic/unm_nic_hw.c#1 $
*
*/
#include <linux/delay.h>

View file

@@ -478,14 +478,15 @@ hpsa_get_target_sas_identifier(struct scsi_target *starget, u64 *sas_id)
static int hpsa_proc_get_info(char *buffer, char **start, off_t offset,
int length, int *eof, void *data)
{
off_t pos = 0;
off_t len = 0;
int size, i, ctlr;
int logicals = 0;
struct ctlr_info *h = (struct ctlr_info *) data;
struct hpsa_scsi_dev_t *drv;
unsigned long flags;
#define HPSA_MAXPROCINFO_LINE 256
char line[HPSA_MAXPROCINFO_LINE];
static int loop_resume = 0;
ctlr = h->ctlr;
/* prevent displaying bogus info during configuration
@@ -499,6 +500,8 @@ static int hpsa_proc_get_info(char *buffer, char **start, off_t offset,
h->busy_initializing = 1;
spin_unlock_irqrestore(&h->lock, flags);
if(!offset)
{
/* count the logical disk devices */
for (i = 0; i < h->ndevices; i++) {
drv = h->dev[i];
@@ -531,9 +534,10 @@ static int hpsa_proc_get_info(char *buffer, char **start, off_t offset,
logicals, h->Qdepth, h->commands_outstanding,
h->maxQsinceinit, h->max_outstanding, h->maxSG,
h->nr_cmds, h->scsi_host->host_no);
pos += size;
len += size;
for (i = 0; i < h->ndevices; i++) {
}
for (i = loop_resume; i < h->ndevices; i++) {
drv = h->dev[i];
if (drv == NULL )
continue;
@@ -545,7 +549,7 @@ static int hpsa_proc_get_info(char *buffer, char **start, off_t offset,
if (drv->raid_level > 5)
drv->raid_level = RAID_UNKNOWN;
size = sprintf(buffer + len, "hpsa%d/"
size = snprintf(line, HPSA_MAXPROCINFO_LINE, "hpsa%d/"
"C%d:B%d:T%d:L%d"
"\t%s\t%.16s\t%.4s\tRAID %s\n",
ctlr, h->scsi_host->host_no,
@@ -553,15 +557,25 @@ static int hpsa_proc_get_info(char *buffer, char **start, off_t offset,
scsi_device_type(drv->devtype),
drv->model, drv->revision,
raid_label[drv->raid_level]);
pos += size;
/* avoid buffer overflow */
if ((len + size) > length) {
loop_resume = i;
break;
}
sprintf(buffer + len, "%s", line);
len += size;
}
if (len == 0 ||
i == h->ndevices) {
*eof = 1;
*start = buffer + offset;
len -= offset;
if (len > length)
len = length;
loop_resume = 0;
}
else {
*eof = 0;
}
*start = buffer;
h->busy_initializing = 0;
return len;
}

View file

@@ -149,6 +149,16 @@ void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
}
EXPORT_SYMBOL_GPL(usb_anchor_urb);
/* Callers must hold anchor->lock */
/*
 * Detach @urb from @anchor and drop the reference the anchor held on it.
 * Must be called with anchor->lock held (see note above); factored out so
 * lock-holding paths can unanchor without re-taking the lock.
 */
static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
/* Clear the back-pointer so the urb no longer claims anchor membership. */
urb->anchor = NULL;
list_del(&urb->anchor_list);
/* Release the reference taken when the urb was anchored. */
usb_put_urb(urb);
/* If that was the last anchored urb, wake anyone waiting on the anchor. */
if (list_empty(&anchor->urb_list))
wake_up(&anchor->wait);
}
/**
* usb_unanchor_urb - unanchors an URB
* @urb: pointer to the urb to anchor
@@ -168,17 +178,13 @@ void usb_unanchor_urb(struct urb *urb)
return;
spin_lock_irqsave(&anchor->lock, flags);
if (unlikely(anchor != urb->anchor)) {
/* we've lost the race to another thread */
/* At this point, we could be competing with another thread which
* has the same intention. To protect the urb from being unanchored
* twice, only the winner of the race gets the job.
*/
if (likely(anchor == urb->anchor))
__usb_unanchor_urb(urb, anchor);
spin_unlock_irqrestore(&anchor->lock, flags);
return;
}
urb->anchor = NULL;
list_del(&urb->anchor_list);
spin_unlock_irqrestore(&anchor->lock, flags);
usb_put_urb(urb);
if (list_empty(&anchor->urb_list))
wake_up(&anchor->wait);
}
EXPORT_SYMBOL_GPL(usb_unanchor_urb);
@@ -828,20 +834,11 @@ EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);
void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
{
struct urb *victim;
unsigned long flags;
spin_lock_irqsave(&anchor->lock, flags);
while (!list_empty(&anchor->urb_list)) {
victim = list_entry(anchor->urb_list.prev, struct urb,
anchor_list);
usb_get_urb(victim);
spin_unlock_irqrestore(&anchor->lock, flags);
/* this will unanchor the URB */
while ((victim = usb_get_from_anchor(anchor)) != NULL) {
usb_unlink_urb(victim);
usb_put_urb(victim);
spin_lock_irqsave(&anchor->lock, flags);
}
spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);
@@ -878,13 +875,13 @@ struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
victim = list_entry(anchor->urb_list.next, struct urb,
anchor_list);
usb_get_urb(victim);
spin_unlock_irqrestore(&anchor->lock, flags);
usb_unanchor_urb(victim);
__usb_unanchor_urb(victim, anchor);
} else {
spin_unlock_irqrestore(&anchor->lock, flags);
victim = NULL;
}
spin_unlock_irqrestore(&anchor->lock, flags);
return victim;
}
@@ -905,12 +902,7 @@ void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
while (!list_empty(&anchor->urb_list)) {
victim = list_entry(anchor->urb_list.prev, struct urb,
anchor_list);
usb_get_urb(victim);
spin_unlock_irqrestore(&anchor->lock, flags);
/* this may free the URB */
usb_unanchor_urb(victim);
usb_put_urb(victim);
spin_lock_irqsave(&anchor->lock, flags);
__usb_unanchor_urb(victim, anchor);
}
spin_unlock_irqrestore(&anchor->lock, flags);
}

View file

@@ -299,6 +299,40 @@ typedef enum {
NETPOLL_BACKUP = 1,
} vmklnx_poll_type;
/*
* Since all pointers are at least 4-byte (often 8-byte) aligned, the low
* bits of vmk_NetPoll->priv are always zero, so the poll_type can be
* embedded there: final pointer value = (original priv pointer | poll_type).
*/
#define POLLPRIV_TYPE_BITS 1
#define POLLPRIV_TYPE_MASK ((1 << POLLPRIV_TYPE_BITS) - 1)
/*
 * Tag @priv with @poll_type by OR-ing the type into the pointer's low
 * alignment bits (POLLPRIV_TYPE_MASK). The asserts require a non-NULL,
 * suitably aligned pointer and a type small enough to fit in the mask.
 */
static inline void *pollpriv_embed(void *priv, vmklnx_poll_type poll_type)
{
VMK_ASSERT(priv);
VMK_ASSERT((((unsigned long) priv) & POLLPRIV_TYPE_MASK) == 0);
VMK_ASSERT(poll_type <= POLLPRIV_TYPE_MASK);
return (void *)(((unsigned long )priv) | poll_type);
}
/* Recover the vmklnx_poll_type previously embedded by pollpriv_embed(). */
static inline vmklnx_poll_type pollpriv_type(void *priv)
{
VMK_ASSERT(priv);
return (vmklnx_poll_type)(((unsigned long)priv) & POLLPRIV_TYPE_MASK);
}
/*
 * Strip the embedded type bits and return the napi_struct pointer.
 * Only valid for a priv tagged as NETPOLL_DEFAULT (asserted).
 */
static inline struct napi_struct *pollpriv_napi(void *priv)
{
VMK_ASSERT(pollpriv_type(priv) == NETPOLL_DEFAULT);
return (struct napi_struct *) (((unsigned long)priv) & (~POLLPRIV_TYPE_MASK));
}
/*
 * Strip the embedded type bits and return the net_device pointer.
 * Only valid for a priv tagged as NETPOLL_BACKUP (asserted).
 */
static inline struct net_device *pollpriv_net_device(void *priv)
{
VMK_ASSERT(pollpriv_type(priv) == NETPOLL_BACKUP);
return (struct net_device *) (((unsigned long)priv) & (~POLLPRIV_TYPE_MASK));
}
struct napi_struct {
unsigned long state;
int weight;

View file

@@ -427,7 +427,7 @@ int
netif_receive_skb(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
vmk_NetPoll pollPriv;
vmk_NetPoll netPoll;
vmk_Worldlet wdt;
struct napi_struct *napi = NULL;
vmk_PktHandle *pkt;
@@ -440,7 +440,7 @@ netif_receive_skb(struct sk_buff *skb)
* skb->napi->rxPktList.
*/
if (skb->napi == NULL) {
if (unlikely(vmk_WorldletGetCurrent(&wdt, (void **)&pollPriv) != VMK_OK)) {
if (unlikely(vmk_WorldletGetCurrent(&wdt, (void **)&netPoll) != VMK_OK)) {
VMK_ASSERT(VMK_FALSE);
dev_kfree_skb_any(skb);
dev->linnet_rx_dropped++;
@@ -449,14 +449,26 @@ netif_receive_skb(struct sk_buff *skb)
} else {
/*
* When the system is in the panic/debug status, the current worldlet is the
* debug worldlet rather than the napi_poll worldlet. In this case, put the
* debug worldlet rather than the NetPollWorldlet. In this case, put the
* arrived packets into debugPktList. This list will be processed by
* FlushRxBuffers, because netdump/netdebug will bypass the vswitch to read
* the packets.
*/
if (vmk_NetPollGetCurrent(&pollPriv) == VMK_OK) {
napi = (struct napi_struct *)vmk_NetPollGetPrivate(pollPriv);
if (vmk_NetPollGetCurrent(&netPoll) == VMK_OK) {
void *priv = vmk_NetPollGetPrivate(netPoll);
if (pollpriv_type(priv) == NETPOLL_DEFAULT) {
napi = pollpriv_napi(priv);
} else {
spin_lock(&dev->napi_lock);
list_for_each_entry(napi, &dev->napi_list, dev_list) {
if (napi->dev_poll && test_bit(NAPI_STATE_SCHED, &napi->state)) {
break;
}
}
spin_unlock(&dev->napi_lock);
}
}
if (!napi || vmk_SystemCheckState(VMK_SYSTEM_STATE_PANIC)) {
pkt = skb->pkt;
status = map_skb_to_pkt(skb);
@@ -475,7 +487,7 @@
}
goto done;
} else {
VMK_ASSERT(pollPriv != NULL);
VMK_ASSERT(netPoll != NULL);
skb->napi = napi;
}
}
@@ -522,7 +534,7 @@ static vmk_Bool
napi_poll(void *ptr)
{
VMK_ReturnStatus status = VMK_OK;
struct napi_struct *napi = (struct napi_struct *)ptr;
struct napi_struct *napi = pollpriv_napi(ptr);
/*
* napi_schedule_prep()/napi_schedule() depend on accurately seeing whether
@@ -574,9 +586,9 @@
*/
static vmk_Bool
netdev_poll(void *private)
netdev_poll(void *ptr)
{
struct net_device *dev = private;
struct net_device *dev = pollpriv_net_device(ptr);
vmk_Bool needWork;
struct napi_struct *napi;
VMK_ReturnStatus status = VMK_OK;
@@ -646,7 +658,7 @@ napi_poll_init(struct napi_struct *napi)
napi->vector = 0;
pollInit.poll = napi_poll;
pollInit.priv = napi;
pollInit.priv = pollpriv_embed(napi, NETPOLL_DEFAULT);
if (napi->dev->features & NETIF_F_CNA) {
pollInit.deliveryCallback = LinuxCNA_Poll;
@@ -732,7 +744,7 @@ netdev_poll_init(struct net_device *dev)
dev->napi_wdt_priv.napi = NULL;
pollInit.poll = netdev_poll;
pollInit.priv = dev;
pollInit.priv = pollpriv_embed(dev, NETPOLL_BACKUP);
if (dev->features & NETIF_F_CNA) {
pollInit.deliveryCallback = LinuxCNADev_Poll;
@@ -1154,6 +1166,15 @@ skb_gen_pkt_frags(struct sk_buff *skb)
}
}
/*
* PR 922606:
* Set skb frag ownership to vmkernel, such that vmklinux won't try
* to free those MPNs if the skb needs to be dropped later. Instead,
* the frags/SGs will be freed when the associated pkt is destroyed
* by vmkernel.
*/
vmklnx_set_skb_frags_owner_vmkernel(skb);
/*
* Since we removed packet completion in vmklinux, we
* cannot support skb chaining anymore.
@@ -1445,18 +1466,15 @@ skb_release_data(struct sk_buff *skb)
if ((in_irq() || irqs_disabled()) && !vmklnx_is_panic()) {
vmk_PktReleaseIRQ(skb->pkt);
} else {
vmk_NetPoll pollPriv;
struct napi_struct *napi;
vmk_NetPoll netPoll;
/*
* Try to queue packets in NAPI's compPktList in order to
* release them in batch, but first thoroughly check if we
* got called from a napi context (PR #396873).
*/
if (vmk_NetPollGetCurrent(&pollPriv) == VMK_OK &&
(napi = (struct napi_struct *) vmk_NetPollGetPrivate(pollPriv)) != NULL &&
napi->net_poll_type == NETPOLL_DEFAULT) {
vmk_NetPollQueueCompPkt(pollPriv, skb->pkt);
if (vmk_NetPollGetCurrent(&netPoll) == VMK_OK) {
vmk_NetPollQueueCompPkt(netPoll, skb->pkt);
} else {
vmk_PktRelease(skb->pkt);
}
@@ -2282,6 +2300,7 @@ vmklnx_netif_set_poll_cna(struct napi_struct *napi)
pollInit.priv = napi;
pollInit.deliveryCallback = LinuxCNA_Poll;
}
pollInit.priv = pollpriv_embed(pollInit.priv, napi->net_poll_type);
pollInit.features = VMK_NETPOLL_CUSTOM_DELIVERY_CALLBACK;
vmk_NetPollChangeCallback(napi->net_poll, &pollInit);
}
@@ -6918,7 +6937,7 @@ FlushRxBuffers(void* clientData)
{
struct net_device* dev = (struct net_device*)clientData;
struct napi_struct* napi = NULL;
vmk_NetPoll pollPriv;
vmk_NetPoll netPoll;
VMKLNX_DEBUG(1, "client data, now net_device:%p", dev);
@@ -6931,7 +6950,7 @@ FlushRxBuffers(void* clientData)
* Bypass the vswitch to receive the packets when the system is in the
* panic/debug mode.
*/
if (vmk_NetPollGetCurrent(&pollPriv) != VMK_OK) {
if (vmk_NetPollGetCurrent(&netPoll) != VMK_OK) {
if (debugPktList == NULL) {
debugPktList = (vmk_PktList) vmk_HeapAlloc(vmklnxLowHeap,
vmk_PktListSizeInBytes);

View file

@@ -1,5 +1,5 @@
/* ****************************************************************
* Portions Copyright 1998, 2010 VMware, Inc.
* Portions Copyright 1998, 2010, 2013 VMware, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -696,8 +696,8 @@ SCSILinuxProcessCompletions(scsiLinuxTLS_t *tls, // IN
case DRIVER_BUSY:
case DRIVER_SOFT:
hostStatus = VMK_SCSI_HOST_OK;
deviceStatus = VMK_SCSI_DEVICE_BUSY;
hostStatus = VMK_SCSI_HOST_BUS_BUSY;
deviceStatus = VMK_SCSI_DEVICE_GOOD;
break;
case DRIVER_MEDIA: