ESXi-5.0-U2

unknown 2015-10-23 15:55:21 -04:00
parent d0a14f9737
commit 1efda0e305
31 changed files with 8927 additions and 4115 deletions

@@ -0,0 +1,10 @@
/*
* DO NOT EDIT THIS FILE - IT IS GENERATED BY THE DRIVER BUILD.
*
* If you need to change the driver's name spaces, look in the scons
* files for the driver's defineVmkDriver() rule.
*/
VMK_NAMESPACE_PROVIDES("com.broadcom.tg3", "9.2.0.0");
#define VMKLNX_MY_NAMESPACE_VERSION "9.2.0.0"
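
A consumer module would declare a matching dependency on the namespace exported above; a hypothetical one-liner, assuming vmkapi's counterpart macro is available (not part of this commit):

/* Hypothetical consumer side of the namespace contract. */
VMK_NAMESPACE_REQUIRED("com.broadcom.tg3", "9.2.0.0");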

@@ -398,6 +398,8 @@
#define VMK_SCSI_ASC_LOGICAL_UNIT_ERROR 0x3e
#define VMK_SCSI_ASCQ_LOGICAL_UNIT_FAILED_SELF_TEST 0x03
#define VMK_SCSI_ASCQ_LOGICAL_UNIT_FAILURE 0x01
/** \brief LU is not configured (array only). */
#define VMK_SCSI_ASC_LU_NOT_CONFIGURED 0x68
/*
* Inquiry data.

@@ -398,6 +398,8 @@
#define VMK_SCSI_ASC_LOGICAL_UNIT_ERROR 0x3e
#define VMK_SCSI_ASCQ_LOGICAL_UNIT_FAILED_SELF_TEST 0x03
#define VMK_SCSI_ASCQ_LOGICAL_UNIT_FAILURE 0x01
/** \brief LU is not configured (array only). */
#define VMK_SCSI_ASC_LU_NOT_CONFIGURED 0x68
/*
* Inquiry data.

@@ -1,6 +1,6 @@
#define BUILD_NUMBER "build-623860"
#define BUILD_NUMBER_NUMERIC 623860
#define BUILD_NUMBER_NUMERIC_STRING "623860"
#define PRODUCT_BUILD_NUMBER "product-build-45730"
#define PRODUCT_BUILD_NUMBER_NUMERIC 45730
#define PRODUCT_BUILD_NUMBER_NUMERIC_STRING "45730"
#define BUILD_NUMBER "build-920308"
#define BUILD_NUMBER_NUMERIC 920308
#define BUILD_NUMBER_NUMERIC_STRING "920308"
#define PRODUCT_BUILD_NUMBER "product-build-53817"
#define PRODUCT_BUILD_NUMBER_NUMERIC 53817
#define PRODUCT_BUILD_NUMBER_NUMERIC_STRING "53817"

@@ -438,6 +438,9 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
qc = NULL;
/* reset PIO HSM and stop DMA engine */
cancel_delayed_work_sync(&ap->port_task);
spin_lock_irqsave(ap->lock, flags);
ap->hsm_task_state = HSM_ST_IDLE;

@@ -307,7 +307,11 @@ static void return_hosed_msg(struct smi_info *smi_info, int cCode)
msg->rsp_size = 3;
smi_info->curr_msg = NULL;
/* To fix #PR 889881*/
spin_unlock(&(smi_info->msg_lock));
deliver_recv_msg(smi_info, msg);
spin_lock(&(smi_info->msg_lock));
}
static enum si_sm_result start_next_msg(struct smi_info *smi_info)
@@ -767,7 +771,10 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
/* If we were handling a user message, format
a response to send to the upper layer to
tell it about the error. */
/* To fix #PR 889881*/
spin_lock(&(smi_info->msg_lock));
return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
spin_unlock(&(smi_info->msg_lock));
}
si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
}
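
The two hunks above are paired: return_hosed_msg() now drops msg_lock around deliver_recv_msg(), while the caller in smi_event_handler() takes msg_lock before calling return_hosed_msg(). A minimal sketch of the resulting lock discipline (illustrative only, on the assumption that deliver_recv_msg() can re-enter paths that take msg_lock):

spin_lock(&smi_info->msg_lock);            /* smi_event_handler(): lock held */
return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
        /* inside return_hosed_msg(): */
        spin_unlock(&smi_info->msg_lock);  /* drop before delivery */
        deliver_recv_msg(smi_info, msg);   /* callback runs unlocked */
        spin_lock(&smi_info->msg_lock);    /* reacquire for the caller */
spin_unlock(&smi_info->msg_lock);          /* smi_event_handler(): release */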

@@ -104,7 +104,7 @@
#define DRV_MODULE_VERSION "1.61.15.v50.1"
#define DRV_MODULE_RELDATE "$DateTime: 2011/04/17 13:10:36 $"
#define DRV_MODULE_RELDATE "$DateTime: 2012/04/17 15:53:26 $"
#define BNX2X_BC_VER 0x040200
#if defined(BNX2X_UPSTREAM) && !defined(BNX2X_USE_INIT_VALUES) /* BNX2X_UPSTREAM */
@@ -13641,7 +13641,7 @@ static void poll_bnx2x(struct net_device *dev)
#endif
#else
/* HAVE_POLL_CONTROLLER is used in 2.4 kernels */
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
#if (defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)) && !defined(__VMKLNX__)
static void poll_bnx2x(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -13675,8 +13675,10 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_vlan_rx_register = bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
#if !defined(__VMKLNX__) /* BNX2X_UPSTREAM */
.ndo_poll_controller = poll_bnx2x,
#endif
#endif
};
#endif
@@ -13872,8 +13874,10 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
#endif
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
#if !defined(__VMKLNX__) /* BNX2X_UPSTREAM */
dev->poll_controller = poll_bnx2x;
#endif
#endif
#endif
dev->features |= NETIF_F_SG;
dev->features |= NETIF_F_HW_CSUM;

@@ -3738,6 +3738,9 @@ static irqreturn_t e1000_intr(int irq, void *data)
if (likely(netif_rx_schedule_prep(netdev, &adapter->rx_ring[0].napi))) {
#else /* defined(__VMKLNX__) */
if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
/* disable interrupts, without the synchronize_irq bit */
E1000_WRITE_REG(hw, E1000_IMC, ~0);
E1000_WRITE_FLUSH(&adapter->hw);
#endif /* !defined(__VMKLNX__) */
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;

@@ -959,8 +959,11 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
cleaned = (i == eop);
#ifdef __VMKLNX__
if (cleaned && (buffer_info->skb != NULL)) {
#else //!__VMKLNX__
if (cleaned) {
#endif //__VMKLNX__
struct sk_buff *skb = buffer_info->skb;
#ifdef NETIF_F_TSO
unsigned int segs, bytecount;

@@ -3496,15 +3496,11 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
/* Don't starve jumbo frames */
avg_wire_size = min(avg_wire_size, 3000);
#ifndef __VMKLNX__
/* Give a little boost to mid-size frames */
if ((avg_wire_size > 300) && (avg_wire_size < 1200))
new_val = avg_wire_size / 3;
else
new_val = avg_wire_size / 2;
#else
new_val = avg_wire_size;
#endif
set_itr_val:
if (new_val != q_vector->itr_val) {

@@ -107,7 +107,12 @@ int ixgbe_cna_enable(struct ixgbe_adapter *adapter)
cnadev->mtu = netdev->mtu;
cnadev->pdev = netdev->pdev;
cnadev->gso_max_size = GSO_MAX_SIZE;
#ifdef __VMKLNX__
cnadev->features = netdev->features | NETIF_F_CNA |
NETIF_F_HW_VLAN_FILTER;
#else
cnadev->features = netdev->features | NETIF_F_CNA;
#endif /*__VMKLNX__*/
/* set the MAC address to SAN mac address */
if (ixgbe_validate_mac_addr(adapter->hw.mac.san_addr) == 0)

@@ -2951,11 +2951,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
else
*speed = IXGBE_LINK_SPEED_100_FULL;
#ifndef __VMKLNX__
/* if link is down, zero out the current_mode */
if (*link_up == false) {
hw->fc.current_mode = ixgbe_fc_none;
hw->fc.fc_was_autonegged = false;
}
#endif
return 0;
}

@@ -74,7 +74,7 @@ static const char ixgbe_driver_string[] =
#define FPGA
#define DRV_VERSION "2.0.84.8.2-10vmw" DRIVERNAPI DRV_HW_PERF FPGA
#define DRV_VERSION "2.0.84.8.2-11vmw" DRIVERNAPI DRV_HW_PERF FPGA
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
/* ixgbe_pci_tbl - PCI Device ID Table
@@ -6095,7 +6095,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
adapter->dcb_cfg.rx_pba_cfg = pba_equal;
adapter->dcb_cfg.pfc_mode_enable = true;
adapter->dcb_cfg.pfc_mode_enable = false;
adapter->dcb_cfg.round_robin_enable = false;
adapter->dcb_set_bitmap = 0x00;
@@ -8281,18 +8281,24 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
DPRINTK(TX_ERR, INFO, "my (preferred) node is: %d\n", adapter->node);
#ifndef __VMKLNX__
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
hw->fc.requested_mode = ixgbe_fc_pfc;
hw->fc.current_mode = ixgbe_fc_pfc; /* init for ethtool output */
}
#endif
#ifdef MAX_SKB_FRAGS
#ifdef NETIF_F_HW_VLAN_TX
netdev->features = NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_HW_VLAN_TX |
#ifdef __VMKLNX__
NETIF_F_HW_VLAN_RX;
#else
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
#endif /*__VMKLNX__*/
#else
netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM;

@@ -29,7 +29,7 @@
/*
* Source file for NIC routines to access the Phantom hardware
*
* $Id: //depot/vmkdrivers/esx50u1/src_9/drivers/net/nx_nic/unm_nic_hw.c#1 $
* $Id: //depot/vmkdrivers/esx50u2/src_9/drivers/net/nx_nic/unm_nic_hw.c#1 $
*
*/
#include <linux/delay.h>

@@ -0,0 +1,86 @@
/****************************************************************************
* Copyright(c) 2000-2012 Broadcom Corporation, all rights reserved
* Proprietary and Confidential Information.
*
* This source file is the property of Broadcom Corporation, and
* may not be copied or distributed in any isomorphic form without
* the prior written consent of Broadcom Corporation.
*
* Name: esx_ioctl.h
*
* Description: Define data structures and prototypes to access ioctls
* supported by driver in VMware ESXi system.
*
* Author: cchsu
*
* $Log:
*
****************************************************************************/
#ifndef BRCM_VMWARE_IOCTL_H
#define BRCM_VMWARE_IOCTL_H
#ifdef __cplusplus
extern "C" {
#endif
#define BRCM_VMWARE_CIM_IOCTL 0x89f0
#define BRCM_VMWARE_CIM_CMD_ENABLE_NIC 0x0001
#define BRCM_VMWARE_CIM_CMD_DISABLE_NIC 0x0002
#define BRCM_VMWARE_CIM_CMD_REG_READ 0x0003
#define BRCM_VMWARE_CIM_CMD_REG_WRITE 0x0004
#define BRCM_VMWARE_CIM_CMD_GET_NIC_PARAM 0x0005
#define BRCM_VMWARE_CIM_CMD_GET_NIC_STATUS 0x0006
#define BRCM_VMWARE_CIM_CMD_CFG_REG_READ 0x0007
#define BRCM_VMWARE_CIM_CMD_CFG_REG_WRITE 0x0008
// Access type for Register Read/Write Ioctl
#define BRCM_VMWARE_REG_ACCESS_DIRECT 0x0000
#define BRCM_VMWARE_REG_ACCESS_PCI_CFG 0x0001
struct brcm_vmware_ioctl_reg_read_req
{
u32 reg_offset;
u32 reg_value;
u32 reg_access_type;
} __attribute__((packed));
struct brcm_vmware_ioctl_reg_write_req
{
u32 reg_offset;
u32 reg_value;
u32 reg_access_type;
} __attribute__((packed));
#define BRCM_VMWARE_GET_NIC_PARAM_VERSION 1
struct brcm_vmware_ioctl_get_nic_param_req
{
u32 version;
u32 mtu;
u8 current_mac_addr[8];
} __attribute__((packed));
#define BRCM_VMWARE_INVALID_NIC_STATUS 0xffffffff
struct brcm_vmware_ioctl_get_nic_status_req
{
u32 nic_status; // 1: Up, 0: Down
} __attribute__((packed));
struct brcm_vmware_ioctl_req
{
u32 cmd;
union {
// no struct for reset_nic command
struct brcm_vmware_ioctl_reg_read_req reg_read_req;
struct brcm_vmware_ioctl_reg_write_req reg_write_req;
struct brcm_vmware_ioctl_get_nic_param_req get_nic_param_req;
struct brcm_vmware_ioctl_get_nic_status_req get_nic_status_req;
} cmd_req;
} __attribute__((packed));
#ifdef __cplusplus
};
#endif
#endif
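
A hypothetical userspace sketch of driving this interface: it assumes the driver dispatches BRCM_VMWARE_CIM_IOCTL (0x89f0, i.e. SIOCDEVPRIVATE) through ifr_data, and the device name and register offset are illustrative only.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/types.h>

typedef __u32 u32;          /* the header uses kernel-style types */
typedef __u8  u8;
#include "esx_ioctl.h"

int main(void)
{
        struct brcm_vmware_ioctl_req req;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&req, 0, sizeof(req));
        req.cmd = BRCM_VMWARE_CIM_CMD_REG_READ;
        req.cmd_req.reg_read_req.reg_offset = 0x6804;   /* illustrative */
        req.cmd_req.reg_read_req.reg_access_type = BRCM_VMWARE_REG_ACCESS_DIRECT;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "vmnic0", IFNAMSIZ - 1);  /* illustrative */
        ifr.ifr_data = (char *)&req;

        if (ioctl(fd, BRCM_VMWARE_CIM_IOCTL, &ifr) == 0)
                printf("reg 0x%x = 0x%x\n",
                       req.cmd_req.reg_read_req.reg_offset,
                       req.cmd_req.reg_read_req.reg_value);
        close(fd);
        return 0;
}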

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -1,7 +1,18 @@
/* Copyright (C) 2008-2010 Broadcom Corporation. */
/* Copyright (C) 2008-2012 Broadcom Corporation. */
#include "tg3_flags.h"
#ifdef CONFIG_X86
#undef NET_IP_ALIGN
#define NET_IP_ALIGN 0
#endif
#ifdef BCM_HAS_IEEE1588_SUPPORT
#include <linux/timecompare.h>
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
#endif
#if !defined(__maybe_unused)
#define __maybe_unused /* unimplemented */
#endif
@@ -10,6 +21,10 @@
#define __iomem
#endif
#ifndef __always_unused
#define __always_unused
#endif
#ifndef __acquires
#define __acquires(x)
#endif
@@ -38,6 +53,10 @@
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#endif
#ifndef DIV_ROUND_UP
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
#endif
#ifndef __ALIGN_MASK
#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
#endif
@@ -210,7 +229,7 @@ static unsigned long msleep_interruptible(unsigned int msecs)
#define dev_driver_string(dev) "tg3"
#endif
#ifndef BCM_HAS_DEV_NAME
#if !defined(BCM_HAS_DEV_NAME) || defined(__VMKLNX__)
#define dev_name(dev) ""
#endif
@@ -480,6 +499,10 @@ typedef u32 pci_power_t;
#define PCI_D3hot 3
#endif
#ifndef PCI_D3cold
#define PCI_D3cold 4
#endif
#ifndef DMA_64BIT_MASK
#define DMA_64BIT_MASK ((u64) 0xffffffffffffffffULL)
#endif
@@ -516,25 +539,6 @@ static inline pci_power_t pci_choose_state(struct pci_dev *dev,
}
#endif
#ifndef BCM_HAS_PCI_PME_CAPABLE
static bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
int pm_cap;
u16 caps;
pm_cap = pci_find_capability(dev, PCI_CAP_ID_PM);
if (pm_cap == 0)
return false;
pci_read_config_word(dev, pm_cap + PCI_PM_PMC, &caps);
if (caps & PCI_PM_CAP_PME_D3cold)
return true;
return false;
}
#endif /* BCM_HAS_PCI_PME_CAPABLE */
#ifndef BCM_HAS_PCI_ENABLE_WAKE
static int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
{
@@ -561,6 +565,37 @@ static int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
}
#endif /* BCM_HAS_PCI_ENABLE_WAKE */
#ifndef BCM_HAS_PCI_WAKE_FROM_D3
#ifndef BCM_HAS_PCI_PME_CAPABLE
static bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
int pm_cap;
u16 caps;
bool ret = false;
pm_cap = pci_find_capability(dev, PCI_CAP_ID_PM);
if (pm_cap == 0)
goto done;
pci_read_config_word(dev, pm_cap + PCI_PM_PMC, &caps);
if (state == PCI_D3cold &&
(caps & PCI_PM_CAP_PME_D3cold))
ret = true;
done:
return ret;
}
#endif /* BCM_HAS_PCI_PME_CAPABLE */
static int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
return pci_pme_capable(dev, PCI_D3cold) ?
pci_enable_wake(dev, PCI_D3cold, enable) :
pci_enable_wake(dev, PCI_D3hot, enable);
}
#endif /* BCM_HAS_PCI_WAKE_FROM_D3 */
#ifndef BCM_HAS_PCI_SET_POWER_STATE
static int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
@@ -587,13 +622,28 @@ static int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
}
#endif /* BCM_HAS_PCI_SET_POWER_STATE */
#ifdef __VMKLNX__
/* VMWare disables CONFIG_PM in their kernel configs.
* This renders WOL inop, because device_may_wakeup() always returns false.
*/
#undef BCM_HAS_DEVICE_WAKEUP_API
#endif
#ifndef BCM_HAS_DEVICE_WAKEUP_API
#undef device_init_wakeup
#define device_init_wakeup(dev, val)
#undef device_can_wakeup
#define device_can_wakeup(dev) 1
#undef device_set_wakeup_enable
#define device_set_wakeup_enable(dev, val)
#undef device_may_wakeup
#define device_may_wakeup(dev) 1
#endif /* BCM_HAS_DEVICE_WAKEUP_API */
#ifndef BCM_HAS_DEVICE_SET_WAKEUP_CAPABLE
#define device_set_wakeup_capable(dev, val)
#endif /* BCM_HAS_DEVICE_SET_WAKEUP_CAPABLE */
#ifndef PCI_X_CMD_READ_2K
#define PCI_X_CMD_READ_2K 0x0008
@@ -628,13 +678,59 @@ static int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
#define PCI_EXP_DEVSTA_URD 0x08
#endif
#ifndef BCM_HAS_PCIE_SET_READRQ
#ifndef PCI_EXP_LNKSTA
#define PCI_EXP_LNKSTA 18
#endif
#ifndef PCI_EXP_LNKSTA_CLS
#define PCI_EXP_LNKSTA_CLS 0x000f
#endif
#ifndef PCI_EXP_LNKSTA_CLS_2_5GB
#define PCI_EXP_LNKSTA_CLS_2_5GB 0x01
#endif
#ifndef PCI_EXP_LNKSTA_CLS_5_0GB
#define PCI_EXP_LNKSTA_CLS_5_0GB 0x02
#endif
#ifndef PCI_EXP_LNKSTA_NLW
#define PCI_EXP_LNKSTA_NLW 0x03f0
#endif
#ifndef PCI_EXP_LNKSTA_NLW_SHIFT
#define PCI_EXP_LNKSTA_NLW_SHIFT 4
#endif
#ifndef PCI_EXP_DEVCTL
#define PCI_EXP_DEVCTL 8
#endif
#ifndef PCI_EXP_DEVCTL_READRQ
#define PCI_EXP_DEVCTL_READRQ 0x7000
#endif
#ifndef BCM_HAS_PCIE_GET_READRQ
int pcie_get_readrq(struct pci_dev *dev)
{
int ret, cap;
u16 ctl;
cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
if (!cap) {
ret = -EINVAL;
goto out;
}
ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
if (!ret)
ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
out:
return ret;
}
#endif /* BCM_HAS_PCIE_GET_READRQ */
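
For reference, the fallback decodes DEVCTL's read-request field as a power of two: if the register reads back 0x2000, (0x2000 & PCI_EXP_DEVCTL_READRQ) >> 12 is 2, so pcie_get_readrq() returns 128 << 2 = 512 bytes; the encoding spans 128 through 4096 bytes for field values 0 through 5.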
#ifndef BCM_HAS_PCIE_SET_READRQ
static inline int pcie_set_readrq(struct pci_dev *dev, int rq)
{
int cap, err = -EINVAL;
@@ -708,6 +804,10 @@ pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, u8 *buf)
}
#endif /* BCM_HAS_PCI_READ_VPD */
#ifndef PCI_VPD_RO_KEYWORD_CHKSUM
#define PCI_VPD_RO_KEYWORD_CHKSUM "RV"
#endif
#ifndef PCI_VPD_LRDT
#define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
#define PCI_VPD_LRDT_ID(x) (x | PCI_VPD_LRDT)
@@ -912,6 +1012,18 @@ static int tg3_set_tx_hw_csum(struct net_device *dev, u32 data)
#define BCM_NO_IPV6_CSUM 1
#endif
#ifndef NETIF_F_RXCSUM
#define NETIF_F_RXCSUM (1 << 29)
#endif
#ifndef NETIF_F_GRO
#define NETIF_F_GRO 16384
#endif
#ifndef NETIF_F_LOOPBACK
#define NETIF_F_LOOPBACK (1 << 31)
#endif
#ifdef NETIF_F_TSO
#ifndef NETIF_F_GSO
#define gso_size tso_size
@@ -925,6 +1037,18 @@ static int tg3_set_tx_hw_csum(struct net_device *dev, u32 data)
#define NETIF_F_TSO_ECN 0
#endif
#ifndef NETIF_F_ALL_TSO
#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#endif
#ifndef BCM_HAS_SKB_TX_TIMESTAMP
#define skb_tx_timestamp(skb)
#endif
#ifndef BCM_HAS_SKB_FRAG_SIZE
#define skb_frag_size(skb_frag) ((skb_frag)->size)
#endif
#if (LINUX_VERSION_CODE < 0x2060c)
static inline int skb_header_cloned(struct sk_buff *skb) { return 0; }
#endif
@@ -957,6 +1081,13 @@ static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
}
#endif
#ifndef BCM_HAS_TCP_HDRLEN
static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
{
return tcp_hdr(skb)->doff * 4;
}
#endif
#ifndef BCM_HAS_TCP_OPTLEN
static inline unsigned int tcp_optlen(const struct sk_buff *skb)
{
@@ -1185,10 +1316,6 @@ static struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
#endif /* NETIF_F_TSO */
#ifndef BCM_HAS_SKB_GET_QUEUE_MAPPING
#define skb_get_queue_mapping(skb) 0
#endif
#ifndef BCM_HAS_SKB_COPY_FROM_LINEAR_DATA
static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
void *to,
@@ -1198,10 +1325,37 @@ static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
}
#endif
#if TG3_TSO_SUPPORT != 0
#if defined(BCM_NO_TSO6)
static inline int skb_is_gso_v6(const struct sk_buff *skb)
{
return 0;
}
#else
#if !defined(BCM_HAS_SKB_IS_GSO_V6)
static inline int skb_is_gso_v6(const struct sk_buff *skb)
{
return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}
#endif
#endif
#endif
#ifndef BCM_HAS_SKB_CHECKSUM_NONE_ASSERT
static inline void skb_checksum_none_assert(struct sk_buff *skb)
{
skb->ip_summed = CHECKSUM_NONE;
}
#endif
#ifndef BCM_HAS_NETDEV_TX_T
typedef int netdev_tx_t;
#endif
#ifndef BCM_HAS_NETDEV_FEATURES_T
typedef u64 netdev_features_t;
#endif
#ifndef BCM_HAS_NETDEV_NAME
#define netdev_name(netdev) netdev->name
#endif
@@ -1308,7 +1462,7 @@ static inline void netif_tx_unlock(struct net_device *dev)
#endif /* BCM_HAS_STRUCT_NETDEV_QUEUE */
#ifndef BCM_HAS_ALLOC_ETHERDEV_MQ
#if !defined(BCM_HAS_ALLOC_ETHERDEV_MQ) || !defined(TG3_NAPI)
#define alloc_etherdev_mq(size, numqs) alloc_etherdev((size))
#endif
@@ -1322,6 +1476,10 @@ static inline void netif_tx_unlock(struct net_device *dev)
netif_receive_skb((skb))
#endif
#if !defined(BCM_HAS_SKB_GET_QUEUE_MAPPING) || !defined(TG3_NAPI)
#define skb_get_queue_mapping(skb) 0
#endif
#if (LINUX_VERSION_CODE < 0x020612)
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
unsigned int length)
@@ -1333,7 +1491,7 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
}
#endif
#if !defined(HAVE_NETDEV_PRIV) && (LINUX_VERSION_CODE != 0x020603) && (LINUX_VERSION_CODE != 0x020604) && (LINUX_VERSION_CODE != 0x20605)
#ifndef BCM_HAS_NETDEV_PRIV
static inline void *netdev_priv(struct net_device *dev)
{
return dev->priv;
@@ -1363,6 +1521,26 @@ static inline void netif_tx_disable(struct net_device *dev)
}
#endif /* OLD_NETIF */
#ifndef BCM_HAS_NETDEV_SENT_QUEUE
#define netdev_sent_queue(dev, bytes)
#endif
#ifndef BCM_HAS_NETDEV_COMPLETED_QUEUE
#define netdev_completed_queue(dev, pkts, bytes)
#endif
#ifndef BCM_HAS_NETDEV_RESET_QUEUE
#define netdev_reset_queue(dev_queue)
#endif
#ifndef BCM_HAS_NETIF_SET_REAL_NUM_TX_QUEUES
#define netif_set_real_num_tx_queues(dev, nq) ((dev)->real_num_tx_queues = (nq))
#endif
#ifndef BCM_HAS_NETIF_SET_REAL_NUM_RX_QUEUES
#define netif_set_real_num_rx_queues(dev, nq) 0
#endif
#ifndef netdev_mc_count
#define netdev_mc_count(dev) ((dev)->mc_count)
#endif
@@ -1394,20 +1572,24 @@ struct netdev_hw_addr {
u8 * addr;
struct dev_mc_list * curr;
};
#undef netdev_for_each_mc_addr
#define netdev_for_each_mc_addr(ha, dev) \
struct netdev_hw_addr mclist; \
ha = &mclist; \
for (mclist.curr = dev->mc_list; mclist.curr && (mclist.addr = &mclist.curr->dmi_addr[0]); mclist.curr = mclist.curr->next)
#endif /* BCM_HAS_NETDEV_HW_ADDR */
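
The compat iterator is used exactly like the modern API; a minimal sketch, after the shape of tg3's __tg3_set_rx_mode() (calc_crc is the driver's CRC helper):

struct netdev_hw_addr *ha;
u32 mc_filter[4] = { 0, };

netdev_for_each_mc_addr(ha, dev) {
        u32 crc = calc_crc(ha->addr, ETH_ALEN);
        u32 bit = ~crc & 0x7f;

        mc_filter[(bit & 0x60) >> 5] |= 1 << (bit & 0x1f);
}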
#ifndef BCM_HAS_GET_STATS64
#define rtnl_link_stats64 net_device_stats
#endif /* BCM_HAS_GET_STATS64 */
#ifndef VLAN_GROUP_ARRAY_SPLIT_PARTS
static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id,
struct net_device *dev)
{
if (vg)
vg->vlan_devices[vlan_id] = dev;
}
#ifndef BCM_HAS_EXTERNAL_LB_DONE
#define ETH_TEST_FL_EXTERNAL_LB (1 << 2)
#define ETH_TEST_FL_EXTERNAL_LB_DONE (1 << 3)
#endif
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_KERNEL_SUPPORTS_8021Q
#endif
#ifndef ETH_SS_TEST
@@ -1426,6 +1608,18 @@ static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id,
#ifndef MII_CTRL1000
#define MII_CTRL1000 0x09
#endif
#ifndef ADVERTISE_1000HALF
#define ADVERTISE_1000HALF 0x0100
#endif
#ifndef ADVERTISE_1000FULL
#define ADVERTISE_1000FULL 0x0200
#endif
#ifndef CTL1000_AS_MASTER
#define CTL1000_AS_MASTER 0x0800
#endif
#ifndef CTL1000_ENABLE_MASTER
#define CTL1000_ENABLE_MASTER 0x1000
#endif
#ifndef MII_STAT1000
#define MII_STAT1000 0x0a
#endif
@@ -1468,17 +1662,170 @@ static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id,
#ifndef LPA_PAUSE_ASYM
#define LPA_PAUSE_ASYM 0x0800
#endif
#ifndef ADVERTISE_1000HALF
#define ADVERTISE_1000HALF 0x0100
#ifndef LPA_1000FULL
#define LPA_1000FULL 0x0800
#endif
#ifndef ADVERTISE_1000FULL
#define ADVERTISE_1000FULL 0x0200
#ifndef LPA_1000HALF
#define LPA_1000HALF 0x0400
#endif
#ifndef ETHTOOL_FWVERS_LEN
#define ETHTOOL_FWVERS_LEN 32
#endif
#ifndef MDIO_MMD_AN
#define MDIO_MMD_AN 7
#endif
#ifndef MDIO_AN_EEE_ADV
#define MDIO_AN_EEE_ADV 60
#endif
#ifndef MDIO_AN_EEE_ADV_100TX
#define MDIO_AN_EEE_ADV_100TX 0x0002
#endif
#ifndef MDIO_AN_EEE_ADV_1000T
#define MDIO_AN_EEE_ADV_1000T 0x0004
#endif
#ifndef BCM_HAS_ETHTOOL_ADV_TO_MII_100BT
static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv)
{
u32 result = 0;
if (ethadv & ADVERTISED_10baseT_Half)
result |= ADVERTISE_10HALF;
if (ethadv & ADVERTISED_10baseT_Full)
result |= ADVERTISE_10FULL;
if (ethadv & ADVERTISED_100baseT_Half)
result |= ADVERTISE_100HALF;
if (ethadv & ADVERTISED_100baseT_Full)
result |= ADVERTISE_100FULL;
if (ethadv & ADVERTISED_Pause)
result |= ADVERTISE_PAUSE_CAP;
if (ethadv & ADVERTISED_Asym_Pause)
result |= ADVERTISE_PAUSE_ASYM;
return result;
}
static inline u32 mii_adv_to_ethtool_adv_t(u32 adv)
{
u32 result = 0;
if (adv & ADVERTISE_10HALF)
result |= ADVERTISED_10baseT_Half;
if (adv & ADVERTISE_10FULL)
result |= ADVERTISED_10baseT_Full;
if (adv & ADVERTISE_100HALF)
result |= ADVERTISED_100baseT_Half;
if (adv & ADVERTISE_100FULL)
result |= ADVERTISED_100baseT_Full;
if (adv & ADVERTISE_PAUSE_CAP)
result |= ADVERTISED_Pause;
if (adv & ADVERTISE_PAUSE_ASYM)
result |= ADVERTISED_Asym_Pause;
return result;
}
static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv)
{
u32 result = 0;
if (ethadv & ADVERTISED_1000baseT_Half)
result |= ADVERTISE_1000HALF;
if (ethadv & ADVERTISED_1000baseT_Full)
result |= ADVERTISE_1000FULL;
return result;
}
static inline u32 mii_ctrl1000_to_ethtool_adv_t(u32 adv)
{
u32 result = 0;
if (adv & ADVERTISE_1000HALF)
result |= ADVERTISED_1000baseT_Half;
if (adv & ADVERTISE_1000FULL)
result |= ADVERTISED_1000baseT_Full;
return result;
}
static inline u32 mii_lpa_to_ethtool_lpa_t(u32 lpa)
{
u32 result = 0;
if (lpa & LPA_LPACK)
result |= ADVERTISED_Autoneg;
return result | mii_adv_to_ethtool_adv_t(lpa);
}
static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa)
{
u32 result = 0;
if (lpa & LPA_1000HALF)
result |= ADVERTISED_1000baseT_Half;
if (lpa & LPA_1000FULL)
result |= ADVERTISED_1000baseT_Full;
return result;
}
static inline u32 ethtool_adv_to_mii_adv_x(u32 ethadv)
{
u32 result = 0;
if (ethadv & ADVERTISED_1000baseT_Half)
result |= ADVERTISE_1000XHALF;
if (ethadv & ADVERTISED_1000baseT_Full)
result |= ADVERTISE_1000XFULL;
if (ethadv & ADVERTISED_Pause)
result |= ADVERTISE_1000XPAUSE;
if (ethadv & ADVERTISED_Asym_Pause)
result |= ADVERTISE_1000XPSE_ASYM;
return result;
}
static inline u32 mii_adv_to_ethtool_adv_x(u32 adv)
{
u32 result = 0;
if (adv & ADVERTISE_1000XHALF)
result |= ADVERTISED_1000baseT_Half;
if (adv & ADVERTISE_1000XFULL)
result |= ADVERTISED_1000baseT_Full;
if (adv & ADVERTISE_1000XPAUSE)
result |= ADVERTISED_Pause;
if (adv & ADVERTISE_1000XPSE_ASYM)
result |= ADVERTISED_Asym_Pause;
return result;
}
static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa)
{
u32 result = 0;
if (lpa & LPA_LPACK)
result |= ADVERTISED_Autoneg;
return result | mii_adv_to_ethtool_adv_x(lpa);
}
#endif /* BCM_HAS_ETHTOOL_ADV_TO_MII_100BT */
#ifndef BCM_HAS_ETHTOOL_RXFH_INDIR_DEFAULT
static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
{
return index % n_rx_rings;
}
#endif /* BCM_HAS_ETHTOOL_RXFH_INDIR_DEFAULT */
#ifndef BCM_HAS_MII_RESOLVE_FLOWCTRL_FDX
#ifndef FLOW_CTRL_TX
#define FLOW_CTRL_TX 0x01
@@ -1490,18 +1837,12 @@ static u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv)
{
u8 cap = 0;
if (lcladv & ADVERTISE_PAUSE_CAP) {
if (lcladv & ADVERTISE_PAUSE_ASYM) {
if (rmtadv & LPA_PAUSE_CAP)
if (lcladv & rmtadv & ADVERTISE_PAUSE_CAP) {
cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
else if (rmtadv & LPA_PAUSE_ASYM)
cap = FLOW_CTRL_RX;
} else {
if (rmtadv & LPA_PAUSE_CAP)
cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
}
} else if (lcladv & ADVERTISE_PAUSE_ASYM) {
if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
if (lcladv & LPA_PAUSE_CAP)
cap = FLOW_CTRL_RX;
if (rmtadv & LPA_PAUSE_CAP)
cap = FLOW_CTRL_TX;
}
@@ -1509,6 +1850,24 @@ static u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv)
}
#endif /* BCM_HAS_MII_RESOLVE_FLOWCTRL_FDX */
#ifndef BCM_HAS_MII_ADVERTISE_FLOWCTRL
static u16 mii_advertise_flowctrl(u8 flow_ctrl)
{
u16 miireg;
if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
miireg = ADVERTISE_PAUSE_CAP;
else if (flow_ctrl & FLOW_CTRL_TX)
miireg = ADVERTISE_PAUSE_ASYM;
else if (flow_ctrl & FLOW_CTRL_RX)
miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
else
miireg = 0;
return miireg;
}
#endif /* BCM_HAS_MII_ADVERTISE_FLOWCTRL */
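
Together with the mii_resolve_flowctrl_fdx() fix above, this helper implements the IEEE 802.3 pause resolution. A worked example under the definitions in this file: a MAC wanting TX-only pause advertises asymmetric pause only, and against a partner advertising both symmetric and asymmetric pause, resolution grants TX-only flow control.

u16 lcl = mii_advertise_flowctrl(FLOW_CTRL_TX);   /* == ADVERTISE_PAUSE_ASYM */
u16 rmt = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
u8  cap = mii_resolve_flowctrl_fdx(lcl, rmt);     /* == FLOW_CTRL_TX */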
#ifdef BCM_INCLUDE_PHYLIB_SUPPORT
#ifndef PHY_ID_BCM50610
@@ -1574,3 +1933,18 @@ void mdiobus_free(struct mii_bus *bus)
#endif
#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */
#ifndef BCM_HAS_ETHTOOL_CMD_SPEED
static inline __u32 ethtool_cmd_speed(struct ethtool_cmd *ep)
{
return ep->speed;
}
#endif /* BCM_HAS_ETHTOOL_CMD_SPEED */
#ifndef BCM_HAS_ETHTOOL_CMD_SPEED_SET
static inline __u32 ethtool_cmd_speed_set(struct ethtool_cmd *ep, __u32 speed)
{
ep->speed = speed;
return 0;
}
#endif /* BCM_HAS_ETHTOOL_CMD_SPEED_SET */

@@ -1,4 +1,76 @@
/* Copyright (C) 2009-2010 Broadcom Corporation. */
/* Copyright (C) 2009-2012 Broadcom Corporation. */
#ifndef BCM_HAS_PCI_PCIE_CAP
static inline int pci_pcie_cap(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct tg3 *tp = netdev_priv(dev);
return tp->pcie_cap;
}
#endif
#ifndef BCM_HAS_PCI_IS_PCIE
static inline bool pci_is_pcie(struct pci_dev *dev)
{
return !!pci_pcie_cap(dev);
}
#endif
#ifndef BCM_HAS_SKB_FRAG_DMA_MAP
#define skb_frag_dma_map(x, frag, y, len, z) \
pci_map_page(tp->pdev, (frag)->page, \
(frag)->page_offset, (len), PCI_DMA_TODEVICE)
#endif
#ifdef SIMPLE_DEV_PM_OPS
#define tg3_invalid_pci_state(tp, state) false
#define tg3_pci_save_state(tp)
#define tg3_pci_restore_state(tp)
#else /* SIMPLE_DEV_PM_OPS */
#if (LINUX_VERSION_CODE < 0x2060b)
static bool tg3_invalid_pci_state(struct tg3 *tp, u32 state)
{
bool ret = true;
pci_power_t target_state;
target_state = pci_choose_state(tp->pdev, state);
if (target_state != PCI_D3hot || target_state != PCI_D3cold)
ret = false;
return ret;
}
#else
static bool tg3_invalid_pci_state(struct tg3 *tp, pm_message_t state)
{
bool ret = true;
pci_power_t target_state;
#ifdef BCM_HAS_PCI_TARGET_STATE
target_state = tp->pdev->pm_cap ? pci_target_state(tp->pdev) : PCI_D3hot;
#else
target_state = pci_choose_state(tp->pdev, state);
#endif
if (target_state != PCI_D3hot || target_state != PCI_D3cold)
ret = false;
return ret;
}
#endif
#if (LINUX_VERSION_CODE < 0x2060a)
#define tg3_pci_save_state(tp) pci_save_state(tp->pdev, tp->pci_cfg_state)
#define tg3_pci_restore_state(tp) pci_restore_state(tp->pdev, tp->pci_cfg_state)
#else
#define tg3_pci_save_state(tp) pci_save_state(tp->pdev)
#define tg3_pci_restore_state(tp) pci_restore_state(tp->pdev)
#endif
#endif /* SIMPLE_DEV_PM_OPS */
#ifdef BCM_HAS_NEW_PCI_DMA_MAPPING_ERROR
#define tg3_pci_dma_mapping_error(pdev, mapping) pci_dma_mapping_error((pdev), (mapping))
@@ -8,6 +80,13 @@
#define tg3_pci_dma_mapping_error(pdev, mapping) 0
#endif
#ifndef BCM_HAS_HW_FEATURES
#define hw_features features
#endif
#ifndef BCM_HAS_VLAN_FEATURES
#define vlan_features features
#endif
#ifdef HAVE_POLL_CONTROLLER
#define CONFIG_NET_POLL_CONTROLLER
@@ -16,8 +95,8 @@
static inline void tg3_5780_class_intx_workaround(struct tg3 *tp)
{
#ifndef BCM_HAS_INTX_MSI_WORKAROUND
if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
if (tg3_flag(tp, 5780_CLASS) &&
tg3_flag(tp, USING_MSI))
tg3_enable_intx(tp->pdev);
#endif
}
@@ -28,6 +107,322 @@ static inline void tg3_5780_class_intx_workaround(struct tg3 *tp)
#define tg3_update_trans_start(dev) ((dev)->trans_start = jiffies)
#endif
#ifndef BCM_HAS_VLAN_HWACCEL_PUT_TAG
#define TG3_TO_INT(Y) ((int)((unsigned long long)(Y) & (SMP_CACHE_BYTES - 1)))
#define TG3_COMPAT_VLAN_ALLOC_LEN (SMP_CACHE_BYTES + VLAN_HLEN)
#define TG3_COMPAT_VLAN_RESERVE(addr) (SKB_DATA_ALIGN((addr) + VLAN_HLEN) - (addr))
#else
#define TG3_COMPAT_VLAN_ALLOC_LEN 0
#define TG3_COMPAT_VLAN_RESERVE(addr) 0
#endif
#ifdef BCM_KERNEL_SUPPORTS_8021Q
#ifndef BCM_HAS_VLAN_HWACCEL_PUT_TAG
#undef TG3_RAW_IP_ALIGN
#define TG3_RAW_IP_ALIGN (2 + VLAN_HLEN)
static inline struct sk_buff *tg3_vlan_hwaccel_put_tag(struct sk_buff *skb,
u16 vlan_tci)
{
struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
__skb_push(skb, VLAN_HLEN);
memmove(ve, skb->data + VLAN_HLEN, ETH_ALEN * 2);
ve->h_vlan_proto = htons(ETH_P_8021Q);
ve->h_vlan_TCI = htons(vlan_tci);
skb->protocol = htons(ETH_P_8021Q);
return skb;
}
#endif /* BCM_HAS_VLAN_HWACCEL_PUT_TAG */
#ifdef BCM_USE_OLD_VLAN_INTERFACE
static void __tg3_set_rx_mode(struct net_device *);
static inline void tg3_netif_start(struct tg3 *tp);
static inline void tg3_netif_stop(struct tg3 *tp);
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static inline void tg3_full_unlock(struct tg3 *tp);
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
struct tg3 *tp = netdev_priv(dev);
if (!netif_running(dev)) {
tp->vlgrp = grp;
return;
}
tg3_netif_stop(tp);
tg3_full_lock(tp, 0);
tp->vlgrp = grp;
/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
__tg3_set_rx_mode(dev);
tg3_netif_start(tp);
tg3_full_unlock(tp);
}
#ifndef BCM_HAS_NET_DEVICE_OPS
#ifndef BCM_HAS_VLAN_GROUP_SET_DEVICE
static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id,
struct net_device *dev)
{
if (vg)
vg->vlan_devices[vlan_id] = dev;
}
#endif
static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct tg3 *tp = netdev_priv(dev);
if (netif_running(dev))
tg3_netif_stop(tp);
tg3_full_lock(tp, 0);
vlan_group_set_device(tp->vlgrp, vid, NULL);
tg3_full_unlock(tp);
if (netif_running(dev))
tg3_netif_start(tp);
}
#endif /* BCM_HAS_NET_DEVICE_OPS */
#endif /* BCM_USE_OLD_VLAN_INTERFACE */
#endif /* BCM_KERNEL_SUPPORTS_8021Q */
#ifndef BCM_HAS_NETDEV_UPDATE_FEATURES
static u32 tg3_get_rx_csum(struct net_device *dev)
{
return (dev->features & NETIF_F_RXCSUM) != 0;
}
static int tg3_set_rx_csum(struct net_device *dev, u32 data)
{
struct tg3 *tp = netdev_priv(dev);
/* BROKEN_CHECKSUMS */
if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0) {
if (data != 0)
return -EINVAL;
return 0;
}
spin_lock_bh(&tp->lock);
if (data)
dev->features |= NETIF_F_RXCSUM;
else
dev->features &= ~NETIF_F_RXCSUM;
spin_unlock_bh(&tp->lock);
return 0;
}
#ifdef BCM_HAS_SET_TX_CSUM
static int tg3_set_tx_csum(struct net_device *dev, u32 data)
{
struct tg3 *tp = netdev_priv(dev);
/* BROKEN_CHECKSUMS */
if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0) {
if (data != 0)
return -EINVAL;
return 0;
}
if (tg3_flag(tp, 5755_PLUS))
#if defined(BCM_HAS_ETHTOOL_OP_SET_TX_IPV6_CSUM)
ethtool_op_set_tx_ipv6_csum(dev, data);
#elif defined(BCM_HAS_ETHTOOL_OP_SET_TX_HW_CSUM)
ethtool_op_set_tx_hw_csum(dev, data);
#else
tg3_set_tx_hw_csum(dev, data);
#endif
else
ethtool_op_set_tx_csum(dev, data);
return 0;
}
#endif
#if TG3_TSO_SUPPORT != 0
static int tg3_set_tso(struct net_device *dev, u32 value)
{
struct tg3 *tp = netdev_priv(dev);
if (!tg3_flag(tp, TSO_CAPABLE)) {
if (value)
return -EINVAL;
return 0;
}
if ((dev->features & NETIF_F_IPV6_CSUM) &&
(tg3_flag(tp, HW_TSO_2) ||
tg3_flag(tp, HW_TSO_3))) {
if (value) {
dev->features |= NETIF_F_TSO6;
if (tg3_flag(tp, HW_TSO_3) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
dev->features |= NETIF_F_TSO_ECN;
} else
dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
}
return ethtool_op_set_tso(dev, value);
}
#endif
static void netdev_update_features(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
if (dev->mtu > ETH_DATA_LEN) {
if (tg3_flag(tp, 5780_CLASS)) {
#if TG3_TSO_SUPPORT != 0
ethtool_op_set_tso(dev, 0);
#endif
}
}
}
#endif /* BCM_HAS_NETDEV_UPDATE_FEATURES */
#ifndef BCM_HAS_SET_PHYS_ID
enum ethtool_phys_id_state {
ETHTOOL_ID_INACTIVE,
ETHTOOL_ID_ACTIVE,
ETHTOOL_ID_ON,
ETHTOOL_ID_OFF
};
static int tg3_set_phys_id(struct net_device *dev,
enum ethtool_phys_id_state state);
static int tg3_phys_id(struct net_device *dev, u32 data)
{
struct tg3 *tp = netdev_priv(dev);
int i;
if (!netif_running(tp->dev))
return -EAGAIN;
if (data == 0)
data = UINT_MAX / 2;
for (i = 0; i < (data * 2); i++) {
if ((i % 2) == 0)
tg3_set_phys_id(dev, ETHTOOL_ID_ON);
else
tg3_set_phys_id(dev, ETHTOOL_ID_OFF);
if (msleep_interruptible(500))
break;
}
tg3_set_phys_id(dev, ETHTOOL_ID_INACTIVE);
return 0;
}
#endif /* BCM_HAS_SET_PHYS_ID */
#ifdef BCM_HAS_GET_RXFH_INDIR
#ifndef BCM_HAS_GET_RXFH_INDIR_SIZE
static int tg3_get_rxfh_indir(struct net_device *dev,
struct ethtool_rxfh_indir *indir)
{
struct tg3 *tp = netdev_priv(dev);
int i;
if (!tg3_flag(tp, SUPPORT_MSIX))
return -EINVAL;
if (!indir->size) {
indir->size = TG3_RSS_INDIR_TBL_SIZE;
return 0;
}
if (indir->size != TG3_RSS_INDIR_TBL_SIZE)
return -EINVAL;
for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
indir->ring_index[i] = tp->rss_ind_tbl[i];
return 0;
}
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt);
static void tg3_rss_write_indir_tbl(struct tg3 *tp);
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static inline void tg3_full_unlock(struct tg3 *tp);
static int tg3_set_rxfh_indir(struct net_device *dev,
const struct ethtool_rxfh_indir *indir)
{
struct tg3 *tp = netdev_priv(dev);
size_t i;
if (!tg3_flag(tp, SUPPORT_MSIX))
return -EINVAL;
if (!indir->size) {
tg3_flag_clear(tp, USER_INDIR_TBL);
tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
} else {
int limit;
/* Validate size and indices */
if (indir->size != TG3_RSS_INDIR_TBL_SIZE)
return -EINVAL;
if (netif_running(dev))
limit = tp->irq_cnt;
else {
limit = num_online_cpus();
if (limit > TG3_IRQ_MAX_VECS_RSS)
limit = TG3_IRQ_MAX_VECS_RSS;
}
/* The first interrupt vector only
* handles link interrupts.
*/
limit -= 1;
/* Check the indices in the table.
* Leave the existing table unmodified
* if an error is detected.
*/
for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
if (indir->ring_index[i] >= limit)
return -EINVAL;
tg3_flag_set(tp, USER_INDIR_TBL);
for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
tp->rss_ind_tbl[i] = indir->ring_index[i];
}
if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
return 0;
/* It is legal to write the indirection
* table while the device is running.
*/
tg3_full_lock(tp, 0);
tg3_rss_write_indir_tbl(tp);
tg3_full_unlock(tp);
return 0;
}
#endif /* !BCM_HAS_GET_RXFH_INDIR_SIZE */
#endif /* BCM_HAS_GET_RXFH_INDIR */
#ifdef __VMKLNX__
/**
@@ -78,6 +473,4 @@ void *memmove(void *dest, const void *src, size_t count)
}
return dest;
}
#endif

@@ -1,4 +1,4 @@
/* Copyright (C) 2009-2010 Broadcom Corporation. */
/* Copyright (C) 2009-2012 Broadcom Corporation. */
#ifdef NETIF_F_TSO
#define TG3_TSO_SUPPORT 1

@@ -11,6 +11,8 @@
#define BCM_HAS_PCI_CHOOSE_STATE
#define BCM_HAS_PCI_ENABLE_WAKE
#define BCM_HAS_PCI_SET_POWER_STATE
#define BCM_HAS_PCI_EEH_SUPPORT
#define BCM_HAS_DEVICE_WAKEUP_API
#define BCM_HAS_PCI_DMA_MAPPING_ERROR
#define BCM_HAS_PCIE_SET_READRQ
#define BCM_HAS_PRINT_MAC
@@ -22,9 +24,13 @@
#define BCM_HAS_IP_HDR
#define BCM_HAS_IP_HDRLEN
#define BCM_HAS_TCP_HDR
#define BCM_HAS_TCP_HDRLEN
#define BCM_HAS_TCP_OPTLEN
#define BCM_HAS_STRUCT_NETDEV_QUEUE
#define BCM_HAS_NETDEV_PRIV
#define BCM_USE_OLD_VLAN_INTERFACE
#define BCM_HAS_ALLOC_ETHERDEV_MQ
#define BCM_HAS_NETIF_TX_LOCK
#define BCM_HAS_DEV_DRIVER_STRING
#define BCM_HAS_DEV_NAME
#define BCM_HAS_DMA_DATA_DIRECTION

File diff suppressed because it is too large

@@ -1,59 +1,121 @@
/* Copyright (C) 2010 Broadcom Corporation.
* Portions Copyright (C) VMware, Inc. 2007-2011. All Rights Reserved.
/* Copyright (C) 2010 - 2012 Broadcom Corporation.
* Portions Copyright (C) VMware, Inc. 2007-2012. All Rights Reserved.
*/
struct tg3;
/*
* On ESX the wmb() instruction is defined to only a compiler barrier.
* The macro wmb() needs to be overridden to properly synchronize memory.
*/
#if defined(__VMKLNX__)
#undef wmb
#define wmb() asm volatile("sfence" ::: "memory")
#endif
static int psod_on_tx_timeout = 0;
module_param(psod_on_tx_timeout, int, 0);
MODULE_PARM_DESC(psod_on_tx_timeout, "For debugging purposes, crash the system "
" when a tx timeout occurs");
#ifndef TG3_VMWARE_NETQ_DISABLE
#define TG3_VMWARE_NETQ_ENABLE
#define TG3_MAX_NIC 32
#define TG3_OPTION_UNSET -1
static unsigned int __devinitdata tg3_netq_index;
static int __devinitdata tg3_netq_force[TG3_MAX_NIC+1] =
{ [0 ... TG3_MAX_NIC] = TG3_OPTION_UNSET };
module_param_array_named(force_netq, tg3_netq_force, int, NULL, 0);
MODULE_PARM_DESC(force_netq,
"Force the maximum number of NetQueues available per port (NetQueue capable devices only)");
static const struct {
const char string[ETH_GSTRING_LEN];
} tg3_vmware_ethtool_stats_keys[] = {
{ "[0]: rx_packets (sw)" },
{ "[0]: rx_packets (hw)" },
{ "[0]: rx_bytes (sw)" },
{ "[0]: rx_bytes (hw)" },
{ "[0]: rx_errors (sw)" },
{ "[0]: rx_errors (hw)" },
{ "[0]: rx_crc_errors" },
{ "[0]: rx_frame_errors" },
{ "[0]: tx_bytes" },
{ "[0]: tx_ucast_packets" },
{ "[0]: tx_mcast_packets" },
{ "[0]: tx_bcast_packets" },
};
/*
* Pack this structure so that we don't get an extra 8 bytes
* should this driver be built for a 128-bit CPU. :)
*/
struct tg3_netq_stats {
u64 rx_packets_sw;
u64 rx_packets_hw;
u64 rx_bytes_sw;
u64 rx_bytes_hw;
u64 rx_errors_sw;
u64 rx_errors_hw;
u64 rx_crc_errors;
u64 rx_frame_errors;
u64 tx_bytes;
u64 tx_ucast_packets;
u64 tx_mcast_packets;
u64 tx_bcast_packets;
} __attribute__((packed));
#define TG3_NETQ_NUM_STATS (sizeof(struct tg3_netq_stats)/sizeof(u64))
struct tg3_netq_napi {
u32 flags;
#define TG3_NETQ_TXQ_ALLOCATED 0x0001
#define TG3_NETQ_RXQ_ALLOCATED 0x0002
#define TG3_NETQ_RXQ_ENABLED 0x0008
#define TG3_NETQ_TXQ_FREE_STATE 0x0010
#define TG3_NETQ_RXQ_FREE_STATE 0x0020
struct tg3_netq_stats stats;
struct net_device_stats net_stats;
};
struct tg3_vmware_netq {
u16 n_tx_queues_allocated;
u16 n_rx_queues_allocated;
u32 index;
};
static void tg3_vmware_fetch_stats(struct tg3 *tp);
static void tg3_disable_prod_rcbs(struct tg3 *tp, u32 ring);
static void tg3_setup_prod_mboxes(struct tg3 *tp, u32 ring);
static void tg3_netq_init(struct tg3 *tp);
static void tg3_netq_free_all_qs(struct tg3 *tp);
static void tg3_netq_invalidate_state(struct tg3 *tp);
static void tg3_netq_restore(struct tg3 *tp);
static void tg3_netq_limit_dflt_queue_counts(struct tg3 *tp);
static u32 tg3_netq_tune_vector_count(struct tg3 *tp);
static int tg3_netq_stats_size(struct tg3 *tp);
static void tg3_netq_stats_get_strings(struct tg3 *tp, u8 *buf);
static void tg3_netq_stats_get(struct tg3 *tp, u64 *tmp_stats);
static void tg3_netq_stats_clear(struct tg3 *tp);
#endif /* TG3_VMWARE_NETQ_ENABLE */
struct tg3_vmware {
u32 rx_mode_reset_counter;
#ifdef TG3_VMWARE_NETQ_ENABLE
struct tg3_vmware_netq netq;
#endif
};
#if !defined(TG3_VMWARE_BMAPILNX_DISABLE)
#define SIOTG3CIM 0x89F0
#define TG3_VMWARE_CIM_CMD_ENABLE_NIC 0x0001
#define TG3_VMWARE_CIM_CMD_DISABLE_NIC 0x0002
#define TG3_VMWARE_CIM_CMD_REG_READ 0x0003
#define TG3_VMWARE_CIM_CMD_REG_WRITE 0x0004
#define TG3_VMWARE_CIM_CMD_GET_NIC_PARAM 0x0005
#define TG3_VMWARE_CIM_CMD_GET_NIC_STATUS 0x0006
struct tg3_ioctl_reg_read_req
{
u32 reg_offset;
u32 reg_value;
} __attribute__((packed));
struct tg3_ioctl_reg_write_req
{
u32 reg_offset;
u32 reg_value;
} __attribute__((packed));
struct tg3_ioctl_get_nic_param_req
{
u32 version;
u32 mtu;
u8 current_mac_addr[8];
} __attribute__((packed));
struct tg3_ioctl_get_nic_status_req
{
u32 nic_status; // 1: Up, 0: Down
} __attribute__((packed));
struct tg3_ioctl_req
{
u32 cmd;
union {
// no struct for reset_nic command
struct tg3_ioctl_reg_read_req reg_read;
struct tg3_ioctl_reg_write_req reg_write;
struct tg3_ioctl_get_nic_param_req get_nic_param;
struct tg3_ioctl_get_nic_status_req get_nic_status;
} cmd_req;
} __attribute__((packed));
#include "esx_ioctl.h"
static int
tg3_vmware_ioctl_cim(struct net_device *dev, struct ifreq *ifr);

@@ -1807,6 +1807,11 @@ static int fcoe_ctlr_recv_vlan_notification(struct fcoe_ctlr *fip, struct fip_he
if (!old_vlan_valid && (new_vlan != 0)) {
struct net_device *netdev;
if (new_vlan > VLAN_MAX_VALID_VID) {
LIBFCOE_FIP_DBG(fip, "invalid vlan id %d, ignored\n", new_vlan);
return -1;
}
fip->vlan_id = new_vlan;
netdev = (fip->lp)->tt.get_cna_netdev(fip->lp);

@@ -2176,8 +2176,13 @@ lpfc_get_host_port_state(struct Scsi_Host *shost)
case LPFC_LINK_UP:
case LPFC_CLEAR_LA:
case LPFC_HBA_READY:
/* Links up, beyond this port_type reports state */
fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
/* Links up, reports port state accordingly */
if (vport->port_state < LPFC_VPORT_READY)
fc_host_port_state(shost) =
FC_PORTSTATE_BYPASSED;
else
fc_host_port_state(shost) =
FC_PORTSTATE_ONLINE;
break;
case LPFC_HBA_ERROR:
fc_host_port_state(shost) = FC_PORTSTATE_ERROR;

@@ -2287,10 +2287,12 @@ mptsas_test_unit_ready(MPT_ADAPTER *ioc, u8 channel, u8 id, u16 count)
}
/*
* LU becoming ready, or
* LU hasn't self-configured yet
* LU hasn't self-configured yet, or
* LU is in asymmetric access state transition
*/
if ((asc == 0x04 && ascq == 0x01) ||
(asc == 0x04 && ascq == 0x11) ||
(asc == 0x04 && ascq == 0x0a) ||
asc == 0x3e) {
state = DEVICE_RETRY;
break;

@@ -1008,17 +1008,6 @@ static int usbhid_start(struct hid_device *hid)
}
}
init_waitqueue_head(&usbhid->wait);
INIT_WORK(&usbhid->reset_work, hid_reset);
INIT_WORK(&usbhid->restart_work, __usbhid_restart_queues);
setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
spin_lock_init(&usbhid->lock);
spin_lock_init(&usbhid->lock);
usbhid->intf = intf;
usbhid->ifnum = interface->desc.bInterfaceNumber;
usbhid->urbctrl = usb_alloc_urb(0, GFP_KERNEL);
if (!usbhid->urbctrl) {
ret = -ENOMEM;
@@ -1214,12 +1203,20 @@ static int hid_probe(struct usb_interface *intf, const struct usb_device_id *id)
hid->driver_data = usbhid;
usbhid->hid = hid;
usbhid->intf = intf;
usbhid->ifnum = interface->desc.bInterfaceNumber;
#if defined(__VMKLNX__)
if (interface->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_KEYBOARD)
vmklnx_hcd_register_kbd_handler(intf);
#endif
init_waitqueue_head(&usbhid->wait);
INIT_WORK(&usbhid->reset_work, hid_reset);
INIT_WORK(&usbhid->restart_work, __usbhid_restart_queues);
setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
spin_lock_init(&usbhid->lock);
ret = hid_add_device(hid);
if (ret) {
if (ret != -ENODEV)

@@ -1,5 +1,5 @@
/* ****************************************************************
* Portions Copyright 1998, 2010 VMware, Inc.
* Portions Copyright 1998, 2010, 2012 VMware, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -2288,7 +2288,7 @@ retry:
adapter->constraints.sgMaxEntries = MAX_SEGMENTS;
adapter->constraints.sgElemSizeMult = SECTOR_SIZE;
adapter->constraints.sgElemAlignment = 0;
adapter->constraints.sgElemStraddle = 0;
adapter->constraints.sgElemStraddle = DMA_32BIT_MASK + 1;
VMK_ASSERT(strlen(vmk_NameToString(&adapterName)) < sizeof(adapter->name));
vmk_NameFormat(&adapter->name, "%s", vmk_NameToString(&adapterName));
@@ -2315,10 +2315,10 @@
return retval;
unrollBlockAdapter:
kfree(&pBlockAdapter);
kfree(pBlockAdapter);
unrollBlockDevice:
kfree(&blockDevices[major]);
kfree(blockDevices[major]);
blockDevices[major] = NULL;
return -ENOMEM;
@@ -2372,7 +2372,7 @@ unregister_blkdev(unsigned int major, const char *name)
}
if (bd->disks) {
kfree(&bd->disks);
kfree(bd->disks);
}
status = vmk_ScsiUnregisterAdapter(bd->adapter);
@@ -2382,10 +2382,10 @@
/* Free up the block mgmt Structure */
bd->adapter->mgmtAdapter.transport = VMK_STORAGE_ADAPTER_TRANSPORT_UNKNOWN;
kfree(&bd->adapter->mgmtAdapter.t.block);
kfree(bd->adapter->mgmtAdapter.t.block);
vmk_ScsiFreeAdapter(bd->adapter);
kfree(&bd);
kfree(bd);
blockDevices[major] = NULL;
VMKLNX_DEBUG(2, "Device %s unregistered.", name);
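
The kfree() changes in this file all correct the same slip: the old code passed the address of the local pointer variable, not the allocation it points to. A minimal illustration (names and size illustrative):

void *p = kzalloc(128, GFP_KERNEL);

kfree(&p);   /* old, wrong: "frees" the stack slot holding the pointer */
kfree(p);    /* new, correct: frees the allocation itself */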

@@ -262,6 +262,12 @@ map_skb_to_pkt(struct sk_buff *skb)
if (skb_shinfo(skb)->gso_type != 0) {
switch (skb_shinfo(skb)->gso_type) {
case SKB_GSO_TCPV4:
if (unlikely(skb_shinfo(skb)->gso_size == 0)) {
printk("dropping LRO packet with zero gso_size\n");
VMK_ASSERT(VMK_FALSE);
goto drop;
}
status = vmk_PktSetLargeTcpPacket(pkt, skb_shinfo(skb)->gso_size);
VMK_ASSERT(status == VMK_OK);
break;
@@ -451,7 +457,7 @@ netif_receive_skb(struct sk_buff *skb)
if (vmk_NetPollGetCurrent(&pollPriv) == VMK_OK) {
napi = (struct napi_struct *)vmk_NetPollGetPrivate(pollPriv);
}
if (!napi) {
if (!napi || vmk_SystemCheckState(VMK_SYSTEM_STATE_PANIC)) {
pkt = skb->pkt;
status = map_skb_to_pkt(skb);
if (likely(status == NET_RX_SUCCESS)) {

@@ -3177,9 +3177,12 @@ SCSILinuxWorldletFn(vmk_Worldlet wdt, void *data,
yield += vmk_TimerUSToTC(100);
}
vmk_WorldletAffinityTrackerCheck(tls->tracker, now);
spin_unlock_irqrestore(&tls->lock, flags);
if (tls->tracker != NULL) {
vmk_WorldletAffinityTrackerCheck(tls->tracker, now);
}
vInfo = SCSILinuxGetVectorInfo(tls->vmk26Adap, tls->activatingVector);
if (vInfo != NULL && tls->worldlet != NULL) {
vmk_WorldletInterruptTracker *intTracker = vInfo->intTracker;

@@ -165,10 +165,8 @@ static void vmklnx_scsi_update_lun_path(struct scsi_device *sdev, void *data);
* scsi/scsi_host.h
* \par ESX Deviation Notes:
* This interface will assume a default value for
* Scsi_Host->dma_boundary to be 0 if the Scsi Host template does
* not specify a value for dma_boundary. This is different from
* the linux behavior which defaults to a 4G boundary in a similar
* situation.
* Scsi_Host->dma_boundary to be 4G if the Scsi Host template does
* not specify it, this is to make it compatible with Linux behavior
* \sa None.
**********************************************************************
*/
@@ -181,10 +179,8 @@ static void vmklnx_scsi_update_lun_path(struct scsi_device *sdev, void *data);
*
* ESX Deviation Notes:
* This interface will assume a default value for
* Scsi_Host->dma_boundary to be 0 if the Scsi Host template does
* not specify a value for dma_boundary. This is different from
* the linux behavior which defaults to a 4G boundary in a similar
* situation.
* Scsi_Host->dma_boundary to be 4G if the Scsi Host template does
* not specify it, this is to make it compatible with Linux behavior.
*
* RETURN VALUE:
* On Success pointer to the newly allocated Scsi_Host structure,
@@ -270,7 +266,8 @@ scsi_host_alloc(struct scsi_host_template *sht, int privateSize)
if (sht->dma_boundary) {
sh->dma_boundary = sht->dma_boundary;
} else {
sh->dma_boundary = 0; // avoid the checking of all SGE by upper layer
/* PR 842721 */
sh->dma_boundary = DMA_32BIT_MASK;
}
retval = scsi_setup_command_freelist(sh);
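
For context, dma_boundary is the mask describing the address boundary that a single scatter-gather element must not cross; with DMA_32BIT_MASK, each element has to stay inside one 4 GiB-aligned window. A minimal sketch of the crossing test (illustrative, not the vmklnx implementation):

/* Does [addr, addr + len) cross a (boundary + 1)-aligned address?
 * With boundary == DMA_32BIT_MASK this checks 4 GiB windows. */
static inline int sg_spans_boundary(u64 addr, u32 len, u64 boundary)
{
        return (addr & ~boundary) != ((addr + len - 1) & ~boundary);
}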