mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git/
synced 2026-05-02 18:15:03 -04:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
S390 bpf_jit.S is removed in net-next and had changes in 'net'; since that code isn't used any more, take the removal. TLS data structures split the TX and RX components in 'net-next'; put the new struct members from the bug fix in 'net' into the RX part. The 'net-next' tree had some reworking of how the ERSPAN code works in the GRE tunneling code, overlapping with a one-line headroom calculation fix in 'net'. Overlapping changes in __sock_map_ctx_update_elem(): keep the bits that read the prog members via READ_ONCE() into local variables before using them. Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
@@ -369,6 +369,11 @@ vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
|
||||
|
||||
gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
|
||||
while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
|
||||
/* Prevent any &gdesc->tcd field from being (speculatively)
|
||||
* read before (&gdesc->tcd)->gen is read.
|
||||
*/
|
||||
dma_rmb();
|
||||
|
||||
completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
|
||||
&gdesc->tcd), tq, adapter->pdev,
|
||||
adapter);
|
||||
@@ -1103,6 +1108,11 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
|
||||
gdesc->txd.tci = skb_vlan_tag_get(skb);
|
||||
}
|
||||
|
||||
/* Ensure that the write to (&gdesc->txd)->gen will be observed after
|
||||
* all other writes to &gdesc->txd.
|
||||
*/
|
||||
dma_wmb();
|
||||
|
||||
/* finally flips the GEN bit of the SOP desc. */
|
||||
gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
|
||||
VMXNET3_TXD_GEN);
|
||||
@@ -1298,6 +1308,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
|
||||
*/
|
||||
break;
|
||||
}
|
||||
|
||||
/* Prevent any rcd field from being (speculatively) read before
|
||||
* rcd->gen is read.
|
||||
*/
|
||||
dma_rmb();
|
||||
|
||||
BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
|
||||
rcd->rqID != rq->dataRingQid);
|
||||
idx = rcd->rxdIdx;
|
||||
@@ -1528,6 +1544,12 @@ rcd_done:
|
||||
ring->next2comp = idx;
|
||||
num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
|
||||
ring = rq->rx_ring + ring_idx;
|
||||
|
||||
/* Ensure that the writes to rxd->gen bits will be observed
|
||||
* after all other writes to rxd objects.
|
||||
*/
|
||||
dma_wmb();
|
||||
|
||||
while (num_to_alloc) {
|
||||
vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
|
||||
&rxCmdDesc);
|
||||
@@ -2688,7 +2710,7 @@ vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
|
||||
/* ==================== initialization and cleanup routines ============ */
|
||||
|
||||
static int
|
||||
vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
|
||||
vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
|
||||
{
|
||||
int err;
|
||||
unsigned long mmio_start, mmio_len;
|
||||
@@ -2700,30 +2722,12 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
|
||||
return err;
|
||||
}
|
||||
|
||||
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
|
||||
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
|
||||
dev_err(&pdev->dev,
|
||||
"pci_set_consistent_dma_mask failed\n");
|
||||
err = -EIO;
|
||||
goto err_set_mask;
|
||||
}
|
||||
*dma64 = true;
|
||||
} else {
|
||||
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
|
||||
dev_err(&pdev->dev,
|
||||
"pci_set_dma_mask failed\n");
|
||||
err = -EIO;
|
||||
goto err_set_mask;
|
||||
}
|
||||
*dma64 = false;
|
||||
}
|
||||
|
||||
err = pci_request_selected_regions(pdev, (1 << 2) - 1,
|
||||
vmxnet3_driver_name);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev,
|
||||
"Failed to request region for adapter: error %d\n", err);
|
||||
goto err_set_mask;
|
||||
goto err_enable_device;
|
||||
}
|
||||
|
||||
pci_set_master(pdev);
|
||||
@@ -2751,7 +2755,7 @@ err_bar1:
|
||||
iounmap(adapter->hw_addr0);
|
||||
err_ioremap:
|
||||
pci_release_selected_regions(pdev, (1 << 2) - 1);
|
||||
err_set_mask:
|
||||
err_enable_device:
|
||||
pci_disable_device(pdev);
|
||||
return err;
|
||||
}
|
||||
@@ -3254,7 +3258,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
|
||||
#endif
|
||||
};
|
||||
int err;
|
||||
bool dma64 = false; /* stupid gcc */
|
||||
bool dma64;
|
||||
u32 ver;
|
||||
struct net_device *netdev;
|
||||
struct vmxnet3_adapter *adapter;
|
||||
@@ -3300,6 +3304,24 @@ vmxnet3_probe_device(struct pci_dev *pdev,
|
||||
adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
|
||||
adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
|
||||
|
||||
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
|
||||
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
|
||||
dev_err(&pdev->dev,
|
||||
"pci_set_consistent_dma_mask failed\n");
|
||||
err = -EIO;
|
||||
goto err_set_mask;
|
||||
}
|
||||
dma64 = true;
|
||||
} else {
|
||||
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
|
||||
dev_err(&pdev->dev,
|
||||
"pci_set_dma_mask failed\n");
|
||||
err = -EIO;
|
||||
goto err_set_mask;
|
||||
}
|
||||
dma64 = false;
|
||||
}
|
||||
|
||||
spin_lock_init(&adapter->cmd_lock);
|
||||
adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
|
||||
sizeof(struct vmxnet3_adapter),
|
||||
@@ -3307,7 +3329,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
|
||||
if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
|
||||
dev_err(&pdev->dev, "Failed to map dma\n");
|
||||
err = -EFAULT;
|
||||
goto err_dma_map;
|
||||
goto err_set_mask;
|
||||
}
|
||||
adapter->shared = dma_alloc_coherent(
|
||||
&adapter->pdev->dev,
|
||||
@@ -3358,7 +3380,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
|
||||
}
|
||||
#endif /* VMXNET3_RSS */
|
||||
|
||||
err = vmxnet3_alloc_pci_resources(adapter, &dma64);
|
||||
err = vmxnet3_alloc_pci_resources(adapter);
|
||||
if (err < 0)
|
||||
goto err_alloc_pci;
|
||||
|
||||
@@ -3504,7 +3526,7 @@ err_alloc_queue_desc:
|
||||
err_alloc_shared:
|
||||
dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
|
||||
sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
|
||||
err_dma_map:
|
||||
err_set_mask:
|
||||
free_netdev(netdev);
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -69,10 +69,12 @@
|
||||
/*
|
||||
* Version numbers
|
||||
*/
|
||||
#define VMXNET3_DRIVER_VERSION_STRING "1.4.14.0-k"
|
||||
#define VMXNET3_DRIVER_VERSION_STRING "1.4.16.0-k"
|
||||
|
||||
/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
|
||||
#define VMXNET3_DRIVER_VERSION_NUM 0x01040e00
|
||||
/* Each byte of this 32-bit integer encodes a version number in
|
||||
* VMXNET3_DRIVER_VERSION_STRING.
|
||||
*/
|
||||
#define VMXNET3_DRIVER_VERSION_NUM 0x01041000
|
||||
|
||||
#if defined(CONFIG_PCI_MSI)
|
||||
/* RSS only makes sense if MSI-X is supported. */
|
||||
|
||||
Reference in New Issue
Block a user