Skip to content

Commit 9f11a0a

Browse files
tpetazzoni authored and gregkh committed
net: mvpp2: fix dma unmapping of TX buffers for fragments
commit 8354491c9d5b06709384cea91d13019bf5e61449 upstream. Since commit 71ce391 ("net: mvpp2: enable proper per-CPU TX buffers unmapping"), we are not correctly DMA unmapping TX buffers for fragments. Indeed, the mvpp2_txq_inc_put() function only stores in the txq_cpu->tx_buffs[] array the physical address of the buffer to be DMA-unmapped when skb != NULL. In addition, when DMA-unmapping, we use skb_headlen(skb) to get the size to be unmapped. Both of these work fine for TX descriptors that are associated directly to a SKB, but not the ones that are used for fragments, with a NULL pointer as skb: - We have a NULL physical address when calling DMA unmap - skb_headlen(skb) crashes because skb is NULL This causes random crashes when fragments are used. To solve this problem, we need to: - Store the physical address of the buffer to be unmapped unconditionally, regardless of whether it is tied to a SKB or not. - Store the length of the buffer to be unmapped, which requires a new field. Instead of adding a third array to store the length of the buffer to be unmapped, and as suggested by David Miller, this commit refactors the tx_buffs[] and tx_skb[] arrays of 'struct mvpp2_txq_pcpu' into a separate structure 'mvpp2_txq_pcpu_buf', to which a 'size' field is added. Therefore, instead of having three arrays to allocate/free, we have a single one, which also improves data locality, reducing the impact on the CPU cache. Fixes: 71ce391 ("net: mvpp2: enable proper per-CPU TX buffers unmapping") Reported-by: Raphael G <raphael.glon@corp.ovh.com> Cc: Raphael G <raphael.glon@corp.ovh.com> Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com> Signed-off-by: David S. Miller <davem@davemloft.net> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent d857273 commit 9f11a0a

1 file changed

Lines changed: 30 additions & 29 deletions

File tree

  • drivers/net/ethernet/marvell

drivers/net/ethernet/marvell/mvpp2.c

Lines changed: 30 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -772,6 +772,17 @@ struct mvpp2_rx_desc {
772772
u32 reserved8;
773773
};
774774

775+
struct mvpp2_txq_pcpu_buf {
776+
/* Transmitted SKB */
777+
struct sk_buff *skb;
778+
779+
/* Physical address of transmitted buffer */
780+
dma_addr_t phys;
781+
782+
/* Size transmitted */
783+
size_t size;
784+
};
785+
775786
/* Per-CPU Tx queue control */
776787
struct mvpp2_txq_pcpu {
777788
int cpu;
@@ -787,11 +798,8 @@ struct mvpp2_txq_pcpu {
787798
/* Number of Tx DMA descriptors reserved for each CPU */
788799
int reserved_num;
789800

790-
/* Array of transmitted skb */
791-
struct sk_buff **tx_skb;
792-
793-
/* Array of transmitted buffers' physical addresses */
794-
dma_addr_t *tx_buffs;
801+
/* Infos about transmitted buffers */
802+
struct mvpp2_txq_pcpu_buf *buffs;
795803

796804
/* Index of last TX DMA descriptor that was inserted */
797805
int txq_put_index;
@@ -981,10 +989,11 @@ static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
981989
struct sk_buff *skb,
982990
struct mvpp2_tx_desc *tx_desc)
983991
{
984-
txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
985-
if (skb)
986-
txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
987-
tx_desc->buf_phys_addr;
992+
struct mvpp2_txq_pcpu_buf *tx_buf =
993+
txq_pcpu->buffs + txq_pcpu->txq_put_index;
994+
tx_buf->skb = skb;
995+
tx_buf->size = tx_desc->data_size;
996+
tx_buf->phys = tx_desc->buf_phys_addr;
988997
txq_pcpu->txq_put_index++;
989998
if (txq_pcpu->txq_put_index == txq_pcpu->size)
990999
txq_pcpu->txq_put_index = 0;
@@ -4403,17 +4412,16 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
44034412
int i;
44044413

44054414
for (i = 0; i < num; i++) {
4406-
dma_addr_t buf_phys_addr =
4407-
txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
4408-
struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
4415+
struct mvpp2_txq_pcpu_buf *tx_buf =
4416+
txq_pcpu->buffs + txq_pcpu->txq_get_index;
44094417

44104418
mvpp2_txq_inc_get(txq_pcpu);
44114419

4412-
dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
4413-
skb_headlen(skb), DMA_TO_DEVICE);
4414-
if (!skb)
4420+
dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
4421+
tx_buf->size, DMA_TO_DEVICE);
4422+
if (!tx_buf->skb)
44154423
continue;
4416-
dev_kfree_skb_any(skb);
4424+
dev_kfree_skb_any(tx_buf->skb);
44174425
}
44184426
}
44194427

@@ -4664,15 +4672,10 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
46644672
for_each_present_cpu(cpu) {
46654673
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
46664674
txq_pcpu->size = txq->size;
4667-
txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
4668-
sizeof(*txq_pcpu->tx_skb),
4669-
GFP_KERNEL);
4670-
if (!txq_pcpu->tx_skb)
4671-
goto error;
4672-
4673-
txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
4674-
sizeof(dma_addr_t), GFP_KERNEL);
4675-
if (!txq_pcpu->tx_buffs)
4675+
txq_pcpu->buffs = kmalloc(txq_pcpu->size *
4676+
sizeof(struct mvpp2_txq_pcpu_buf),
4677+
GFP_KERNEL);
4678+
if (!txq_pcpu->buffs)
46764679
goto error;
46774680

46784681
txq_pcpu->count = 0;
@@ -4686,8 +4689,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
46864689
error:
46874690
for_each_present_cpu(cpu) {
46884691
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4689-
kfree(txq_pcpu->tx_skb);
4690-
kfree(txq_pcpu->tx_buffs);
4692+
kfree(txq_pcpu->buffs);
46914693
}
46924694

46934695
dma_free_coherent(port->dev->dev.parent,
@@ -4706,8 +4708,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
47064708

47074709
for_each_present_cpu(cpu) {
47084710
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4709-
kfree(txq_pcpu->tx_skb);
4710-
kfree(txq_pcpu->tx_buffs);
4711+
kfree(txq_pcpu->buffs);
47114712
}
47124713

47134714
if (txq->descs)

0 commit comments

Comments
 (0)