Merge branch 'virtio_net-enable-premapped-mode-by-default'

Xuan Zhuo says:

====================
virtio_net: enable premapped mode by default

v1:
    1. fix some small problems
    2. remove commit "virtio_net: introduce vi->mode"

In the last Linux release, we disabled this feature to fix a
regression [1].

This patch set fixes the problem and re-enables it.

More info: http://lore.kernel.org/all/20240820071913.68004-1-xuanzhuo@linux.alibaba.com

[1]: http://lore.kernel.org/all/8b20cc28-45a9-4643-8e87-ba164a540c0a@oracle.com
====================

Link: https://patch.msgid.link/20241029084615.91049-1-xuanzhuo@linux.alibaba.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
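As background (this note and the sketch below are not part of the patches): "premapped" means the driver, not the virtio core, owns the DMA mapping of receive buffers. The driver tells the core up front that buffers are handed in already mapped, and then maps and unmaps them itself through the virtqueue DMA helpers that show up in the diff below. A rough sketch of that flow; the demo_premap_one_buf() wrapper is invented for illustration and error handling is trimmed:

    #include <linux/virtio.h>
    #include <linux/dma-mapping.h>

    /* Illustrative only: switch one RX virtqueue to premapped mode and map a
     * buffer the way the driver must once the core no longer does it.
     * (In a real driver the mode switch is done once per queue at init time,
     * not per buffer.)
     */
    static int demo_premap_one_buf(struct virtqueue *vq, void *buf, size_t len,
                                   dma_addr_t *addr)
    {
            int err;

            /* Must be done while the queue is still empty. */
            err = virtqueue_set_dma_premapped(vq);
            if (err)
                    return err;

            /* The driver maps the buffer itself ... */
            *addr = virtqueue_dma_map_single_attrs(vq, buf, len,
                                                   DMA_FROM_DEVICE, 0);
            if (virtqueue_dma_mapping_error(vq, *addr))
                    return -ENOMEM;

            /* ... passes the dma address in the sg list it adds to the vq,
             * and later calls virtqueue_dma_unmap_single_attrs() once the
             * buffer is no longer used.
             */
            return 0;
    }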

@@ -356,9 +356,6 @@ struct receive_queue {
struct xdp_rxq_info xsk_rxq_info;
struct xdp_buff **xsk_buffs;
- /* Do dma by self */
- bool do_dma;
};
/* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -856,11 +853,14 @@ ok:
static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
struct page *page = virt_to_head_page(buf);
struct virtnet_rq_dma *dma;
void *head;
int offset;
+ BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
head = page_address(page);
dma = head;
@@ -885,10 +885,13 @@ static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
void *buf;
+ BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
- if (buf && rq->do_dma)
+ if (buf)
virtnet_rq_unmap(rq, buf, *len);
return buf;
@@ -896,15 +899,13 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
struct virtnet_rq_dma *dma;
dma_addr_t addr;
u32 offset;
void *head;
- if (!rq->do_dma) {
- sg_init_one(rq->sg, buf, len);
- return;
- }
+ BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
head = page_address(rq->alloc_frag.page);
@@ -922,53 +923,51 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
{
struct page_frag *alloc_frag = &rq->alloc_frag;
+ struct virtnet_info *vi = rq->vq->vdev->priv;
struct virtnet_rq_dma *dma;
void *buf, *head;
dma_addr_t addr;
- if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp)))
- return NULL;
+ BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
head = page_address(alloc_frag->page);
- if (rq->do_dma) {
dma = head;
/* new pages */
if (!alloc_frag->offset) {
if (rq->last_dma) {
/* Now, the new page is allocated, the last dma
 * will not be used. So the dma can be unmapped
 * if the ref is 0.
 */
virtnet_rq_unmap(rq, rq->last_dma, 0);
rq->last_dma = NULL;
}
dma->len = alloc_frag->size - sizeof(*dma);
addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
dma->len, DMA_FROM_DEVICE, 0);
if (virtqueue_dma_mapping_error(rq->vq, addr))
return NULL;
dma->addr = addr;
dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
/* Add a reference to dma to prevent the entire dma from
 * being released during error handling. This reference
 * will be freed after the pages are no longer used.
 */
get_page(alloc_frag->page);
dma->ref = 1;
alloc_frag->offset = sizeof(*dma);
rq->last_dma = dma;
}
++dma->ref;
- }
buf = head + alloc_frag->offset;
get_page(alloc_frag->page);
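A note on the layout this function relies on (summary drawn from the code above, not an addition made by these patches): virtnet_rq_alloc() keeps a struct virtnet_rq_dma at the very start of every page frag, maps the remainder of the page once, and each buffer carved out of the page only takes a reference on that shared mapping:

    page frag: [ struct virtnet_rq_dma | buf 0 | buf 1 | ... ]
                 mapping state           alloc_frag->offset starts at sizeof(*dma)

The page is unmapped again only when dma->ref drops to zero in virtnet_rq_unmap().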
@@ -990,7 +989,7 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
return;
}
- if (rq->do_dma)
+ if (!vi->big_packets || vi->mergeable_rx_bufs)
virtnet_rq_unmap(rq, buf, 0);
virtnet_rq_free_buf(vi, rq, buf);
@@ -2423,6 +2422,9 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
len = SKB_DATA_ALIGN(len) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ if (unlikely(!skb_page_frag_refill(len, &rq->alloc_frag, gfp)))
+ return -ENOMEM;
buf = virtnet_rq_alloc(rq, len, gfp);
if (unlikely(!buf))
return -ENOMEM;
@@ -2433,8 +2435,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0) {
- if (rq->do_dma)
- virtnet_rq_unmap(rq, buf, 0);
+ virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
}
@@ -2525,6 +2526,12 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
*/
len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
+ if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
+ return -ENOMEM;
+ if (!alloc_frag->offset && len + room + sizeof(struct virtnet_rq_dma) > alloc_frag->size)
+ len -= sizeof(struct virtnet_rq_dma);
buf = virtnet_rq_alloc(rq, len + room, gfp);
if (unlikely(!buf))
return -ENOMEM;
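The two checks added above guard against the overflow being fixed here (this summary is not taken from the diff itself): skb_page_frag_refill() only guarantees len + room free bytes, but on a brand-new frag (alloc_frag->offset == 0) virtnet_rq_alloc() also reserves sizeof(struct virtnet_rq_dma) bytes at the head of the page for the mapping state, so without trimming len the requested buffer could run past the end of the frag by exactly that header size.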
@@ -2548,8 +2555,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
ctx = mergeable_len_to_ctx(len + room, headroom);
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0) {
- if (rq->do_dma)
- virtnet_rq_unmap(rq, buf, 0);
+ virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
}
@@ -2706,7 +2712,7 @@ static int virtnet_receive_packets(struct virtnet_info *vi,
}
} else {
while (packets < budget &&
- (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
+ (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
receive_buf(vi, rq, buf, len, NULL, xdp_xmit, stats);
packets++;
}
@@ -5916,7 +5922,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
int i;
for (i = 0; i < vi->max_queue_pairs; i++)
if (vi->rq[i].alloc_frag.page) {
- if (vi->rq[i].do_dma && vi->rq[i].last_dma)
+ if (vi->rq[i].last_dma)
virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
put_page(vi->rq[i].alloc_frag.page);
}
@@ -6101,6 +6107,15 @@ err_ctrl:
return -ENOMEM;
}
+ static void virtnet_rq_set_premapped(struct virtnet_info *vi)
+ {
+ int i;
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ /* error should never happen */
+ BUG_ON(virtqueue_set_dma_premapped(vi->rq[i].vq));
+ }
static int init_vqs(struct virtnet_info *vi)
{
int ret;
@@ -6114,6 +6129,10 @@ static int init_vqs(struct virtnet_info *vi)
if (ret)
goto err_free;
+ /* disable for big mode */
+ if (!vi->big_packets || vi->mergeable_rx_bufs)
+ virtnet_rq_set_premapped(vi);
cpus_read_lock();
virtnet_set_affinity(vi);
cpus_read_unlock();
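For context (not from the patches themselves): big-packet mode fills the ring through add_recvbuf_big()/give_pages() rather than virtnet_rq_alloc(), so there is no per-frag mapping for the driver to manage on those queues. They stay in the default mode where the virtio core does the DMA mapping, and the BUG_ON checks added earlier make sure the premapped-only helpers are never reached on that path.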