mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git
synced 2026-04-18 03:23:53 -04:00
virtio_ring: determine descriptor flags at one time
Let's determine the last descriptor by counting the number of sg. This would be consistent with the packed virtqueue implementation and ease the future in-order implementation. Acked-by: Eugenio Pérez <eperezma@redhat.com> Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com> Signed-off-by: Jason Wang <jasowang@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Message-Id: <20251230064649.55597-15-jasowang@redhat.com>
This commit is contained in:
committed by
Michael S. Tsirkin
parent
1208473f9b
commit
03f05c4eeb
@@ -574,7 +574,7 @@ static inline int virtqueue_add_split(struct vring_virtqueue *vq,
|
||||
struct vring_desc_extra *extra;
|
||||
struct scatterlist *sg;
|
||||
struct vring_desc *desc;
|
||||
unsigned int i, n, avail, descs_used, prev, err_idx;
|
||||
unsigned int i, n, avail, descs_used, err_idx, sg_count = 0;
|
||||
int head;
|
||||
bool indirect;
|
||||
|
||||
@@ -634,42 +634,40 @@ static inline int virtqueue_add_split(struct vring_virtqueue *vq,
|
||||
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
|
||||
dma_addr_t addr;
|
||||
u32 len;
|
||||
u16 flags = 0;
|
||||
|
||||
if (++sg_count != total_sg)
|
||||
flags |= VRING_DESC_F_NEXT;
|
||||
|
||||
if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr, &len, premapped))
|
||||
goto unmap_release;
|
||||
|
||||
prev = i;
|
||||
/* Note that we trust indirect descriptor
|
||||
* table since it use stream DMA mapping.
|
||||
*/
|
||||
i = virtqueue_add_desc_split(vq, desc, extra, i, addr, len,
|
||||
VRING_DESC_F_NEXT,
|
||||
premapped);
|
||||
i = virtqueue_add_desc_split(vq, desc, extra, i, addr,
|
||||
len, flags, premapped);
|
||||
}
|
||||
}
|
||||
for (; n < (out_sgs + in_sgs); n++) {
|
||||
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
|
||||
dma_addr_t addr;
|
||||
u32 len;
|
||||
u16 flags = VRING_DESC_F_WRITE;
|
||||
|
||||
if (++sg_count != total_sg)
|
||||
flags |= VRING_DESC_F_NEXT;
|
||||
|
||||
if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr, &len, premapped))
|
||||
goto unmap_release;
|
||||
|
||||
prev = i;
|
||||
/* Note that we trust indirect descriptor
|
||||
* table since it use stream DMA mapping.
|
||||
*/
|
||||
i = virtqueue_add_desc_split(vq, desc, extra, i, addr, len,
|
||||
VRING_DESC_F_NEXT |
|
||||
VRING_DESC_F_WRITE,
|
||||
premapped);
|
||||
i = virtqueue_add_desc_split(vq, desc, extra, i, addr,
|
||||
len, flags, premapped);
|
||||
}
|
||||
}
|
||||
/* Last one doesn't continue. */
|
||||
desc[prev].flags &= cpu_to_virtio16(vq->vq.vdev, ~VRING_DESC_F_NEXT);
|
||||
if (!indirect && vring_need_unmap_buffer(vq, &extra[prev]))
|
||||
vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
|
||||
~VRING_DESC_F_NEXT;
|
||||
|
||||
if (indirect) {
|
||||
/* Now that the indirect table is filled in, map it. */
|
||||
|
||||
Reference in New Issue
Block a user