[GEDI] [PATCH v7 13/13] virtio-blk: use BDRV_REQ_REGISTERED_BUF optimization hint
Stefan Hajnoczi
stefanha at redhat.com
Thu Oct 13 18:59:08 UTC 2022
Register guest RAM using BlockRAMRegistrar and set the
BDRV_REQ_REGISTERED_BUF flag so block drivers can optimize memory
accesses in I/O requests.
This benefits vdpa-blk, vhost-user-blk, and other I/O interfaces that
rely on DMA mapping/unmapping: when guest RAM has already been
registered, drivers can avoid mapping and unmapping buffers on every
request.
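For reference, the resulting pattern looks roughly like this. This is
an illustrative sketch using only the APIs touched by this patch, not
literal code from it:

    /* at realize time: register guest RAM with the BlockBackend */
    blk_ram_registrar_init(&s->blk_ram_registrar, s->blk);

    /* per request: pass the hint only if registration succeeded */
    BdrvRequestFlags flags = 0;
    if (blk_ram_registrar_ok(&s->blk_ram_registrar)) {
        flags |= BDRV_REQ_REGISTERED_BUF;
    }
    blk_aio_pwritev(blk, sector_num << BDRV_SECTOR_BITS, qiov, flags,
                    virtio_blk_rw_complete, mrb->reqs[start]);

    /* at unrealize time: undo the registration */
    blk_ram_registrar_destroy(&s->blk_ram_registrar);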
Signed-off-by: Stefan Hajnoczi <stefanha at redhat.com>
---
 include/hw/virtio/virtio-blk.h |  2 ++
 hw/block/virtio-blk.c          | 39 +++++++++++++++++++++++++++--------------
 2 files changed, 27 insertions(+), 14 deletions(-)
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index d311c57cca..7f589b4146 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -19,6 +19,7 @@
#include "hw/block/block.h"
#include "sysemu/iothread.h"
#include "sysemu/block-backend.h"
+#include "sysemu/block-ram-registrar.h"
#include "qom/object.h"
#define TYPE_VIRTIO_BLK "virtio-blk-device"
@@ -64,6 +65,7 @@ struct VirtIOBlock {
struct VirtIOBlockDataPlane *dataplane;
uint64_t host_features;
size_t config_size;
+ BlockRAMRegistrar blk_ram_registrar;
};
typedef struct VirtIOBlockReq {
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 8131ec2dbc..f717550fdc 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -21,6 +21,7 @@
#include "hw/block/block.h"
#include "hw/qdev-properties.h"
#include "sysemu/blockdev.h"
+#include "sysemu/block-ram-registrar.h"
#include "sysemu/sysemu.h"
#include "sysemu/runstate.h"
#include "hw/virtio/virtio-blk.h"
@@ -362,12 +363,14 @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
}
}
-static inline void submit_requests(BlockBackend *blk, MultiReqBuffer *mrb,
+static inline void submit_requests(VirtIOBlock *s, MultiReqBuffer *mrb,
int start, int num_reqs, int niov)
{
+ BlockBackend *blk = s->blk;
QEMUIOVector *qiov = &mrb->reqs[start]->qiov;
int64_t sector_num = mrb->reqs[start]->sector_num;
bool is_write = mrb->is_write;
+ BdrvRequestFlags flags = 0;
if (num_reqs > 1) {
int i;
@@ -398,12 +401,18 @@ static inline void submit_requests(BlockBackend *blk, MultiReqBuffer *mrb,
num_reqs - 1);
}
+ if (blk_ram_registrar_ok(&s->blk_ram_registrar)) {
+ flags |= BDRV_REQ_REGISTERED_BUF;
+ }
+
if (is_write) {
- blk_aio_pwritev(blk, sector_num << BDRV_SECTOR_BITS, qiov, 0,
- virtio_blk_rw_complete, mrb->reqs[start]);
+ blk_aio_pwritev(blk, sector_num << BDRV_SECTOR_BITS, qiov,
+ flags, virtio_blk_rw_complete,
+ mrb->reqs[start]);
} else {
- blk_aio_preadv(blk, sector_num << BDRV_SECTOR_BITS, qiov, 0,
- virtio_blk_rw_complete, mrb->reqs[start]);
+ blk_aio_preadv(blk, sector_num << BDRV_SECTOR_BITS, qiov,
+ flags, virtio_blk_rw_complete,
+ mrb->reqs[start]);
}
}
@@ -425,14 +434,14 @@ static int multireq_compare(const void *a, const void *b)
}
}
-static void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb)
+static void virtio_blk_submit_multireq(VirtIOBlock *s, MultiReqBuffer *mrb)
{
int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0;
uint32_t max_transfer;
int64_t sector_num = 0;
if (mrb->num_reqs == 1) {
- submit_requests(blk, mrb, 0, 1, -1);
+ submit_requests(s, mrb, 0, 1, -1);
mrb->num_reqs = 0;
return;
}
@@ -452,11 +461,11 @@ static void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb)
* 3. merge would exceed maximum transfer length of backend device
*/
if (sector_num + nb_sectors != req->sector_num ||
- niov > blk_get_max_iov(blk) - req->qiov.niov ||
+ niov > blk_get_max_iov(s->blk) - req->qiov.niov ||
req->qiov.size > max_transfer ||
nb_sectors > (max_transfer -
req->qiov.size) / BDRV_SECTOR_SIZE) {
- submit_requests(blk, mrb, start, num_reqs, niov);
+ submit_requests(s, mrb, start, num_reqs, niov);
num_reqs = 0;
}
}
@@ -472,7 +481,7 @@ static void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb)
num_reqs++;
}
- submit_requests(blk, mrb, start, num_reqs, niov);
+ submit_requests(s, mrb, start, num_reqs, niov);
mrb->num_reqs = 0;
}
@@ -487,7 +496,7 @@ static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
* Make sure all outstanding writes are posted to the backing device.
*/
if (mrb->is_write && mrb->num_reqs > 0) {
- virtio_blk_submit_multireq(s->blk, mrb);
+ virtio_blk_submit_multireq(s, mrb);
}
blk_aio_flush(s->blk, virtio_blk_flush_complete, req);
}
@@ -667,7 +676,7 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
if (mrb->num_reqs > 0 && (mrb->num_reqs == VIRTIO_BLK_MAX_MERGE_REQS ||
is_write != mrb->is_write ||
!s->conf.request_merging)) {
- virtio_blk_submit_multireq(s->blk, mrb);
+ virtio_blk_submit_multireq(s, mrb);
}
assert(mrb->num_reqs < VIRTIO_BLK_MAX_MERGE_REQS);
@@ -774,7 +783,7 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
} while (!virtio_queue_empty(vq));
if (mrb.num_reqs) {
- virtio_blk_submit_multireq(s->blk, &mrb);
+ virtio_blk_submit_multireq(s, &mrb);
}
blk_io_unplug(s->blk);
@@ -823,7 +832,7 @@ void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh)
}
if (mrb.num_reqs) {
- virtio_blk_submit_multireq(s->blk, &mrb);
+ virtio_blk_submit_multireq(s, &mrb);
}
if (is_bh) {
blk_dec_in_flight(s->conf.conf.blk);
@@ -1205,6 +1214,7 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
}
s->change = qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
+ blk_ram_registrar_init(&s->blk_ram_registrar, s->blk);
blk_set_dev_ops(s->blk, &virtio_block_ops, s);
blk_iostatus_enable(s->blk);
@@ -1230,6 +1240,7 @@ static void virtio_blk_device_unrealize(DeviceState *dev)
virtio_del_queue(vdev, i);
}
qemu_coroutine_dec_pool_size(conf->num_queues * conf->queue_size / 2);
+ blk_ram_registrar_destroy(&s->blk_ram_registrar);
qemu_del_vm_change_state_handler(s->change);
blockdev_mark_auto_del(s->blk);
virtio_cleanup(vdev);
--
2.37.3