
[PULL 09/20] block: use int64_t as bytes type in tracked requests


From: Eric Blake
Subject: [PULL 09/20] block: use int64_t as bytes type in tracked requests
Date: Tue, 2 Feb 2021 16:45:18 -0600

From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>

We are generally moving to int64_t for both offset and bytes parameters
on all io paths.

The main motivation is a 64-bit write_zeroes operation, which allows fast
zeroing of large disk chunks, up to the whole disk.

We chose a signed type to be consistent with off_t (which is signed) and
to leave room for a signed return type (where a negative value means an
error).
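
Purely for illustration (not part of this patch), the convention a signed
width enables looks roughly like the hypothetical helper below;
example_pread() and its parameters are invented for this sketch:

    #include <errno.h>
    #include <stdint.h>
    #include <unistd.h>

    /* Hypothetical sketch: with a signed 64-bit type, one return value can
     * carry either a byte count or a negative errno on failure. */
    static int64_t example_pread(int fd, void *buf, int64_t bytes, int64_t offset)
    {
        ssize_t ret = pread(fd, buf, bytes, offset);
        return ret < 0 ? -errno : ret;
    }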

No request in block/io may overflow BDRV_MAX_LENGTH, and all external
users of BdrvTrackedRequest already have corresponding assertions, so we
are safe. Still, add some assertions here as well.
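
As a rough, self-contained sketch of the kind of range check meant here
(example_check_request() and EXAMPLE_MAX_LENGTH are stand-ins; the real
bdrv_check_request() and BDRV_MAX_LENGTH are defined in the QEMU tree and
may differ):

    #include <assert.h>
    #include <stdint.h>

    /* Stand-in for QEMU's BDRV_MAX_LENGTH, illustrative only. */
    #define EXAMPLE_MAX_LENGTH (INT64_MAX / 2)

    static void example_check_request(int64_t offset, int64_t bytes)
    {
        assert(offset >= 0 && bytes >= 0);
        assert(offset <= EXAMPLE_MAX_LENGTH);
        /* bytes may not push the request past the limit,
         * so offset + bytes cannot overflow */
        assert(bytes <= EXAMPLE_MAX_LENGTH - offset);
    }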

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20201211183934.169161-9-vsementsov@virtuozzo.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Eric Blake <eblake@redhat.com>
---
 include/block/block_int.h |  4 ++--
 block/io.c                | 14 +++++++----
 block/io.c.rej            | 50 ++++++++++-----------------------------
 3 files changed, 24 insertions(+), 44 deletions(-)

diff --git a/include/block/block_int.h b/include/block/block_int.h
index 5bbbf9ee0af9..7f41f0990cc0 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -79,12 +79,12 @@ enum BdrvTrackedRequestType {
 typedef struct BdrvTrackedRequest {
     BlockDriverState *bs;
     int64_t offset;
-    uint64_t bytes;
+    int64_t bytes;
     enum BdrvTrackedRequestType type;

     bool serialising;
     int64_t overlap_offset;
-    uint64_t overlap_bytes;
+    int64_t overlap_bytes;

     QLIST_ENTRY(BdrvTrackedRequest) list;
     Coroutine *co; /* owner, used for deadlock detection */
diff --git a/block/io.c b/block/io.c
index b56db913da30..1c23587d18c6 100644
--- a/block/io.c
+++ b/block/io.c
@@ -717,10 +717,10 @@ static void tracked_request_end(BdrvTrackedRequest *req)
 static void tracked_request_begin(BdrvTrackedRequest *req,
                                   BlockDriverState *bs,
                                   int64_t offset,
-                                  uint64_t bytes,
+                                  int64_t bytes,
                                   enum BdrvTrackedRequestType type)
 {
-    assert(bytes <= INT64_MAX && offset <= INT64_MAX - bytes);
+    bdrv_check_request(offset, bytes, &error_abort);

     *req = (BdrvTrackedRequest){
         .bs = bs,
@@ -741,8 +741,10 @@ static void tracked_request_begin(BdrvTrackedRequest *req,
 }

 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
-                                     int64_t offset, uint64_t bytes)
+                                     int64_t offset, int64_t bytes)
 {
+    bdrv_check_request(offset, bytes, &error_abort);
+
     /*        aaaa   bbbb */
     if (offset >= req->overlap_offset + req->overlap_bytes) {
         return false;
@@ -810,8 +812,10 @@ static void tracked_request_set_serialising(BdrvTrackedRequest *req,
                                             uint64_t align)
 {
     int64_t overlap_offset = req->offset & ~(align - 1);
-    uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
-                               - overlap_offset;
+    int64_t overlap_bytes =
+        ROUND_UP(req->offset + req->bytes, align) - overlap_offset;
+
+    bdrv_check_request(req->offset, req->bytes, &error_abort);

     if (!req->serialising) {
         qatomic_inc(&req->bs->serialising_in_flight);
diff --git a/block/io.c.rej b/block/io.c.rej
index f52df016263b..ae2f972d1af9 100644
--- a/block/io.c.rej
+++ b/block/io.c.rej
@@ -1,40 +1,16 @@
 diff a/block/io.c b/block/io.c (rejected hunks)
-@@ -2138,20 +2139,32 @@ int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
-         return 0;
-     }
+@@ -798,10 +800,12 @@ bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
+ {
+     BlockDriverState *bs = req->bs;
+     int64_t overlap_offset = req->offset & ~(align - 1);
+-    uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
+-                               - overlap_offset;
++    int64_t overlap_bytes =
++        ROUND_UP(req->offset + req->bytes, align) - overlap_offset;
+     bool waited;
  
-+    if (!(flags & BDRV_REQ_ZERO_WRITE)) {
-+        /*
-+         * Pad request for following read-modify-write cycle.
-+         * bdrv_co_do_zero_pwritev() does aligning by itself, so, we do
-+         * alignment only if there is no ZERO flag.
-+         */
-+        padded = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes,
-+                                  &pad);
-+    }
++    bdrv_check_request(req->offset, req->bytes, &error_abort);
 +
-     bdrv_inc_in_flight(bs);
--    /*
--     * Align write if necessary by performing a read-modify-write cycle.
--     * Pad qiov with the read parts and be sure to have a tracked request not
--     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
--     */
-     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
- 
-     if (flags & BDRV_REQ_ZERO_WRITE) {
-+        assert(!padded);
-         ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
-         goto out;
-     }
- 
--    if (bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad)) {
-+    if (padded) {
-+        /*
-+         * Request was unaligned to request_alignment and therefore padded.
-+         * We are going to do read-modify-write. User is not prepared to widened
-+         * request intersections with other requests, so we serialize the
-+         * request.
-+         */
-         bdrv_mark_request_serialising(&req, align);
-         bdrv_padding_rmw_read(child, &req, &pad, false);
-     }
+     qemu_co_mutex_lock(&bs->reqs_lock);
+     if (!req->serialising) {
+         qatomic_inc(&req->bs->serialising_in_flight);
-- 
2.30.0



