Commit 80bfa2f6 authored by Roger Pau Monne, committed by Konrad Rzeszutek Wilk

xen-blkif: drop struct blkif_request_segment_aligned

This was wrongly introduced in commit 402b27f9; the only difference
between blkif_request_segment_aligned and blkif_request_segment is
that the former has named padding, while both share the same memory
layout.

Also correct a few minor glitches in the description, so that it no
longer assumes PAGE_SIZE == 4096.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
[Description fix by Jan Beulich]
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reported-by: Jan Beulich <jbeulich@suse.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Tested-by: Matt Rushton <mrushton@amazon.com>
Cc: Matt Wilson <msw@amazon.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent c05f3e3c
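For illustration only (not part of the commit): a minimal standalone sketch of why the two structs were interchangeable. The grant_ref_t typedef is assumed here to be uint32_t, as in the Xen headers; with a 4-byte gref the compiler pads the unpacked struct to the same 8-byte layout that the packed, explicitly padded variant had, which is the whole point of the cleanup.

/* layout_check.c: hedged sketch, not part of the commit below. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef uint32_t grant_ref_t;   /* assumption: matches the Xen definition */

/* The variant this commit removes: explicit padding, packed. */
struct blkif_request_segment_aligned {
	grant_ref_t gref;                  /* reference to I/O buffer frame */
	uint8_t     first_sect, last_sect;
	uint16_t    _pad;                  /* named padding up to 8 bytes   */
} __attribute__((__packed__));

/* The variant that remains: the compiler inserts the same two trailing
 * padding bytes, so the memory layout is identical. */
struct blkif_request_segment {
	grant_ref_t gref;
	uint8_t     first_sect, last_sect;
};

/* Both are 8 bytes with identical field offsets. */
static_assert(sizeof(struct blkif_request_segment_aligned) == 8, "8 bytes");
static_assert(sizeof(struct blkif_request_segment) == 8, "8 bytes");
static_assert(offsetof(struct blkif_request_segment, first_sect) ==
	      offsetof(struct blkif_request_segment_aligned, first_sect),
	      "same field offsets");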
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -847,7 +847,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
 	struct grant_page **pages = pending_req->indirect_pages;
 	struct xen_blkif *blkif = pending_req->blkif;
 	int indirect_grefs, rc, n, nseg, i;
-	struct blkif_request_segment_aligned *segments = NULL;
+	struct blkif_request_segment *segments = NULL;
 
 	nseg = pending_req->nr_pages;
 	indirect_grefs = INDIRECT_PAGES(nseg);
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -57,7 +57,7 @@
 #define MAX_INDIRECT_SEGMENTS 256
 
 #define SEGS_PER_INDIRECT_FRAME \
-	(PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+	(PAGE_SIZE/sizeof(struct blkif_request_segment))
 #define MAX_INDIRECT_PAGES \
 	((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
 #define INDIRECT_PAGES(_segs) \
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -162,7 +162,7 @@ static DEFINE_SPINLOCK(minor_lock);
 #define DEV_NAME	"xvd"	/* name in /dev */
 
 #define SEGS_PER_INDIRECT_FRAME \
-	(PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+	(PAGE_SIZE/sizeof(struct blkif_request_segment))
 #define INDIRECT_GREFS(_segs) \
 	((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
 
@@ -393,7 +393,7 @@ static int blkif_queue_request(struct request *req)
 	unsigned long id;
 	unsigned int fsect, lsect;
 	int i, ref, n;
-	struct blkif_request_segment_aligned *segments = NULL;
+	struct blkif_request_segment *segments = NULL;
 
 	/*
 	 * Used to store if we are able to queue the request by just using
@@ -550,7 +550,7 @@ static int blkif_queue_request(struct request *req)
 			} else {
 				n = i % SEGS_PER_INDIRECT_FRAME;
 				segments[n] =
-					(struct blkif_request_segment_aligned) {
+					(struct blkif_request_segment) {
 							.gref       = ref,
 							.first_sect = fsect,
 							.last_sect  = lsect };
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -113,13 +113,13 @@ typedef uint64_t blkif_sector_t;
  * it's less than the number provided by the backend. The indirect_grefs field
  * in blkif_request_indirect should be filled by the frontend with the
  * grant references of the pages that are holding the indirect segments.
- * This pages are filled with an array of blkif_request_segment_aligned
- * that hold the information about the segments. The number of indirect
- * pages to use is determined by the maximum number of segments
- * a indirect request contains. Every indirect page can contain a maximum
- * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)),
- * so to calculate the number of indirect pages to use we have to do
- * ceil(indirect_segments/512).
+ * These pages are filled with an array of blkif_request_segment that hold the
+ * information about the segments. The number of indirect pages to use is
+ * determined by the number of segments an indirect request contains. Every
+ * indirect page can contain a maximum of
+ * (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to
+ * calculate the number of indirect pages to use we have to do
+ * ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))).
  *
  * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
  * create the "feature-max-indirect-segments" node!
@@ -135,13 +135,12 @@ typedef uint64_t blkif_sector_t;
 
 #define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
 
-struct blkif_request_segment_aligned {
+struct blkif_request_segment {
 	grant_ref_t gref;        /* reference to I/O buffer frame        */
 	/* @first_sect: first sector in frame to transfer (inclusive).   */
 	/* @last_sect: last sector in frame to transfer (inclusive).     */
 	uint8_t     first_sect, last_sect;
-	uint16_t    _pad; /* padding to make it 8 bytes, so it's cache-aligned */
-} __attribute__((__packed__));
+};
 
 struct blkif_request_rw {
 	uint8_t        nr_segments;  /* number of segments                   */
@@ -151,12 +150,7 @@ struct blkif_request_rw {
 #endif
 	uint64_t       id;           /* private guest value, echoed in resp  */
 	blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
-	struct blkif_request_segment {
-		grant_ref_t gref;        /* reference to I/O buffer frame        */
-		/* @first_sect: first sector in frame to transfer (inclusive).   */
-		/* @last_sect: last sector in frame to transfer (inclusive).     */
-		uint8_t     first_sect, last_sect;
-	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 } __attribute__((__packed__));
 
 struct blkif_request_discard {
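As a worked example of the ceil() calculation in the blkif.h comment above, here is a small sketch. It assumes 4096-byte pages; PAGE_SIZE is architecture-dependent, which is exactly why the commit removes the hard-coded 512 from the description.

/* indirect_pages.c: hedged sketch of the indirect-page math above. */
#include <stdio.h>

#define PAGE_SIZE 4096  /* assumption: 4 KiB pages; varies by architecture */
#define SEG_SIZE  8     /* sizeof(struct blkif_request_segment)            */
#define SEGS_PER_INDIRECT_FRAME (PAGE_SIZE / SEG_SIZE)  /* 512 here        */
#define INDIRECT_PAGES(_segs) \
	(((_segs) + SEGS_PER_INDIRECT_FRAME - 1) / SEGS_PER_INDIRECT_FRAME)

int main(void)
{
	/* ceil(256 / 512) == 1 and ceil(513 / 512) == 2 */
	printf("256 segments -> %d indirect page(s)\n", INDIRECT_PAGES(256));
	printf("513 segments -> %d indirect page(s)\n", INDIRECT_PAGES(513));
	return 0;
}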