Commit 60d21aac, authored by Kanchan Joshi, committed by Jens Axboe

block: support PI at non-zero offset within metadata

Block layer integrity processing assumes that protection information
(PI) is placed in the first bytes of each metadata block.

Remove this limitation and include the metadata before the PI in the
calculation of the guard tag.
Signed-off-by: Kanchan Joshi <joshi.k@samsung.com>
Signed-off-by: Chinmay Gameti <c.gameti@samsung.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Link: https://lore.kernel.org/r/20240201130126.211402-3-joshi.k@samsung.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6b5c132a
...@@ -395,6 +395,7 @@ static blk_status_t bio_integrity_process(struct bio *bio, ...@@ -395,6 +395,7 @@ static blk_status_t bio_integrity_process(struct bio *bio,
iter.tuple_size = bi->tuple_size; iter.tuple_size = bi->tuple_size;
iter.seed = proc_iter->bi_sector; iter.seed = proc_iter->bi_sector;
iter.prot_buf = bvec_virt(bip->bip_vec); iter.prot_buf = bvec_virt(bip->bip_vec);
iter.pi_offset = bi->pi_offset;
__bio_for_each_segment(bv, bio, bviter, *proc_iter) { __bio_for_each_segment(bv, bio, bviter, *proc_iter) {
void *kaddr = bvec_kmap_local(&bv); void *kaddr = bvec_kmap_local(&bv);
......
...@@ -370,6 +370,7 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template ...@@ -370,6 +370,7 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template
bi->profile = template->profile ? template->profile : &nop_profile; bi->profile = template->profile ? template->profile : &nop_profile;
bi->tuple_size = template->tuple_size; bi->tuple_size = template->tuple_size;
bi->tag_size = template->tag_size; bi->tag_size = template->tag_size;
bi->pi_offset = template->pi_offset;
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue); blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue);
......
...@@ -32,12 +32,16 @@ static __be16 t10_pi_ip_fn(__be16 csum, void *data, unsigned int len) ...@@ -32,12 +32,16 @@ static __be16 t10_pi_ip_fn(__be16 csum, void *data, unsigned int len)
static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter, static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
csum_fn *fn, enum t10_dif_type type) csum_fn *fn, enum t10_dif_type type)
{ {
u8 offset = iter->pi_offset;
unsigned int i; unsigned int i;
for (i = 0 ; i < iter->data_size ; i += iter->interval) { for (i = 0 ; i < iter->data_size ; i += iter->interval) {
struct t10_pi_tuple *pi = iter->prot_buf; struct t10_pi_tuple *pi = iter->prot_buf + offset;
pi->guard_tag = fn(0, iter->data_buf, iter->interval); pi->guard_tag = fn(0, iter->data_buf, iter->interval);
if (offset)
pi->guard_tag = fn(pi->guard_tag, iter->prot_buf,
offset);
pi->app_tag = 0; pi->app_tag = 0;
if (type == T10_PI_TYPE1_PROTECTION) if (type == T10_PI_TYPE1_PROTECTION)
...@@ -56,12 +60,13 @@ static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter, ...@@ -56,12 +60,13 @@ static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter, static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
csum_fn *fn, enum t10_dif_type type) csum_fn *fn, enum t10_dif_type type)
{ {
u8 offset = iter->pi_offset;
unsigned int i; unsigned int i;
BUG_ON(type == T10_PI_TYPE0_PROTECTION); BUG_ON(type == T10_PI_TYPE0_PROTECTION);
for (i = 0 ; i < iter->data_size ; i += iter->interval) { for (i = 0 ; i < iter->data_size ; i += iter->interval) {
struct t10_pi_tuple *pi = iter->prot_buf; struct t10_pi_tuple *pi = iter->prot_buf + offset;
__be16 csum; __be16 csum;
if (type == T10_PI_TYPE1_PROTECTION || if (type == T10_PI_TYPE1_PROTECTION ||
...@@ -84,6 +89,8 @@ static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter, ...@@ -84,6 +89,8 @@ static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
} }
csum = fn(0, iter->data_buf, iter->interval); csum = fn(0, iter->data_buf, iter->interval);
if (offset)
csum = fn(csum, iter->prot_buf, offset);
if (pi->guard_tag != csum) { if (pi->guard_tag != csum) {
pr_err("%s: guard tag error at sector %llu " \ pr_err("%s: guard tag error at sector %llu " \
...@@ -134,8 +141,10 @@ static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter) ...@@ -134,8 +141,10 @@ static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
*/ */
static void t10_pi_type1_prepare(struct request *rq) static void t10_pi_type1_prepare(struct request *rq)
{ {
const int tuple_sz = rq->q->integrity.tuple_size; struct blk_integrity *bi = &rq->q->integrity;
const int tuple_sz = bi->tuple_size;
u32 ref_tag = t10_pi_ref_tag(rq); u32 ref_tag = t10_pi_ref_tag(rq);
u8 offset = bi->pi_offset;
struct bio *bio; struct bio *bio;
__rq_for_each_bio(bio, rq) { __rq_for_each_bio(bio, rq) {
...@@ -154,7 +163,7 @@ static void t10_pi_type1_prepare(struct request *rq) ...@@ -154,7 +163,7 @@ static void t10_pi_type1_prepare(struct request *rq)
p = bvec_kmap_local(&iv); p = bvec_kmap_local(&iv);
for (j = 0; j < iv.bv_len; j += tuple_sz) { for (j = 0; j < iv.bv_len; j += tuple_sz) {
struct t10_pi_tuple *pi = p; struct t10_pi_tuple *pi = p + offset;
if (be32_to_cpu(pi->ref_tag) == virt) if (be32_to_cpu(pi->ref_tag) == virt)
pi->ref_tag = cpu_to_be32(ref_tag); pi->ref_tag = cpu_to_be32(ref_tag);
...@@ -183,9 +192,11 @@ static void t10_pi_type1_prepare(struct request *rq) ...@@ -183,9 +192,11 @@ static void t10_pi_type1_prepare(struct request *rq)
*/ */
static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes) static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{ {
unsigned intervals = nr_bytes >> rq->q->integrity.interval_exp; struct blk_integrity *bi = &rq->q->integrity;
const int tuple_sz = rq->q->integrity.tuple_size; unsigned intervals = nr_bytes >> bi->interval_exp;
const int tuple_sz = bi->tuple_size;
u32 ref_tag = t10_pi_ref_tag(rq); u32 ref_tag = t10_pi_ref_tag(rq);
u8 offset = bi->pi_offset;
struct bio *bio; struct bio *bio;
__rq_for_each_bio(bio, rq) { __rq_for_each_bio(bio, rq) {
...@@ -200,7 +211,7 @@ static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes) ...@@ -200,7 +211,7 @@ static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
p = bvec_kmap_local(&iv); p = bvec_kmap_local(&iv);
for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) { for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
struct t10_pi_tuple *pi = p; struct t10_pi_tuple *pi = p + offset;
if (be32_to_cpu(pi->ref_tag) == ref_tag) if (be32_to_cpu(pi->ref_tag) == ref_tag)
pi->ref_tag = cpu_to_be32(virt); pi->ref_tag = cpu_to_be32(virt);
...@@ -288,12 +299,16 @@ static __be64 ext_pi_crc64(u64 crc, void *data, unsigned int len) ...@@ -288,12 +299,16 @@ static __be64 ext_pi_crc64(u64 crc, void *data, unsigned int len)
static blk_status_t ext_pi_crc64_generate(struct blk_integrity_iter *iter, static blk_status_t ext_pi_crc64_generate(struct blk_integrity_iter *iter,
enum t10_dif_type type) enum t10_dif_type type)
{ {
u8 offset = iter->pi_offset;
unsigned int i; unsigned int i;
for (i = 0 ; i < iter->data_size ; i += iter->interval) { for (i = 0 ; i < iter->data_size ; i += iter->interval) {
struct crc64_pi_tuple *pi = iter->prot_buf; struct crc64_pi_tuple *pi = iter->prot_buf + offset;
pi->guard_tag = ext_pi_crc64(0, iter->data_buf, iter->interval); pi->guard_tag = ext_pi_crc64(0, iter->data_buf, iter->interval);
if (offset)
pi->guard_tag = ext_pi_crc64(be64_to_cpu(pi->guard_tag),
iter->prot_buf, offset);
pi->app_tag = 0; pi->app_tag = 0;
if (type == T10_PI_TYPE1_PROTECTION) if (type == T10_PI_TYPE1_PROTECTION)
...@@ -319,10 +334,11 @@ static bool ext_pi_ref_escape(u8 *ref_tag) ...@@ -319,10 +334,11 @@ static bool ext_pi_ref_escape(u8 *ref_tag)
static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter, static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter,
enum t10_dif_type type) enum t10_dif_type type)
{ {
u8 offset = iter->pi_offset;
unsigned int i; unsigned int i;
for (i = 0; i < iter->data_size; i += iter->interval) { for (i = 0; i < iter->data_size; i += iter->interval) {
struct crc64_pi_tuple *pi = iter->prot_buf; struct crc64_pi_tuple *pi = iter->prot_buf + offset;
u64 ref, seed; u64 ref, seed;
__be64 csum; __be64 csum;
...@@ -344,6 +360,10 @@ static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter, ...@@ -344,6 +360,10 @@ static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter,
} }
csum = ext_pi_crc64(0, iter->data_buf, iter->interval); csum = ext_pi_crc64(0, iter->data_buf, iter->interval);
if (offset)
csum = ext_pi_crc64(be64_to_cpu(csum), iter->prot_buf,
offset);
if (pi->guard_tag != csum) { if (pi->guard_tag != csum) {
pr_err("%s: guard tag error at sector %llu " \ pr_err("%s: guard tag error at sector %llu " \
"(rcvd %016llx, want %016llx)\n", "(rcvd %016llx, want %016llx)\n",
...@@ -373,8 +393,10 @@ static blk_status_t ext_pi_type1_generate_crc64(struct blk_integrity_iter *iter) ...@@ -373,8 +393,10 @@ static blk_status_t ext_pi_type1_generate_crc64(struct blk_integrity_iter *iter)
static void ext_pi_type1_prepare(struct request *rq) static void ext_pi_type1_prepare(struct request *rq)
{ {
const int tuple_sz = rq->q->integrity.tuple_size; struct blk_integrity *bi = &rq->q->integrity;
const int tuple_sz = bi->tuple_size;
u64 ref_tag = ext_pi_ref_tag(rq); u64 ref_tag = ext_pi_ref_tag(rq);
u8 offset = bi->pi_offset;
struct bio *bio; struct bio *bio;
__rq_for_each_bio(bio, rq) { __rq_for_each_bio(bio, rq) {
...@@ -393,7 +415,7 @@ static void ext_pi_type1_prepare(struct request *rq) ...@@ -393,7 +415,7 @@ static void ext_pi_type1_prepare(struct request *rq)
p = bvec_kmap_local(&iv); p = bvec_kmap_local(&iv);
for (j = 0; j < iv.bv_len; j += tuple_sz) { for (j = 0; j < iv.bv_len; j += tuple_sz) {
struct crc64_pi_tuple *pi = p; struct crc64_pi_tuple *pi = p + offset;
u64 ref = get_unaligned_be48(pi->ref_tag); u64 ref = get_unaligned_be48(pi->ref_tag);
if (ref == virt) if (ref == virt)
...@@ -411,9 +433,11 @@ static void ext_pi_type1_prepare(struct request *rq) ...@@ -411,9 +433,11 @@ static void ext_pi_type1_prepare(struct request *rq)
static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes) static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{ {
unsigned intervals = nr_bytes >> rq->q->integrity.interval_exp; struct blk_integrity *bi = &rq->q->integrity;
const int tuple_sz = rq->q->integrity.tuple_size; unsigned intervals = nr_bytes >> bi->interval_exp;
const int tuple_sz = bi->tuple_size;
u64 ref_tag = ext_pi_ref_tag(rq); u64 ref_tag = ext_pi_ref_tag(rq);
u8 offset = bi->pi_offset;
struct bio *bio; struct bio *bio;
__rq_for_each_bio(bio, rq) { __rq_for_each_bio(bio, rq) {
...@@ -428,7 +452,7 @@ static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes) ...@@ -428,7 +452,7 @@ static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
p = bvec_kmap_local(&iv); p = bvec_kmap_local(&iv);
for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) { for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
struct crc64_pi_tuple *pi = p; struct crc64_pi_tuple *pi = p + offset;
u64 ref = get_unaligned_be48(pi->ref_tag); u64 ref = get_unaligned_be48(pi->ref_tag);
if (ref == ref_tag) if (ref == ref_tag)
......
...@@ -20,6 +20,7 @@ struct blk_integrity_iter { ...@@ -20,6 +20,7 @@ struct blk_integrity_iter {
unsigned int data_size; unsigned int data_size;
unsigned short interval; unsigned short interval;
unsigned char tuple_size; unsigned char tuple_size;
unsigned char pi_offset;
const char *disk_name; const char *disk_name;
}; };
......
...@@ -108,6 +108,7 @@ struct blk_integrity { ...@@ -108,6 +108,7 @@ struct blk_integrity {
const struct blk_integrity_profile *profile; const struct blk_integrity_profile *profile;
unsigned char flags; unsigned char flags;
unsigned char tuple_size; unsigned char tuple_size;
unsigned char pi_offset;
unsigned char interval_exp; unsigned char interval_exp;
unsigned char tag_size; unsigned char tag_size;
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment