Commit 6f4ad14f authored by Avri Altman, committed by Martin K. Petersen

scsi: ufs: ufshpb: Region inactivation in host mode

In host mode, the host is expected to send HPB WRITE BUFFER with buffer-id
= 0x1 when it inactivates a region.

Use the map-requests pool as there is no point in assigning a designated
cache for umap-requests.

[mkp: REQ_OP_DRV_*]

Link: https://lore.kernel.org/r/20210712095039.8093-7-avri.altman@wdc.com
Reviewed-by: Daejun Park <daejun7.park@samsung.com>
Signed-off-by: Avri Altman <avri.altman@wdc.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 6c59cb50
...@@ -692,7 +692,8 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) ...@@ -692,7 +692,8 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
} }
static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb, static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
int rgn_idx, enum req_opf dir) int rgn_idx, enum req_opf dir,
bool atomic)
{ {
struct ufshpb_req *rq; struct ufshpb_req *rq;
struct request *req; struct request *req;
...@@ -706,7 +707,7 @@ static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb, ...@@ -706,7 +707,7 @@ static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
req = blk_get_request(hpb->sdev_ufs_lu->request_queue, dir, req = blk_get_request(hpb->sdev_ufs_lu->request_queue, dir,
BLK_MQ_REQ_NOWAIT); BLK_MQ_REQ_NOWAIT);
if ((PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) { if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
usleep_range(3000, 3100); usleep_range(3000, 3100);
goto retry; goto retry;
} }
...@@ -737,7 +738,7 @@ static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb, ...@@ -737,7 +738,7 @@ static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
struct ufshpb_req *map_req; struct ufshpb_req *map_req;
struct bio *bio; struct bio *bio;
map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN); map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN, false);
if (!map_req) if (!map_req)
return NULL; return NULL;
...@@ -914,6 +915,8 @@ static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb, ...@@ -914,6 +915,8 @@ static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH; rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
blk_execute_rq_nowait(NULL, req, 1, ufshpb_umap_req_compl_fn); blk_execute_rq_nowait(NULL, req, 1, ufshpb_umap_req_compl_fn);
hpb->stats.umap_req_cnt++;
} }
static int ufshpb_execute_map_req(struct ufshpb_lu *hpb, static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
...@@ -1090,12 +1093,13 @@ static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb, ...@@ -1090,12 +1093,13 @@ static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
} }
static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb, static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
struct ufshpb_region *rgn) struct ufshpb_region *rgn,
bool atomic)
{ {
struct ufshpb_req *umap_req; struct ufshpb_req *umap_req;
int rgn_idx = rgn ? rgn->rgn_idx : 0; int rgn_idx = rgn ? rgn->rgn_idx : 0;
umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT); umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT, atomic);
if (!umap_req) if (!umap_req)
return -ENOMEM; return -ENOMEM;
...@@ -1104,13 +1108,19 @@ static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb, ...@@ -1104,13 +1108,19 @@ static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
return 0; return 0;
} }
static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
struct ufshpb_region *rgn)
{
return ufshpb_issue_umap_req(hpb, rgn, true);
}
static int ufshpb_issue_umap_all_req(struct ufshpb_lu *hpb) static int ufshpb_issue_umap_all_req(struct ufshpb_lu *hpb)
{ {
return ufshpb_issue_umap_req(hpb, NULL); return ufshpb_issue_umap_req(hpb, NULL, false);
} }
static void __ufshpb_evict_region(struct ufshpb_lu *hpb, static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
struct ufshpb_region *rgn) struct ufshpb_region *rgn)
{ {
struct victim_select_info *lru_info; struct victim_select_info *lru_info;
struct ufshpb_subregion *srgn; struct ufshpb_subregion *srgn;
...@@ -1145,6 +1155,14 @@ static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) ...@@ -1145,6 +1155,14 @@ static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
goto out; goto out;
} }
if (hpb->is_hcm) {
spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
ret = ufshpb_issue_umap_single_req(hpb, rgn);
spin_lock_irqsave(&hpb->rgn_state_lock, flags);
if (ret)
goto out;
}
__ufshpb_evict_region(hpb, rgn); __ufshpb_evict_region(hpb, rgn);
} }
out: out:
...@@ -1279,6 +1297,18 @@ static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) ...@@ -1279,6 +1297,18 @@ static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
"LRU full (%d), choose victim %d\n", "LRU full (%d), choose victim %d\n",
atomic_read(&lru_info->active_cnt), atomic_read(&lru_info->active_cnt),
victim_rgn->rgn_idx); victim_rgn->rgn_idx);
if (hpb->is_hcm) {
spin_unlock_irqrestore(&hpb->rgn_state_lock,
flags);
ret = ufshpb_issue_umap_single_req(hpb,
victim_rgn);
spin_lock_irqsave(&hpb->rgn_state_lock,
flags);
if (ret)
goto out;
}
__ufshpb_evict_region(hpb, victim_rgn); __ufshpb_evict_region(hpb, victim_rgn);
} }
...@@ -1848,6 +1878,7 @@ ufshpb_sysfs_attr_show_func(rb_noti_cnt); ...@@ -1848,6 +1878,7 @@ ufshpb_sysfs_attr_show_func(rb_noti_cnt);
ufshpb_sysfs_attr_show_func(rb_active_cnt); ufshpb_sysfs_attr_show_func(rb_active_cnt);
ufshpb_sysfs_attr_show_func(rb_inactive_cnt); ufshpb_sysfs_attr_show_func(rb_inactive_cnt);
ufshpb_sysfs_attr_show_func(map_req_cnt); ufshpb_sysfs_attr_show_func(map_req_cnt);
ufshpb_sysfs_attr_show_func(umap_req_cnt);
static struct attribute *hpb_dev_stat_attrs[] = { static struct attribute *hpb_dev_stat_attrs[] = {
&dev_attr_hit_cnt.attr, &dev_attr_hit_cnt.attr,
...@@ -1856,6 +1887,7 @@ static struct attribute *hpb_dev_stat_attrs[] = { ...@@ -1856,6 +1887,7 @@ static struct attribute *hpb_dev_stat_attrs[] = {
&dev_attr_rb_active_cnt.attr, &dev_attr_rb_active_cnt.attr,
&dev_attr_rb_inactive_cnt.attr, &dev_attr_rb_inactive_cnt.attr,
&dev_attr_map_req_cnt.attr, &dev_attr_map_req_cnt.attr,
&dev_attr_umap_req_cnt.attr,
NULL, NULL,
}; };
...@@ -1981,6 +2013,7 @@ static void ufshpb_stat_init(struct ufshpb_lu *hpb) ...@@ -1981,6 +2013,7 @@ static void ufshpb_stat_init(struct ufshpb_lu *hpb)
hpb->stats.rb_active_cnt = 0; hpb->stats.rb_active_cnt = 0;
hpb->stats.rb_inactive_cnt = 0; hpb->stats.rb_inactive_cnt = 0;
hpb->stats.map_req_cnt = 0; hpb->stats.map_req_cnt = 0;
hpb->stats.umap_req_cnt = 0;
} }
static void ufshpb_param_init(struct ufshpb_lu *hpb) static void ufshpb_param_init(struct ufshpb_lu *hpb)
......
...@@ -191,6 +191,7 @@ struct ufshpb_stats { ...@@ -191,6 +191,7 @@ struct ufshpb_stats {
u64 rb_inactive_cnt; u64 rb_inactive_cnt;
u64 map_req_cnt; u64 map_req_cnt;
u64 pre_req_cnt; u64 pre_req_cnt;
u64 umap_req_cnt;
}; };
struct ufshpb_lu { struct ufshpb_lu {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment