Commit 9eb0432b authored by Harish Chegondi, committed by Doug Ledford

staging/hfi1: Move s_sde to read mostly section of hfi1_qp

This reduces L2 cache misses on s_sde in the _hfi1_schedule_send
function when it is invoked from post_send, thereby improving the
performance of post_send.
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Harish Chegondi <harish.chegondi@intel.com>
Signed-off-by: Jubin John <jubin.john@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 349ac71f
@@ -441,6 +441,7 @@ struct hfi1_qp {
 	struct hfi1_swqe *s_wq;  /* send work queue */
 	struct hfi1_mmap_info *ip;
 	struct ahg_ib_header *s_hdr; /* next packet header to send */
+	struct sdma_engine *s_sde; /* current sde */
 	/* sc for UC/RC QPs - based on ah for UD */
 	u8 s_sc;
 	unsigned long timeout_jiffies; /* computed from timeout */
@@ -506,7 +507,6 @@ struct hfi1_qp {
 	struct hfi1_swqe *s_wqe;
 	struct hfi1_sge_state s_sge; /* current send request data */
 	struct hfi1_mregion *s_rdma_mr;
-	struct sdma_engine *s_sde; /* current sde */
 	u32 s_cur_size; /* size of send packet in bytes */
 	u32 s_len; /* total length of s_sge */
 	u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
...
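
For readers unfamiliar with the layout trick, below is a minimal, hypothetical
userspace C sketch of the general idea behind a "read mostly" struct section.
The names (example_qp, selected_sde, cur_size, etc.) are invented for the
example and are not the actual hfi1_qp fields or kernel annotations: fields
that are only read on the send fast path are grouped together, while fields
written on every packet are pushed onto a separate cache line, so per-packet
writes do not keep invalidating the line holding the read-mostly pointers.

/*
 * Hypothetical illustration of splitting a struct into a read-mostly
 * section and a frequently-written section. Field names are made up
 * for this sketch and are not the hfi1_qp members.
 */
#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

#define CACHELINE 64

struct example_qp {
	/* Read-mostly: set up once, read on every send. */
	void *send_queue;            /* analogous role to s_wq */
	void *selected_sde;          /* analogous role to s_sde */
	unsigned long timeout_jiffies;

	/* Write-hot: updated per packet; start on a fresh cache line. */
	alignas(CACHELINE) unsigned int cur_size;
	unsigned int total_len;
};

int main(void)
{
	printf("selected_sde offset: %zu\n",
	       offsetof(struct example_qp, selected_sde));
	printf("cur_size offset:     %zu\n",
	       offsetof(struct example_qp, cur_size));
	return 0;
}

Compiling and running this prints the field offsets, showing that the
write-hot group begins on its own 64-byte boundary while the read-mostly
pointers share the leading cache line.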