Commit e527ff92 authored by Doug Ledford

Merge branch 'hfi1' into k.o/for-next

Signed-off-by: Doug Ledford <dledford@redhat.com>
parents bd8c2021 f9586abf
@@ -9185,25 +9185,6 @@ static int do_quick_linkup(struct hfi1_devdata *dd)
return 0; /* success */
}
/*
* Set the SerDes to internal loopback mode.
* Returns 0 on success, -errno on error.
*/
static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
{
int ret;
ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
if (ret == HCMD_SUCCESS)
return 0;
dd_dev_err(dd,
"Set physical link state to SerDes Loopback failed with return %d\n",
ret);
if (ret >= 0)
ret = -EINVAL;
return ret;
}
/*
* Do all special steps to set up loopback.
*/
@@ -9229,13 +9210,11 @@ static int init_loopback(struct hfi1_devdata *dd)
return 0;
}
/* handle serdes loopback */
if (loopback == LOOPBACK_SERDES) {
/* internal serdes loopback needs quick linkup on RTL */
if (dd->icode == ICODE_RTL_SILICON)
quick_linkup = 1;
return set_serdes_loopback_mode(dd);
}
/*
* SerDes loopback init sequence is handled in set_local_link_attributes
*/
if (loopback == LOOPBACK_SERDES)
return 0;
/* LCB loopback - handled at poll time */
if (loopback == LOOPBACK_LCB) {
@@ -9294,7 +9273,7 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd)
u8 tx_polarity_inversion;
u8 rx_polarity_inversion;
int ret;
u32 misc_bits = 0;
/* reset our fabric serdes to clear any lingering problems */
fabric_serdes_reset(dd);
@@ -9340,7 +9319,14 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd)
if (ret != HCMD_SUCCESS)
goto set_local_link_attributes_fail;
ret = write_vc_local_link_width(dd, 0, 0,
/*
* SerDes loopback init sequence requires
* setting bit 0 of MISC_CONFIG_BITS
*/
if (loopback == LOOPBACK_SERDES)
misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;
ret = write_vc_local_link_width(dd, misc_bits, 0,
opa_to_vc_link_widths(
ppd->link_width_enabled));
if (ret != HCMD_SUCCESS)
......
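For readers tracing the loopback rework above: instead of the removed set_serdes_loopback_mode() step, the driver now folds the request into the misc bits handed to write_vc_local_link_width(). Below is a minimal standalone sketch of that bit-building pattern; the shift macro mirrors the chip.h hunk, while the enum values and the demo main() are assumptions made only for illustration.

#include <stdint.h>
#include <stdio.h>

/* mirrors LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT from the chip.h hunk (bit 0) */
#define LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT 0

/* numeric values assumed for the demo; only LCB=2 and CABLE=3 appear in this diff */
enum { LOOPBACK_NONE = 0, LOOPBACK_SERDES = 1, LOOPBACK_LCB = 2, LOOPBACK_CABLE = 3 };

int main(void)
{
	uint32_t misc_bits = 0;
	int loopback = LOOPBACK_SERDES;	/* comes from a module parameter in the driver */

	/* same shape as the new hunk in set_local_link_attributes() */
	if (loopback == LOOPBACK_SERDES)
		misc_bits |= 1u << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;

	/* the driver then passes misc_bits to write_vc_local_link_width() */
	printf("misc_bits = 0x%x\n", misc_bits);
	return 0;
}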
@@ -582,6 +582,9 @@ enum {
#define LOOPBACK_LCB 2
#define LOOPBACK_CABLE 3 /* external cable */
/* set up serdes bit in MISC_CONFIG_BITS */
#define LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT 0
/* read and write hardware registers */
u64 read_csr(const struct hfi1_devdata *dd, u32 offset);
void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value);
......
@@ -328,6 +328,7 @@ struct diag_pkt {
#define SC15_PACKET 0xF
#define SIZE_OF_CRC 1
#define SIZE_OF_LT 1
#define MAX_16B_PADDING 12 /* CRC = 4, LT = 1, Pad = 0 to 7 bytes */
#define LIM_MGMT_P_KEY 0x7FFF
#define FULL_MGMT_P_KEY 0xFFFF
......
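As a quick sanity check on the new MAX_16B_PADDING constant, the worst case from the comment in the diff (4 CRC bytes, 1 LT byte, up to 7 pad bytes) adds up to 12. A trivial standalone tally, using demo-only names so as not to collide with the driver's SIZE_OF_CRC/SIZE_OF_LT macros shown above:

#include <stdio.h>

#define DEMO_CRC_BYTES 4	/* ICRC */
#define DEMO_LT_BYTES  1	/* LT byte */
#define DEMO_MAX_PAD   7	/* pad is 0 to 7 bytes */

int main(void)
{
	/* 4 + 1 + 7 = 12, matching MAX_16B_PADDING */
	printf("worst-case trailer = %d bytes\n",
	       DEMO_CRC_BYTES + DEMO_LT_BYTES + DEMO_MAX_PAD);
	return 0;
}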
@@ -1424,7 +1424,14 @@ int acquire_hw_mutex(struct hfi1_devdata *dd)
unsigned long timeout;
int try = 0;
u8 mask = 1 << dd->hfi1_id;
u8 user;
u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
if (user == mask) {
dd_dev_info(dd,
"Hardware mutex already acquired, mutex mask %u\n",
(u32)mask);
return 0;
}
retry:
timeout = msecs_to_jiffies(HM_TIMEOUT) + jiffies;
@@ -1455,7 +1462,15 @@ int acquire_hw_mutex(struct hfi1_devdata *dd)
void release_hw_mutex(struct hfi1_devdata *dd)
{
write_csr(dd, ASIC_CFG_MUTEX, 0);
u8 mask = 1 << dd->hfi1_id;
u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
if (user != mask)
dd_dev_warn(dd,
"Unable to release hardware mutex, mutex mask %u, my mask %u\n",
(u32)user, (u32)mask);
else
write_csr(dd, ASIC_CFG_MUTEX, 0);
}
/* return the given resource bit(s) as a mask for the given HFI */
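The two hunks above make the hardware mutex ownership-aware on both sides: acquire_hw_mutex() returns early if this HFI already holds it, and release_hw_mutex() refuses to clear a register it does not own. A minimal user-space model of that pattern, with a plain variable standing in for the ASIC_CFG_MUTEX CSR; the write/read-back retry loop and timeout of the real driver are omitted, and all names here are illustrative:

#include <stdint.h>
#include <stdio.h>

static uint8_t csr_mutex;		/* stands in for the ASIC_CFG_MUTEX register */

static int acquire(uint8_t my_mask)
{
	if (csr_mutex == my_mask)	/* already ours: nothing to do */
		return 0;
	/* the driver writes my_mask and reads it back until it sticks or times out */
	csr_mutex = my_mask;
	return (csr_mutex == my_mask) ? 0 : -1;
}

static void release(uint8_t my_mask)
{
	if (csr_mutex != my_mask) {	/* refuse to release what we do not own */
		fprintf(stderr, "mutex owned by %u, my mask %u\n", csr_mutex, my_mask);
		return;
	}
	csr_mutex = 0;
}

int main(void)
{
	uint8_t mask = 1u << 0;		/* 1 << dd->hfi1_id in the driver */

	if (acquire(mask) == 0) {
		/* ... hardware access guarded by the mutex ... */
		release(mask);
	}
	return 0;
}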
@@ -1770,7 +1785,7 @@ static int check_meta_version(struct hfi1_devdata *dd, u32 *system_table)
ver_start /= 8;
meta_ver = *((u8 *)system_table + ver_start) & ((1 << ver_len) - 1);
if (meta_ver < 5) {
if (meta_ver < 4) {
dd_dev_info(
dd, "%s:Please update platform config\n", __func__);
return -EINVAL;
......
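The check above treats the metadata version as a small bit-field: ver_start is converted from a bit offset to a byte offset, and ver_len selects the low-order bits of that byte. A standalone sketch of the same extraction; the table contents and field position are made up for the demo:

#include <stdint.h>
#include <stdio.h>

/* extract a ver_len-bit field starting at bit offset ver_start_bits */
static uint8_t get_meta_version(const uint8_t *table,
				uint32_t ver_start_bits, uint32_t ver_len)
{
	uint32_t byte_off = ver_start_bits / 8;		/* bits -> bytes, as in the hunk */

	return table[byte_off] & ((1u << ver_len) - 1);
}

int main(void)
{
	uint8_t table[8] = { 0, 0, 0x34, 0 };		/* made-up system table bytes */

	/* e.g. a 4-bit version field starting at bit 16: low nibble of 0x34 is 4 */
	printf("meta_ver = %u\n", (unsigned)get_meta_version(table, 16, 4));
	return 0;
}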
@@ -276,7 +276,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (IS_ERR(ps->s_txreq))
goto bail_no_tx;
ps->s_txreq->phdr.hdr.hdr_type = priv->hdr_type;
if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
/* header size in 32-bit words LRH+BTH = (8+12)/4. */
hwords = 5;
......
@@ -151,7 +151,7 @@ void hfi1_trace_parse_9b_bth(struct ib_other_headers *ohdr,
*opcode = ib_bth_get_opcode(ohdr);
*tver = ib_bth_get_tver(ohdr);
*pkey = ib_bth_get_pkey(ohdr);
*psn = ib_bth_get_psn(ohdr);
*psn = mask_psn(ib_bth_get_psn(ohdr));
*qpn = ib_bth_get_qpn(ohdr);
}
@@ -166,7 +166,7 @@ void hfi1_trace_parse_16b_bth(struct ib_other_headers *ohdr,
*pad = ib_bth_get_pad(ohdr);
*se = ib_bth_get_se(ohdr);
*tver = ib_bth_get_tver(ohdr);
*psn = ib_bth_get_psn(ohdr);
*psn = mask_psn(ib_bth_get_psn(ohdr));
*qpn = ib_bth_get_qpn(ohdr);
}
......
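Both trace hunks above mask the parsed PSN before it is logged, so the trace records the same value the protocol code actually compares. A minimal sketch of that masking; the 31-bit width used here is an assumption for the demo, since the driver's PSN_MASK definition is not part of this diff:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PSN_MASK 0x7FFFFFFFu	/* assumed field width, demo only */

static uint32_t demo_mask_psn(uint32_t raw)
{
	return raw & DEMO_PSN_MASK;	/* drop bits above the PSN field */
}

int main(void)
{
	uint32_t raw = 0x80000123u;	/* top bit set: not part of the PSN */

	printf("raw=0x%08x masked=0x%08x\n", raw, demo_mask_psn(raw));
	return 0;
}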
@@ -93,7 +93,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
goto done_free_tx;
}
ps->s_txreq->phdr.hdr.hdr_type = priv->hdr_type;
if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
/* header size in 32-bit words LRH+BTH = (8+12)/4. */
hwords = 5;
......
@@ -146,6 +146,9 @@ static int pio_wait(struct rvt_qp *qp,
/* Length of buffer to create verbs txreq cache name */
#define TXREQ_NAME_LEN 24
/* 16B trailing buffer */
static const u8 trail_buf[MAX_16B_PADDING];
static uint wss_threshold;
module_param(wss_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
@@ -813,7 +816,6 @@ static int build_verbs_tx_desc(
struct hfi1_sdma_header *phdr = &tx->phdr;
u16 hdrbytes = tx->hdr_dwords << 2;
u8 extra_bytes = 0;
static char trail_buf[12]; /* CRC = 4, LT = 1, Pad = 0 to 7 bytes */
if (tx->phdr.hdr.hdr_type) {
/*
@@ -865,9 +867,9 @@ static int build_verbs_tx_desc(
}
/* add icrc, lt byte, and padding to flit */
if (extra_bytes != 0)
if (extra_bytes)
ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
trail_buf, extra_bytes);
(void *)trail_buf, extra_bytes);
bail_txadd:
return ret;
@@ -1118,18 +1120,10 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
len -= slen;
}
}
/*
* Bypass packet will need to copy additional
* bytes to accommodate for CRC and LT bytes
*/
if (extra_bytes) {
u8 *empty_buf;
/* add icrc, lt byte, and padding to flit */
if (extra_bytes)
seg_pio_copy_mid(pbuf, trail_buf, extra_bytes);
empty_buf = kcalloc(extra_bytes, sizeof(u8),
GFP_KERNEL);
seg_pio_copy_mid(pbuf, empty_buf, extra_bytes);
kfree(empty_buf);
}
seg_pio_copy_end(pbuf);
}
......
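The verbs.c hunks above replace a per-packet kcalloc()/kfree() of a zeroed scratch buffer with a single static const zero buffer sized by MAX_16B_PADDING: the trailing ICRC/LT/pad bytes are only ever read, so one shared read-only source of zeros serves both the SDMA and PIO paths. A small standalone illustration of the same idea, with demo-only helper names:

#include <stdio.h>
#include <string.h>

#define MAX_16B_PADDING 12	/* CRC = 4, LT = 1, Pad = 0 to 7 bytes */

/* one shared, read-only source of zero bytes; no per-packet allocation */
static const unsigned char trail_buf[MAX_16B_PADDING];

static void copy_trailer(unsigned char *pkt, size_t off, size_t extra_bytes)
{
	/* plays the role of seg_pio_copy_mid(pbuf, trail_buf, extra_bytes) */
	memcpy(pkt + off, trail_buf, extra_bytes);
}

int main(void)
{
	unsigned char pkt[64];

	memset(pkt, 0xff, sizeof(pkt));
	copy_trailer(pkt, 32, 5);		/* e.g. 4 ICRC bytes + 1 LT byte */
	printf("pkt[32] = 0x%02x\n", pkt[32]);	/* prints 0x00 */
	return 0;
}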
@@ -92,6 +92,8 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
tx->psc = priv->s_sendcontext;
/* so that we can test if the sdma descriptors are there */
tx->txreq.num_desc = 0;
/* Set the header type */
tx->phdr.hdr.hdr_type = priv->hdr_type;
return tx;
}
......
@@ -351,7 +351,7 @@ int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
int last = 0;
int ret = 0;
if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
if (ibqp->qp_num <= 1)
return -EINVAL;
spin_lock_irq(&ibp->lock);
......
@@ -717,7 +717,6 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
/* take qp out the hash and wait for it to be unused */
rvt_remove_qp(rdi, qp);
wait_event(qp->wait, !atomic_read(&qp->refcount));
/* grab the lock b/c it was locked at call time */
spin_lock_irq(&qp->r_lock);
@@ -1444,6 +1443,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp)
spin_unlock(&qp->s_hlock);
spin_unlock_irq(&qp->r_lock);
wait_event(qp->wait, !atomic_read(&qp->refcount));
/* qpn is now available for use again */
rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
......