Commit 17fb4f29 authored by Jubin John, committed by Doug Ledford

staging/rdma/hfi1: Fix code alignment

Fix code alignment to fix checkpatch check:
CHECK: Alignment should match open parenthesis
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Jubin John <jubin.john@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 4d114fdd
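The checkpatch check named in the commit message fires when a wrapped argument list is indented with tabs alone instead of being padded out to the column just after the call's opening parenthesis. A minimal before/after sketch of the kind of change applied throughout this patch follows; the format string and the unit variable are placeholders for illustration, not lines taken from the driver, and such reports typically come from running scripts/checkpatch.pl --file on the source:

/*
 * Before: the continuation line does not start under the character that
 * follows the '(' -- checkpatch reports
 * "CHECK: Alignment should match open parenthesis".
 */
dd_dev_info(dd,
    "example: unit %u\n", unit);

/*
 * After: the continuation line is padded so it begins directly under the
 * first argument of dd_dev_info().
 */
dd_dev_info(dd,
            "example: unit %u\n", unit);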
@@ -150,8 +150,8 @@ static int _opcode_stats_seq_show(struct seq_file *s, void *v)
if (!n_packets && !n_bytes)
return SEQ_SKIP;
seq_printf(s, "%02llx %llu/%llu\n", i,
(unsigned long long)n_packets,
(unsigned long long)n_bytes);
return 0;
}
@@ -246,7 +246,7 @@ __acquires(RCU)
}
static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
loff_t *pos)
{
struct qp_iter *iter = iter_ptr;
@@ -392,7 +392,7 @@ static ssize_t portnames_read(struct file *file, char __user *buf,
/* read the per-port counters */
static ssize_t portcntrs_debugfs_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
u64 *counters;
size_t avail;
@@ -413,7 +413,7 @@ static ssize_t portcntrs_debugfs_read(struct file *file, char __user *buf,
* read the per-port QSFP data for ppd
*/
static ssize_t qsfp_debugfs_dump(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct hfi1_pportdata *ppd;
char *tmp;
@@ -437,7 +437,7 @@ static ssize_t qsfp_debugfs_dump(struct file *file, char __user *buf,
/* Do an i2c write operation on the chain for the given HFI. */
static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos, u32 target)
{
struct hfi1_pportdata *ppd;
char *buff;
@@ -484,21 +484,21 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
/* Do an i2c write operation on chain for HFI 0. */
static ssize_t i2c1_debugfs_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
return __i2c_debugfs_write(file, buf, count, ppos, 0);
}
/* Do an i2c write operation on chain for HFI 1. */
static ssize_t i2c2_debugfs_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
return __i2c_debugfs_write(file, buf, count, ppos, 1);
}
/* Do an i2c read operation on the chain for the given HFI. */
static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos, u32 target)
{
struct hfi1_pportdata *ppd;
char *buff;
@@ -545,21 +545,21 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
/* Do an i2c read operation on chain for HFI 0. */
static ssize_t i2c1_debugfs_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
return __i2c_debugfs_read(file, buf, count, ppos, 0);
}
/* Do an i2c read operation on chain for HFI 1. */
static ssize_t i2c2_debugfs_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
return __i2c_debugfs_read(file, buf, count, ppos, 1);
}
/* Do a QSFP write operation on the i2c chain for the given HFI. */
static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos, u32 target)
{
struct hfi1_pportdata *ppd;
char *buff;
@@ -605,21 +605,21 @@ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf,
/* Do a QSFP write operation on i2c chain for HFI 0. */
static ssize_t qsfp1_debugfs_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
return __qsfp_debugfs_write(file, buf, count, ppos, 0);
}
/* Do a QSFP write operation on i2c chain for HFI 1. */
static ssize_t qsfp2_debugfs_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
return __qsfp_debugfs_write(file, buf, count, ppos, 1);
}
/* Do a QSFP read operation on the i2c chain for the given HFI. */
static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos, u32 target)
{
struct hfi1_pportdata *ppd;
char *buff;
@@ -665,14 +665,14 @@ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf,
/* Do a QSFP read operation on i2c chain for HFI 0. */
static ssize_t qsfp1_debugfs_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
return __qsfp_debugfs_read(file, buf, count, ppos, 0);
}
/* Do a QSFP read operation on i2c chain for HFI 1. */
static ssize_t qsfp2_debugfs_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
return __qsfp_debugfs_read(file, buf, count, ppos, 1);
}
...
@@ -438,7 +438,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
}
static inline void init_packet(struct hfi1_ctxtdata *rcd,
struct hfi1_packet *packet)
{
packet->rsize = rcd->rcvhdrqentsize; /* words */
packet->maxcnt = rcd->rcvhdrq_cnt * packet->rsize; /* words */
@@ -700,8 +700,9 @@ static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
* The +2 is the size of the RHF.
*/
prefetch_range(packet->ebuf,
packet->tlen - ((packet->rcd->rcvhdrqentsize -
(rhf_hdrq_offset(packet->rhf)
+ 2)) * 4));
}
/*
@@ -958,9 +959,9 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
prescan_rxq(rcd, &packet);
while (last == RCV_PKT_OK) {
if (unlikely(dd->do_drop &&
atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) ==
DROP_PACKET_ON)) {
dd->do_drop = 0;
/* On to the next packet */
@@ -990,8 +991,7 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
if (seq != rcd->seq_cnt)
last = RCV_PKT_DONE;
if (needset) {
dd_dev_info(dd, "Switching to NO_DMA_RTAIL\n");
set_all_nodma_rtail(dd);
needset = 0;
}
@@ -1234,7 +1234,7 @@ void hfi1_set_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
/* Need to start timer */
setup_timer(&ppd->led_override_timer, run_led_override,
(unsigned long)ppd);
ppd->led_override_timer.expires = jiffies + 1;
add_timer(&ppd->led_override_timer);
@@ -1271,8 +1271,8 @@ int hfi1_reset_device(int unit)
if (!dd->kregbase || !(dd->flags & HFI1_PRESENT)) {
dd_dev_info(dd,
"Invalid unit number %u or not initialized or not present\n",
unit);
ret = -ENXIO;
goto bail;
}
@@ -1302,11 +1302,11 @@ int hfi1_reset_device(int unit)
if (ret)
dd_dev_err(dd,
"Reinitialize unit %u after reset failed with %d\n",
unit, ret);
else
dd_dev_info(dd, "Reinitialized unit %u after resetting\n",
unit);
bail:
return ret;
@@ -1363,7 +1363,7 @@ int process_receive_bypass(struct hfi1_packet *packet)
handle_eflags(packet);
dd_dev_err(packet->rcd->dd,
"Bypass packets are not supported in normal operation. Dropping\n");
return RHF_RCV_CONTINUE;
}
@@ -1401,6 +1401,6 @@ int kdeth_process_eager(struct hfi1_packet *packet)
int process_receive_invalid(struct hfi1_packet *packet)
{
dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
rhf_rcv_type(packet->rhf));
return RHF_RCV_CONTINUE;
}
@@ -115,11 +115,9 @@ static DEFINE_MUTEX(eprom_mutex);
static void write_enable(struct hfi1_devdata *dd)
{
/* raise signal */
write_csr(dd, ASIC_GPIO_OUT, read_csr(dd, ASIC_GPIO_OUT) | EPROM_WP_N);
/* raise enable */
write_csr(dd, ASIC_GPIO_OE, read_csr(dd, ASIC_GPIO_OE) | EPROM_WP_N);
}
/*
@@ -128,11 +126,9 @@ static void write_enable(struct hfi1_devdata *dd)
static void write_disable(struct hfi1_devdata *dd)
{
/* lower signal */
write_csr(dd, ASIC_GPIO_OUT, read_csr(dd, ASIC_GPIO_OUT) & ~EPROM_WP_N);
/* lower enable */
write_csr(dd, ASIC_GPIO_OE, read_csr(dd, ASIC_GPIO_OE) & ~EPROM_WP_N);
}
/*
@@ -210,8 +206,8 @@ static int erase_range(struct hfi1_devdata *dd, u32 start, u32 len)
/* check the end points for the minimum erase */
if ((start & MASK_4KB) || (end & MASK_4KB)) {
dd_dev_err(dd,
"%s: non-aligned range (0x%x,0x%x) for a 4KB erase\n",
__func__, start, end);
return -EINVAL;
}
@@ -275,7 +271,7 @@ static int read_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr)
for (offset = 0; offset < len; offset += EP_PAGE_SIZE) {
read_page(dd, start + offset, buffer);
if (copy_to_user((void __user *)(addr + offset),
buffer, EP_PAGE_SIZE)) {
ret = -EFAULT;
goto done;
}
@@ -319,7 +315,7 @@ static int write_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr)
for (offset = 0; offset < len; offset += EP_PAGE_SIZE) {
if (copy_from_user(buffer, (void __user *)(addr + offset),
EP_PAGE_SIZE)) {
ret = -EFAULT;
goto done;
}
@@ -385,13 +381,13 @@ int handle_eprom_command(struct file *fp, const struct hfi1_cmd *cmd)
ret = acquire_hw_mutex(dd);
if (ret) {
dd_dev_err(dd,
"%s: unable to acquire hw mutex, no EPROM support\n",
__func__);
goto done_asic;
}
dd_dev_info(dd, "%s: cmd: type %d, len 0x%x, addr 0x%016llx\n",
__func__, cmd->type, cmd->len, cmd->addr);
switch (cmd->type) {
case HFI1_CMD_EP_INFO:
@@ -402,7 +398,7 @@ int handle_eprom_command(struct file *fp, const struct hfi1_cmd *cmd)
dev_id = read_device_id(dd);
/* addr points to a u32 user buffer */
if (copy_to_user((void __user *)cmd->addr, &dev_id,
sizeof(u32)))
ret = -EFAULT;
break;
@@ -430,7 +426,7 @@ int handle_eprom_command(struct file *fp, const struct hfi1_cmd *cmd)
default:
dd_dev_err(dd, "%s: unexpected command %d\n",
__func__, cmd->type);
ret = -EINVAL;
break;
}
@@ -464,19 +460,18 @@ int eprom_init(struct hfi1_devdata *dd)
ret = acquire_hw_mutex(dd);
if (ret) {
dd_dev_err(dd,
"%s: unable to acquire hw mutex, no EPROM support\n",
__func__);
goto done_asic;
}
/* reset EPROM to be sure it is in a good state */
/* set reset */
write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_EP_RESET_SMASK);
/* clear reset, set speed */
write_csr(dd, ASIC_EEP_CTL_STAT,
EP_SPEED_FULL << ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT);
/* wake the device with command "release powerdown NoID" */
write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_RELEASE_POWERDOWN_NOID);
...
@@ -1145,9 +1145,9 @@ static int user_init(struct file *fp)
* has done it.
*/
if (fd->subctxt) {
ret = wait_event_interruptible(uctxt->wait, !test_bit(
HFI1_CTXT_MASTER_UNINIT,
&uctxt->event_flags));
goto expected;
}
@@ -1592,7 +1592,7 @@ static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
/* NOTE: assumes unsigned long is 8 bytes */
static ssize_t ui_read(struct file *filp, char __user *buf, size_t count,
loff_t *f_pos)
{
struct hfi1_devdata *dd = filp->private_data;
void __iomem *base = dd->kregbase;
...
@@ -1715,8 +1715,9 @@ void restore_pci_variables(struct hfi1_devdata *dd);
int do_pcie_gen3_transition(struct hfi1_devdata *dd);
int parse_platform_config(struct hfi1_devdata *dd);
int get_platform_config_field(struct hfi1_devdata *dd,
enum platform_config_table_type_encoding
table_type, int table_index, int field_index,
u32 *data, u32 len);
const char *get_unit_name(int unit);
const char *get_card_name(struct rvt_dev_info *rdi);
...
@@ -149,7 +149,7 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
rcd = hfi1_create_ctxtdata(ppd, i, dd->node);
if (!rcd) {
dd_dev_err(dd,
"Unable to allocate kernel receive context, failing\n");
goto nomem;
}
/*
@@ -170,7 +170,7 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
if (!rcd->sc) {
dd_dev_err(dd,
"Unable to allocate kernel send context, failing\n");
dd->rcd[rcd->ctxt] = NULL;
hfi1_free_ctxtdata(dd, rcd);
goto nomem;
@@ -741,7 +741,7 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
lastfail = hfi1_setup_eagerbufs(rcd);
if (lastfail)
dd_dev_err(dd,
"failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
}
if (lastfail)
ret = lastfail;
@@ -797,8 +797,8 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
lastfail = bringup_serdes(ppd);
if (lastfail)
dd_dev_info(dd,
"Failed to bring up port %u\n",
ppd->port);
/*
* Set status even if port serdes is not initialized
@@ -1542,8 +1542,8 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
if (!rcd->rcvhdrq) {
dd_dev_err(dd,
"attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
amt, rcd->ctxt);
goto bail;
}
@@ -1587,8 +1587,8 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
bail_free:
dd_dev_err(dd,
"attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
rcd->ctxt);
vfree(rcd->user_event_mask);
rcd->user_event_mask = NULL;
dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
@@ -1678,7 +1678,7 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
if (rcd->egrbufs.rcvtid_size == round_mtu ||
!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
rcd->ctxt);
goto bail_rcvegrbuf_phys;
}
@@ -1760,14 +1760,14 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
rcd->egrbufs.rcvtids[idx].phys, order);
cond_resched();
}
goto bail;
bail_rcvegrbuf_phys:
for (idx = 0; idx < rcd->egrbufs.alloced &&
rcd->egrbufs.buffers[idx].addr;
idx++) {
dma_free_coherent(&dd->pcidev->dev,
rcd->egrbufs.buffers[idx].len,
...
@@ -135,18 +135,16 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup)
set_up_vl15(dd, dd->vau, dd->vl15_init);
assign_remote_cm_au_table(dd, dd->vcu);
ppd->neighbor_guid =
read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
ppd->neighbor_type =
read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
ppd->neighbor_port_number =
read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
dd_dev_info(dd, "Neighbor GUID: %llx Neighbor type %d\n",
ppd->neighbor_guid,
ppd->neighbor_type);
}
/* physical link went up */
...
@@ -217,10 +217,9 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev,
pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &dd->pcie_devctl);
pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL, &dd->pcie_lnkctl);
pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL2,
&dd->pcie_devctl2);
pci_read_config_dword(dd->pcidev, PCI_CFG_MSIX0, &dd->pci_msix0);
pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE1, &dd->pci_lnkctl3);
pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2, &dd->pci_tph2);
return 0;
@@ -271,7 +270,7 @@ void hfi1_pcie_flr(struct hfi1_devdata *dd)
clear:
pcie_capability_set_word(dd->pcidev, PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_BCR_FLR);
/* PCIe spec requires the function to be back within 100ms */
msleep(100);
}
@@ -377,8 +376,8 @@ int pcie_speeds(struct hfi1_devdata *dd)
pcie_capability_read_dword(dd->pcidev, PCI_EXP_LNKCAP, &linkcap);
if ((linkcap & PCI_EXP_LNKCAP_SLS) != GEN3_SPEED_VECTOR) {
dd_dev_info(dd,
"This HFI is not Gen3 capable, max speed 0x%x, need 0x3\n",
linkcap & PCI_EXP_LNKCAP_SLS);
dd->link_gen3_capable = 0;
}
@@ -432,19 +431,15 @@ void hfi1_enable_intx(struct pci_dev *pdev)
void restore_pci_variables(struct hfi1_devdata *dd)
{
pci_write_config_word(dd->pcidev, PCI_COMMAND, dd->pci_command);
pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, dd->pcibar0);
pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, dd->pcibar1);
pci_write_config_dword(dd->pcidev, PCI_ROM_ADDRESS, dd->pci_rom);
pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, dd->pcie_devctl);
pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL, dd->pcie_lnkctl);
pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL2,
dd->pcie_devctl2);
pci_write_config_dword(dd->pcidev, PCI_CFG_MSIX0, dd->pci_msix0);
pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE1, dd->pci_lnkctl3);
pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2, dd->pci_tph2);
}
@@ -746,21 +741,22 @@ static int load_eq_table(struct hfi1_devdata *dd, const u8 eq[11][3], u8 fs,
c0 = fs - (eq[i][PREC] / div) - (eq[i][POST] / div);
c_plus1 = eq[i][POST] / div;
pci_write_config_dword(pdev, PCIE_CFG_REG_PL102,
eq_value(c_minus1, c0, c_plus1));
/* check if these coefficients violate EQ rules */
pci_read_config_dword(dd->pcidev, PCIE_CFG_REG_PL105,
&violation);
if (violation
& PCIE_CFG_REG_PL105_GEN3_EQ_VIOLATE_COEF_RULES_SMASK){
if (hit_error == 0) {
dd_dev_err(dd,
"Gen3 EQ Table Coefficient rule violations\n");
dd_dev_err(dd, " prec attn post\n");
}
dd_dev_err(dd, " p%02d: %02x %02x %02x\n",
i, (u32)eq[i][0], (u32)eq[i][1],
(u32)eq[i][2]);
dd_dev_err(dd, " %02x %02x %02x\n",
(u32)c_minus1, (u32)c0, (u32)c_plus1);
hit_error = 1;
}
}
@@ -815,8 +811,8 @@ static int trigger_sbr(struct hfi1_devdata *dd)
list_for_each_entry(pdev, &dev->bus->devices, bus_list)
if (pdev != dev) {
dd_dev_err(dd,
"%s: another device is on the same bus\n",
__func__);
return -ENOTTY;
}
@@ -840,8 +836,8 @@ static void write_gasket_interrupt(struct hfi1_devdata *dd, int index,
u16 code, u16 data)
{
write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (index * 8),
(((u64)code << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_CODE_SHIFT) |
((u64)data << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_DATA_SHIFT)));
}
/*
@@ -851,14 +847,13 @@ static void arm_gasket_logic(struct hfi1_devdata *dd)
{
u64 reg;
reg = (((u64)1 << dd->hfi1_id) <<
ASIC_PCIE_SD_HOST_CMD_INTRPT_CMD_SHIFT) |
((u64)pcie_serdes_broadcast[dd->hfi1_id] <<
ASIC_PCIE_SD_HOST_CMD_SBUS_RCVR_ADDR_SHIFT |
ASIC_PCIE_SD_HOST_CMD_SBR_MODE_SMASK |
((u64)SBR_DELAY_US & ASIC_PCIE_SD_HOST_CMD_TIMER_MASK) <<
ASIC_PCIE_SD_HOST_CMD_TIMER_SHIFT);
write_csr(dd, ASIC_PCIE_SD_HOST_CMD, reg);
/* read back to push the write */
read_csr(dd, ASIC_PCIE_SD_HOST_CMD);
@@ -982,8 +977,8 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
/* if already at target speed, done (unless forced) */
if (dd->lbus_speed == target_speed) {
dd_dev_info(dd, "%s: PCIe already at gen%d, %s\n", __func__,
pcie_target,
pcie_force ? "re-doing anyway" : "skipping");
if (!pcie_force)
return 0;
}
@@ -1087,8 +1082,10 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
default_pset = DEFAULT_MCP_PSET;
}
pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL101,
(fs <<
PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_FS_SHIFT) |
(lf <<
PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_LF_SHIFT));
ret = load_eq_table(dd, eq, fs, div);
if (ret)
goto done;
@@ -1102,15 +1099,15 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
pcie_pset = default_pset;
if (pcie_pset > 10) { /* valid range is 0-10, inclusive */
dd_dev_err(dd, "%s: Invalid Eq Pset %u, setting to %d\n",
__func__, pcie_pset, default_pset);
pcie_pset = default_pset;
}
dd_dev_info(dd, "%s: using EQ Pset %u\n", __func__, pcie_pset);
pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL106,
((1 << pcie_pset) <<
PCIE_CFG_REG_PL106_GEN3_EQ_PSET_REQ_VEC_SHIFT) |
PCIE_CFG_REG_PL106_GEN3_EQ_EVAL2MS_DISABLE_SMASK |
PCIE_CFG_REG_PL106_GEN3_EQ_PHASE23_EXIT_MODE_SMASK);
/*
* step 5b: Do post firmware download steps via SBus
@@ -1165,13 +1162,13 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
parent = dd->pcidev->bus->self;
pcie_capability_read_word(parent, PCI_EXP_LNKCTL2, &lnkctl2);
dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
(u32)lnkctl2);
/* only write to parent if target is not as high as ours */
if ((lnkctl2 & LNKCTL2_TARGET_LINK_SPEED_MASK) < target_vector) {
lnkctl2 &= ~LNKCTL2_TARGET_LINK_SPEED_MASK;
lnkctl2 |= target_vector;
dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
(u32)lnkctl2);
pcie_capability_write_word(parent, PCI_EXP_LNKCTL2, lnkctl2);
} else {
dd_dev_info(dd, "%s: ..target speed is OK\n", __func__);
@@ -1180,11 +1177,11 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
dd_dev_info(dd, "%s: setting target link speed\n", __func__);
pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL2, &lnkctl2);
dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
(u32)lnkctl2);
lnkctl2 &= ~LNKCTL2_TARGET_LINK_SPEED_MASK;
lnkctl2 |= target_vector;
dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
(u32)lnkctl2);
pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL2, lnkctl2);
/* step 5h: arm gasket logic */
@@ -1221,8 +1218,8 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
ret = pci_read_config_word(dd->pcidev, PCI_VENDOR_ID, &vendor);
if (ret) {
dd_dev_info(dd,
"%s: read of VendorID failed after SBR, err %d\n",
__func__, ret);
return_error = 1;
goto done;
}
@@ -1273,8 +1270,8 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
& ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_STS_MASK;
if ((status & (1 << dd->hfi1_id)) == 0) {
dd_dev_err(dd,
"%s: gasket status 0x%x, expecting 0x%x\n",
__func__, status, 1 << dd->hfi1_id);
ret = -EIO;
goto done;
}
@@ -1291,13 +1288,13 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
/* update our link information cache */
update_lbus_info(dd);
dd_dev_info(dd, "%s: new speed and width: %s\n", __func__,
dd->lbus_info);
if (dd->lbus_speed != target_speed) { /* not target */
/* maybe retry */
do_retry = retry_count < pcie_retry;
dd_dev_err(dd, "PCIe link speed did not switch to Gen%d%s\n",
pcie_target, do_retry ? ", retrying" : "");
retry_count++;
if (do_retry) {
msleep(100); /* allow time to settle */
...
@@ -511,7 +511,7 @@ static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context)
sci = &dd->send_contexts[sw_index];
if (!sci->allocated) {
dd_dev_err(dd, "%s: sw_index %u not allocated? hw_context %u\n",
__func__, sw_index, hw_context);
}
sci->allocated = 0;
dd->hw_to_sw[hw_context] = INVALID_SCI;
@@ -627,7 +627,7 @@ void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
& SC(CREDIT_CTRL_THRESHOLD_MASK))
<< SC(CREDIT_CTRL_THRESHOLD_SHIFT));
write_kctxt_csr(sc->dd, sc->hw_context,
SC(CREDIT_CTRL), sc->credit_ctrl);
/* force a credit return on change to avoid a possible stall */
force_return = 1;
@@ -765,9 +765,9 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
/* set the default partition key */
write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY),
(DEFAULT_PKEY &
SC(CHECK_PARTITION_KEY_VALUE_MASK)) <<
SC(CHECK_PARTITION_KEY_VALUE_SHIFT));
/* per context type checks */
if (type == SC_USER) {
@@ -780,8 +780,8 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
/* set the send context check opcode mask and value */
write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE),
((u64)opmask << SC(CHECK_OPCODE_MASK_SHIFT)) |
((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT)));
/* set up credit return */
reg = pa & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK);
@@ -799,7 +799,7 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
thresh = sc_percent_to_threshold(sc, 50);
} else if (type == SC_USER) {
thresh = sc_percent_to_threshold(sc,
user_credit_return_threshold);
} else { /* kernel */
thresh = sc_mtu_to_threshold(sc, hfi1_max_mtu, hdrqentsize);
}
@@ -972,11 +972,11 @@ static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
if (loop > 500) {
/* timed out - bounce the link */
dd_dev_err(dd,
"%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
__func__, sc->sw_index,
sc->hw_context, (u32)reg);
queue_work(dd->pport->hfi1_wq,
&dd->pport->link_bounce_work);
break;
}
loop++;
@@ -1022,7 +1022,7 @@ int sc_restart(struct send_context *sc)
return -EINVAL;
dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index,
sc->hw_context);
/*
* Step 1: Wait for the context to actually halt.
@@ -1037,7 +1037,7 @@ int sc_restart(struct send_context *sc)
break;
if (loop > 100) {
dd_dev_err(dd, "%s: context %u(%u) not halting, skipping\n",
__func__, sc->sw_index, sc->hw_context);
return -ETIME;
}
loop++;
@@ -1063,9 +1063,9 @@ int sc_restart(struct send_context *sc)
break;
if (loop > 100) {
dd_dev_err(dd,
"%s: context %u(%u) timeout waiting for PIO buffers to zero, remaining %d\n",
__func__, sc->sw_index,
sc->hw_context, count);
}
loop++;
udelay(1);
@@ -1178,18 +1178,18 @@ void pio_reset_all(struct hfi1_devdata *dd)
if (ret == -EIO) {
/* clear the error */
write_csr(dd, SEND_PIO_ERR_CLEAR,
SEND_PIO_ERR_CLEAR_PIO_INIT_SM_IN_ERR_SMASK);
}
/* reset init all */
write_csr(dd, SEND_PIO_INIT_CTXT,
SEND_PIO_INIT_CTXT_PIO_ALL_CTXT_INIT_SMASK);
udelay(2);
ret = pio_init_wait_progress(dd);
if (ret < 0) {
dd_dev_err(dd,
"PIO send context init %s while initializing all PIO blocks\n",
ret == -ETIMEDOUT ? "is stuck" : "had an error");
}
}
@@ -1237,8 +1237,7 @@ int sc_enable(struct send_context *sc)
*/
reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS));
if (reg)
write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg);
/*
* The HW PIO initialization engine can handle only one init
@@ -1296,7 +1295,7 @@ void sc_return_credits(struct send_context *sc)
/* a 0->1 transition schedules a credit return */
write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE),
SC(CREDIT_FORCE_FORCE_RETURN_SMASK));
/*
* Ensure that the write is flushed and the credit return is
* scheduled. We care more about the 0 -> 1 transition.
@@ -1322,7 +1321,7 @@ void sc_drop(struct send_context *sc)
return;
dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n",
__func__, sc->sw_index, sc->hw_context);
}
/*
@@ -1472,7 +1471,7 @@ void sc_add_credit_return_intr(struct send_context *sc)
if (sc->credit_intr_count == 0) {
sc->credit_ctrl |= SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
write_kctxt_csr(sc->dd, sc->hw_context,
SC(CREDIT_CTRL), sc->credit_ctrl);
}
sc->credit_intr_count++;
spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
@@ -1494,7 +1493,7 @@ void sc_del_credit_return_intr(struct send_context *sc)
if (sc->credit_intr_count == 0) {
sc->credit_ctrl &= ~SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
write_kctxt_csr(sc->dd, sc->hw_context,
SC(CREDIT_CTRL), sc->credit_ctrl);
}
spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
}
@@ -1667,7 +1666,7 @@ void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context)
sw_index = dd->hw_to_sw[hw_context];
if (unlikely(sw_index >= dd->num_send_contexts)) {
dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n",
__func__, hw_context, sw_index);
goto done;
}
sc = dd->send_contexts[sw_index].sc;
@@ -1680,8 +1679,8 @@ void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context)
sw_index = dd->hw_to_sw[gc];
if (unlikely(sw_index >= dd->num_send_contexts)) {
dd_dev_err(dd,
"%s: invalid hw (%u) to sw (%u) mapping\n",
__func__, hw_context, sw_index);
continue;
}
sc_release_update(dd->send_contexts[sw_index].sc);
@@ -2009,8 +2008,8 @@ int init_credit_return(struct hfi1_devdata *dd)
if (!dd->cr_base[i].va) {
set_dev_node(&dd->pcidev->dev, dd->node);
dd_dev_err(dd,
"Unable to allocate credit return DMA range for NUMA %d\n",
i);
ret = -ENOMEM;
goto done;
}
@@ -2034,10 +2033,10 @@ void free_credit_return(struct hfi1_devdata *dd)
for (i = 0; i < num_numa; i++) {
if (dd->cr_base[i].va) {
dma_free_coherent(&dd->pcidev->dev,
TXE_NUM_CONTEXTS *
sizeof(struct credit_return),
dd->cr_base[i].va,
dd->cr_base[i].pa);
}
}
kfree(dd->cr_base);
...
@@ -289,7 +289,7 @@ void sc_flush(struct send_context *sc);
void sc_drop(struct send_context *sc);
void sc_stop(struct send_context *sc, int bit);
struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
pio_release_cb cb, void *arg);
void sc_release_update(struct send_context *sc);
void sc_return_credits(struct send_context *sc);
void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context);
@@ -322,7 +322,7 @@ void pio_send_control(struct hfi1_devdata *dd, int op);
void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
const void *from, size_t count);
void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc,
const void *from, size_t nbytes);
void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes);
void seg_pio_copy_end(struct pio_buf *pbuf);
...
@@ -200,7 +200,7 @@ void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
* o nbytes must not span a QW boundary
*/
static inline void read_low_bytes(struct pio_buf *pbuf, const void *from,
unsigned int nbytes)
{
unsigned long off;
@@ -227,7 +227,7 @@ static inline void read_low_bytes(struct pio_buf *pbuf, const void *from,
* o nbytes may span a QW boundary
*/
static inline void read_extra_bytes(struct pio_buf *pbuf,
const void *from, unsigned int nbytes)
{
unsigned long off = (unsigned long)from & 0x7;
unsigned int room, xbytes;
@@ -366,7 +366,7 @@ static inline void jcopy(u8 *dest, const u8 *src, u32 n)
* o from may _not_ be u64 aligned.
*/
static inline void read_low_bytes(struct pio_buf *pbuf, const void *from,
unsigned int nbytes)
{
jcopy(&pbuf->carry.val8[0], from, nbytes);
pbuf->carry_bytes = nbytes;
...@@ -381,7 +381,7 @@ static inline void read_low_bytes(struct pio_buf *pbuf, const void *from, ...@@ -381,7 +381,7 @@ static inline void read_low_bytes(struct pio_buf *pbuf, const void *from,
* o nbytes may span a QW boundary * o nbytes may span a QW boundary
*/ */
static inline void read_extra_bytes(struct pio_buf *pbuf, static inline void read_extra_bytes(struct pio_buf *pbuf,
const void *from, unsigned int nbytes) const void *from, unsigned int nbytes)
{ {
jcopy(&pbuf->carry.val8[pbuf->carry_bytes], from, nbytes); jcopy(&pbuf->carry.val8[pbuf->carry_bytes], from, nbytes);
pbuf->carry_bytes += nbytes; pbuf->carry_bytes += nbytes;
...@@ -437,7 +437,7 @@ static inline int carry_write8(struct pio_buf *pbuf, void *dest) ...@@ -437,7 +437,7 @@ static inline int carry_write8(struct pio_buf *pbuf, void *dest)
u64 zero = 0; u64 zero = 0;
jcopy(&pbuf->carry.val8[pbuf->carry_bytes], (u8 *)&zero, jcopy(&pbuf->carry.val8[pbuf->carry_bytes], (u8 *)&zero,
8 - pbuf->carry_bytes); 8 - pbuf->carry_bytes);
writeq(pbuf->carry.val64, dest); writeq(pbuf->carry.val64, dest);
return 1; return 1;
} }
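carry_write8 above zero-fills the unused tail of the carry before issuing a single 8-byte store. A self-contained sketch of that zero-padding step, with memcpy standing in for the driver's jcopy and a returned value standing in for the MMIO writeq:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

union carry {
	uint64_t val64;
	uint8_t  val8[8];
};

/* Pad a partially filled carry with zeroes and emit it as one quadword. */
static uint64_t flush_carry(union carry *c, unsigned int carry_bytes)
{
	uint64_t zero = 0;

	memcpy(&c->val8[carry_bytes], &zero, 8 - carry_bytes);
	return c->val64;	/* the driver would writeq() this to MMIO */
}

int main(void)
{
	union carry c = { .val64 = 0 };
	const uint8_t partial[3] = { 0xaa, 0xbb, 0xcc };

	memcpy(c.val8, partial, sizeof(partial));
	printf("0x%016llx\n", (unsigned long long)flush_carry(&c, 3));
	return 0;
}
```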
...@@ -457,7 +457,7 @@ static inline int carry_write8(struct pio_buf *pbuf, void *dest) ...@@ -457,7 +457,7 @@ static inline int carry_write8(struct pio_buf *pbuf, void *dest)
* @nbytes: bytes to copy * @nbytes: bytes to copy
*/ */
void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc, void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc,
const void *from, size_t nbytes) const void *from, size_t nbytes)
{ {
void __iomem *dest = pbuf->start + SOP_DISTANCE; void __iomem *dest = pbuf->start + SOP_DISTANCE;
void __iomem *send = dest + PIO_BLOCK_SIZE; void __iomem *send = dest + PIO_BLOCK_SIZE;
...@@ -647,7 +647,7 @@ static void mid_copy_mix(struct pio_buf *pbuf, const void *from, size_t nbytes) ...@@ -647,7 +647,7 @@ static void mid_copy_mix(struct pio_buf *pbuf, const void *from, size_t nbytes)
* Must handle nbytes < 8. * Must handle nbytes < 8.
*/ */
static void mid_copy_straight(struct pio_buf *pbuf, static void mid_copy_straight(struct pio_buf *pbuf,
const void *from, size_t nbytes) const void *from, size_t nbytes)
{ {
void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64)); void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
void __iomem *dend; /* 8-byte data end */ void __iomem *dend; /* 8-byte data end */
......
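mid_copy_straight's comment notes that it must also handle nbytes < 8. A simplified userspace analogue of that loop, copying whole quadwords first and then any sub-quadword tail (memcpy stands in for the device writes; the buffer sizes are arbitrary):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy whole 8-byte words, then the remaining tail bytes, if any. */
static void copy_straight(void *dst, const void *src, size_t nbytes)
{
	size_t qws = nbytes / 8;
	size_t tail = nbytes % 8;
	size_t i;

	for (i = 0; i < qws; i++)
		memcpy((uint8_t *)dst + i * 8, (const uint8_t *)src + i * 8, 8);
	if (tail)
		memcpy((uint8_t *)dst + qws * 8,
		       (const uint8_t *)src + qws * 8, tail);
}

int main(void)
{
	char src[] = "nineteen byte text.";	/* 19 bytes plus NUL */
	char dst[32] = { 0 };

	copy_straight(dst, src, sizeof(src));
	printf("%s\n", dst);
	return 0;
}
```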
...@@ -468,7 +468,7 @@ int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len, ...@@ -468,7 +468,7 @@ int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len,
if (port_num > dd->num_pports || port_num < 1) { if (port_num > dd->num_pports || port_num < 1) {
dd_dev_info(dd, "%s: Invalid port number %d\n", dd_dev_info(dd, "%s: Invalid port number %d\n",
__func__, port_num); __func__, port_num);
ret = -EINVAL; ret = -EINVAL;
goto set_zeroes; goto set_zeroes;
} }
......
...@@ -1773,8 +1773,8 @@ static inline void rc_cancel_ack(struct rvt_qp *qp) ...@@ -1773,8 +1773,8 @@ static inline void rc_cancel_ack(struct rvt_qp *qp)
* schedule a response to be sent. * schedule a response to be sent.
*/ */
static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data, static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
struct rvt_qp *qp, u32 opcode, u32 psn, int diff, struct rvt_qp *qp, u32 opcode, u32 psn,
struct hfi1_ctxtdata *rcd) int diff, struct hfi1_ctxtdata *rcd)
{ {
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
struct rvt_ack_entry *e; struct rvt_ack_entry *e;
......
...@@ -283,9 +283,10 @@ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr, ...@@ -283,9 +283,10 @@ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr,
if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix, if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix,
guid)) guid))
goto err; goto err;
if (!gid_ok(&hdr->u.l.grh.sgid, if (!gid_ok(
qp->alt_ah_attr.grh.dgid.global.subnet_prefix, &hdr->u.l.grh.sgid,
qp->alt_ah_attr.grh.dgid.global.interface_id)) qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
qp->alt_ah_attr.grh.dgid.global.interface_id))
goto err; goto err;
} }
if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0, if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0,
...@@ -317,9 +318,10 @@ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr, ...@@ -317,9 +318,10 @@ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr,
if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix, if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix,
guid)) guid))
goto err; goto err;
if (!gid_ok(&hdr->u.l.grh.sgid, if (!gid_ok(
qp->remote_ah_attr.grh.dgid.global.subnet_prefix, &hdr->u.l.grh.sgid,
qp->remote_ah_attr.grh.dgid.global.interface_id)) qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
qp->remote_ah_attr.grh.dgid.global.interface_id))
goto err; goto err;
} }
if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0, if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0,
......
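Both hunks above realign the same validation pattern: the packet's destination GID is checked against the local GID prefix and GUID, and the source GID against the subnet prefix and interface ID remembered in the queue pair's address handle. A simplified, self-contained illustration of that comparison (the GID layout below is a 2x64-bit stand-in, not the driver's header structures, and the values are invented):

```c
#include <stdint.h>
#include <stdio.h>

struct gid {
	uint64_t subnet_prefix;
	uint64_t interface_id;
};

/* A GID matches when both the subnet prefix and the interface ID agree. */
static int gid_matches(const struct gid *g, uint64_t prefix, uint64_t id)
{
	return g->subnet_prefix == prefix && g->interface_id == id;
}

int main(void)
{
	struct gid sgid = { .subnet_prefix = 0xfe80000000000000ULL,
			    .interface_id  = 0x0002c90300aabbccULL };

	/* validate against the values remembered for the remote peer */
	if (!gid_matches(&sgid, 0xfe80000000000000ULL, 0x0002c90300aabbccULL))
		printf("source GID mismatch, drop\n");
	else
		printf("source GID ok\n");
	return 0;
}
```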
...@@ -61,8 +61,8 @@ ...@@ -61,8 +61,8 @@
* Congestion control table size followed by table entries * Congestion control table size followed by table entries
*/ */
static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj, static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count) char *buf, loff_t pos, size_t count)
{ {
int ret; int ret;
struct hfi1_pportdata *ppd = struct hfi1_pportdata *ppd =
...@@ -110,8 +110,8 @@ static struct bin_attribute cc_table_bin_attr = { ...@@ -110,8 +110,8 @@ static struct bin_attribute cc_table_bin_attr = {
* trigger threshold and the minimum injection rate delay. * trigger threshold and the minimum injection rate delay.
*/ */
static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj, static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count) char *buf, loff_t pos, size_t count)
{ {
int ret; int ret;
struct hfi1_pportdata *ppd = struct hfi1_pportdata *ppd =
...@@ -550,7 +550,7 @@ static ssize_t show_nctxts(struct device *device, ...@@ -550,7 +550,7 @@ static ssize_t show_nctxts(struct device *device,
} }
static ssize_t show_nfreectxts(struct device *device, static ssize_t show_nfreectxts(struct device *device,
struct device_attribute *attr, char *buf) struct device_attribute *attr, char *buf)
{ {
struct hfi1_ibdev *dev = struct hfi1_ibdev *dev =
container_of(device, struct hfi1_ibdev, rdi.ibdev.dev); container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
...@@ -660,8 +660,8 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, ...@@ -660,8 +660,8 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
if (!port_num || port_num > dd->num_pports) { if (!port_num || port_num > dd->num_pports) {
dd_dev_err(dd, dd_dev_err(dd,
"Skipping infiniband class with invalid port %u\n", "Skipping infiniband class with invalid port %u\n",
port_num); port_num);
return -ENODEV; return -ENODEV;
} }
ppd = &dd->pport[port_num - 1]; ppd = &dd->pport[port_num - 1];
...@@ -700,34 +700,32 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, ...@@ -700,34 +700,32 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
kobj, "CCMgtA"); kobj, "CCMgtA");
if (ret) { if (ret) {
dd_dev_err(dd, dd_dev_err(dd,
"Skipping Congestion Control sysfs info, (err %d) port %u\n", "Skipping Congestion Control sysfs info, (err %d) port %u\n",
ret, port_num); ret, port_num);
goto bail_vl2mtu; goto bail_vl2mtu;
} }
kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD); kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
ret = sysfs_create_bin_file(&ppd->pport_cc_kobj, ret = sysfs_create_bin_file(&ppd->pport_cc_kobj, &cc_setting_bin_attr);
&cc_setting_bin_attr);
if (ret) { if (ret) {
dd_dev_err(dd, dd_dev_err(dd,
"Skipping Congestion Control setting sysfs info, (err %d) port %u\n", "Skipping Congestion Control setting sysfs info, (err %d) port %u\n",
ret, port_num); ret, port_num);
goto bail_cc; goto bail_cc;
} }
ret = sysfs_create_bin_file(&ppd->pport_cc_kobj, ret = sysfs_create_bin_file(&ppd->pport_cc_kobj, &cc_table_bin_attr);
&cc_table_bin_attr);
if (ret) { if (ret) {
dd_dev_err(dd, dd_dev_err(dd,
"Skipping Congestion Control table sysfs info, (err %d) port %u\n", "Skipping Congestion Control table sysfs info, (err %d) port %u\n",
ret, port_num); ret, port_num);
goto bail_cc_entry_bin; goto bail_cc_entry_bin;
} }
dd_dev_info(dd, dd_dev_info(dd,
"IB%u: Congestion Control Agent enabled for port %d\n", "IB%u: Congestion Control Agent enabled for port %d\n",
dd->unit, port_num); dd->unit, port_num);
return 0; return 0;
......
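The error paths realigned above follow the usual goto-unwind ladder: each successfully created sysfs file is torn down in reverse order when a later step fails. A standalone illustration of that unwinding pattern (the three fake "create" steps and the injected failure are invented for the example and only mimic the bail_* labels above):

```c
#include <stdio.h>

static int create_step(const char *name, int should_fail)
{
	if (should_fail) {
		printf("creating %s: failed\n", name);
		return -1;
	}
	printf("creating %s: ok\n", name);
	return 0;
}

static void remove_step(const char *name)
{
	printf("removing %s\n", name);
}

int main(void)
{
	int ret;

	ret = create_step("cc_setting_bin", 0);
	if (ret)
		goto bail;
	ret = create_step("cc_table_bin", 0);
	if (ret)
		goto bail_setting;
	ret = create_step("cc_extra_file", 1);	/* inject a failure */
	if (ret)
		goto bail_table;
	return 0;

bail_table:
	remove_step("cc_table_bin");
bail_setting:
	remove_step("cc_setting_bin");
bail:
	return 1;
}
```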
...@@ -109,17 +109,17 @@ const char *parse_everbs_hdrs( ...@@ -109,17 +109,17 @@ const char *parse_everbs_hdrs(
case OP(RC, RDMA_WRITE_LAST_WITH_IMMEDIATE): case OP(RC, RDMA_WRITE_LAST_WITH_IMMEDIATE):
case OP(UC, RDMA_WRITE_LAST_WITH_IMMEDIATE): case OP(UC, RDMA_WRITE_LAST_WITH_IMMEDIATE):
trace_seq_printf(p, IMM_PRN, trace_seq_printf(p, IMM_PRN,
be32_to_cpu(eh->imm_data)); be32_to_cpu(eh->imm_data));
break; break;
/* reth + imm */ /* reth + imm */
case OP(RC, RDMA_WRITE_ONLY_WITH_IMMEDIATE): case OP(RC, RDMA_WRITE_ONLY_WITH_IMMEDIATE):
case OP(UC, RDMA_WRITE_ONLY_WITH_IMMEDIATE): case OP(UC, RDMA_WRITE_ONLY_WITH_IMMEDIATE):
trace_seq_printf(p, RETH_PRN " " IMM_PRN, trace_seq_printf(p, RETH_PRN " " IMM_PRN,
(unsigned long long)ib_u64_get( (unsigned long long)ib_u64_get(
(__be32 *)&eh->rc.reth.vaddr), (__be32 *)&eh->rc.reth.vaddr),
be32_to_cpu(eh->rc.reth.rkey), be32_to_cpu(eh->rc.reth.rkey),
be32_to_cpu(eh->rc.reth.length), be32_to_cpu(eh->rc.reth.length),
be32_to_cpu(eh->rc.imm_data)); be32_to_cpu(eh->rc.imm_data));
break; break;
/* reth */ /* reth */
case OP(RC, RDMA_READ_REQUEST): case OP(RC, RDMA_READ_REQUEST):
...@@ -128,10 +128,10 @@ const char *parse_everbs_hdrs( ...@@ -128,10 +128,10 @@ const char *parse_everbs_hdrs(
case OP(RC, RDMA_WRITE_ONLY): case OP(RC, RDMA_WRITE_ONLY):
case OP(UC, RDMA_WRITE_ONLY): case OP(UC, RDMA_WRITE_ONLY):
trace_seq_printf(p, RETH_PRN, trace_seq_printf(p, RETH_PRN,
(unsigned long long)ib_u64_get( (unsigned long long)ib_u64_get(
(__be32 *)&eh->rc.reth.vaddr), (__be32 *)&eh->rc.reth.vaddr),
be32_to_cpu(eh->rc.reth.rkey), be32_to_cpu(eh->rc.reth.rkey),
be32_to_cpu(eh->rc.reth.length)); be32_to_cpu(eh->rc.reth.length));
break; break;
case OP(RC, RDMA_READ_RESPONSE_FIRST): case OP(RC, RDMA_READ_RESPONSE_FIRST):
case OP(RC, RDMA_READ_RESPONSE_LAST): case OP(RC, RDMA_READ_RESPONSE_LAST):
...@@ -154,19 +154,20 @@ const char *parse_everbs_hdrs( ...@@ -154,19 +154,20 @@ const char *parse_everbs_hdrs(
case OP(RC, COMPARE_SWAP): case OP(RC, COMPARE_SWAP):
case OP(RC, FETCH_ADD): case OP(RC, FETCH_ADD):
trace_seq_printf(p, ATOMICETH_PRN, trace_seq_printf(p, ATOMICETH_PRN,
(unsigned long long)ib_u64_get(eh->atomic_eth.vaddr), (unsigned long long)ib_u64_get(
eh->atomic_eth.rkey, eh->atomic_eth.vaddr),
(unsigned long long)ib_u64_get( eh->atomic_eth.rkey,
(__be32 *)&eh->atomic_eth.swap_data), (unsigned long long)ib_u64_get(
(unsigned long long)ib_u64_get( (__be32 *)&eh->atomic_eth.swap_data),
(unsigned long long)ib_u64_get(
(__be32 *)&eh->atomic_eth.compare_data)); (__be32 *)&eh->atomic_eth.compare_data));
break; break;
/* deth */ /* deth */
case OP(UD, SEND_ONLY): case OP(UD, SEND_ONLY):
case OP(UD, SEND_ONLY_WITH_IMMEDIATE): case OP(UD, SEND_ONLY_WITH_IMMEDIATE):
trace_seq_printf(p, DETH_PRN, trace_seq_printf(p, DETH_PRN,
be32_to_cpu(eh->ud.deth[0]), be32_to_cpu(eh->ud.deth[0]),
be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK); be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK);
break; break;
} }
trace_seq_putc(p, 0); trace_seq_putc(p, 0);
...@@ -187,12 +188,12 @@ const char *parse_sdma_flags( ...@@ -187,12 +188,12 @@ const char *parse_sdma_flags(
trace_seq_printf(p, "%s", flags); trace_seq_printf(p, "%s", flags);
if (desc0 & SDMA_DESC0_FIRST_DESC_FLAG) if (desc0 & SDMA_DESC0_FIRST_DESC_FLAG)
trace_seq_printf(p, " amode:%u aidx:%u alen:%u", trace_seq_printf(p, " amode:%u aidx:%u alen:%u",
(u8)((desc1 >> SDMA_DESC1_HEADER_MODE_SHIFT) (u8)((desc1 >> SDMA_DESC1_HEADER_MODE_SHIFT) &
& SDMA_DESC1_HEADER_MODE_MASK), SDMA_DESC1_HEADER_MODE_MASK),
(u8)((desc1 >> SDMA_DESC1_HEADER_INDEX_SHIFT) (u8)((desc1 >> SDMA_DESC1_HEADER_INDEX_SHIFT) &
& SDMA_DESC1_HEADER_INDEX_MASK), SDMA_DESC1_HEADER_INDEX_MASK),
(u8)((desc1 >> SDMA_DESC1_HEADER_DWS_SHIFT) (u8)((desc1 >> SDMA_DESC1_HEADER_DWS_SHIFT) &
& SDMA_DESC1_HEADER_DWS_MASK)); SDMA_DESC1_HEADER_DWS_MASK));
return ret; return ret;
} }
......
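parse_sdma_flags above decodes three packed fields from a 64-bit descriptor by shifting and masking. A standalone sketch of the same extraction idiom (the shift and mask values below are invented for the example, not the real SDMA_DESC1_* constants):

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: mode in bits 0-3, index in bits 4-11, dws in 12-19. */
#define HDR_MODE_SHIFT   0
#define HDR_MODE_MASK    0xfull
#define HDR_INDEX_SHIFT  4
#define HDR_INDEX_MASK   0xffull
#define HDR_DWS_SHIFT    12
#define HDR_DWS_MASK     0xffull

int main(void)
{
	uint64_t desc1 = 0xa5b3ull;	/* example packed descriptor value */

	printf("amode:%u aidx:%u alen:%u\n",
	       (unsigned int)((desc1 >> HDR_MODE_SHIFT) & HDR_MODE_MASK),
	       (unsigned int)((desc1 >> HDR_INDEX_SHIFT) & HDR_INDEX_MASK),
	       (unsigned int)((desc1 >> HDR_DWS_SHIFT) & HDR_DWS_MASK));
	return 0;
}
```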
...@@ -131,7 +131,7 @@ static void scl_out(struct hfi1_devdata *dd, u32 target, u8 bit) ...@@ -131,7 +131,7 @@ static void scl_out(struct hfi1_devdata *dd, u32 target, u8 bit)
} }
if (rise_usec <= 0) if (rise_usec <= 0)
dd_dev_err(dd, "SCL interface stuck low > %d uSec\n", dd_dev_err(dd, "SCL interface stuck low > %d uSec\n",
SCL_WAIT_USEC); SCL_WAIT_USEC);
} }
i2c_wait_for_writes(dd, target); i2c_wait_for_writes(dd, target);
} }
......
...@@ -318,7 +318,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet) ...@@ -318,7 +318,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet)
sl = ibp->sc_to_sl[sc5]; sl = ibp->sc_to_sl[sc5];
process_becn(ppd, sl, rlid, lqpn, rqpn, process_becn(ppd, sl, rlid, lqpn, rqpn,
IB_CC_SVCTYPE_UC); IB_CC_SVCTYPE_UC);
} }
if (bth1 & HFI1_FECN_SMASK) { if (bth1 & HFI1_FECN_SMASK) {
......
...@@ -320,9 +320,10 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) ...@@ -320,9 +320,10 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (ah_attr->dlid < be16_to_cpu(IB_MULTICAST_LID_BASE) || if (ah_attr->dlid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
ah_attr->dlid == be16_to_cpu(IB_LID_PERMISSIVE)) { ah_attr->dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1); lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
if (unlikely(!loopback && (lid == ppd->lid || if (unlikely(!loopback &&
(lid == be16_to_cpu(IB_LID_PERMISSIVE) && (lid == ppd->lid ||
qp->ibqp.qp_type == IB_QPT_GSI)))) { (lid == be16_to_cpu(IB_LID_PERMISSIVE) &&
qp->ibqp.qp_type == IB_QPT_GSI)))) {
unsigned long flags; unsigned long flags;
/* /*
* If DMAs are in progress, we can't generate * If DMAs are in progress, we can't generate
......
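The hunk above reflows a loopback check built around LID-mask-control (LMC) masking: the low lmc bits of a destination LID are per-path bits, so the base LID is recovered by clearing them, exactly as the `~((1 << ppd->lmc) - 1)` expression does. A small standalone demonstration (the LID and LMC values are arbitrary):

```c
#include <stdint.h>
#include <stdio.h>

/* Clear the low 'lmc' path bits to recover the base LID. */
static uint16_t base_lid(uint16_t dlid, unsigned int lmc)
{
	return (uint16_t)(dlid & ~((1u << lmc) - 1));
}

int main(void)
{
	uint16_t dlid = 0x00c7;	/* example destination LID */
	unsigned int lmc = 2;	/* 4 LIDs assigned to the port */

	printf("dlid 0x%x -> base lid 0x%x\n",
	       (unsigned int)dlid, (unsigned int)base_lid(dlid, lmc));
	return 0;
}
```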
...@@ -728,7 +728,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, ...@@ -728,7 +728,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
} }
static inline u32 compute_data_length(struct user_sdma_request *req, static inline u32 compute_data_length(struct user_sdma_request *req,
struct user_sdma_txreq *tx) struct user_sdma_txreq *tx)
{ {
/* /*
* Determine the proper size of the packet data. * Determine the proper size of the packet data.
......
...@@ -970,7 +970,8 @@ static inline int egress_pkey_check(struct hfi1_pportdata *ppd, ...@@ -970,7 +970,8 @@ static inline int egress_pkey_check(struct hfi1_pportdata *ppd,
/* The most likely matching pkey has index qp->s_pkey_index */ /* The most likely matching pkey has index qp->s_pkey_index */
if (unlikely(!egress_pkey_matches_entry(pkey, if (unlikely(!egress_pkey_matches_entry(pkey,
ppd->pkeys[qp->s_pkey_index]))) { ppd->pkeys
[qp->s_pkey_index]))) {
/* no match - try the entire table */ /* no match - try the entire table */
for (; i < MAX_PKEY_VALUES; i++) { for (; i < MAX_PKEY_VALUES; i++) {
if (egress_pkey_matches_entry(pkey, ppd->pkeys[i])) if (egress_pkey_matches_entry(pkey, ppd->pkeys[i]))
......
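egress_pkey_check above first tries the entry at qp->s_pkey_index and only then scans the whole table. Partition keys compare on their low 15 bits, with the top bit carrying membership; a simplified standalone sketch of that matching rule (the table contents and lookup value are invented):

```c
#include <stdint.h>
#include <stdio.h>

#define PKEY_LOW_15 0x7fff

/* A zero entry is invalid; otherwise compare the low 15 key bits. */
static int pkey_matches_entry(uint16_t pkey, uint16_t entry)
{
	return entry && ((pkey & PKEY_LOW_15) == (entry & PKEY_LOW_15));
}

int main(void)
{
	const uint16_t pkeys[4] = { 0xffff, 0x8001, 0x0000, 0x7fff };
	uint16_t pkey = 0x0001;	/* limited-member form of 0x8001 */
	unsigned int i;

	for (i = 0; i < 4; i++)
		if (pkey_matches_entry(pkey, pkeys[i])) {
			printf("match at index %u\n", i);
			return 0;
		}
	printf("no match\n");
	return 1;
}
```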