Commit b59c9457 authored by Sreenivasa Honnur, committed by David S. Miller

vxge: Fixed MSIX interrupt configuration.

- Fixed MSI-X interrupt configuration to support non-contiguous vpaths in
  functions. Four MSI-X vectors are reserved per vpath internal to the chip;
  in all, there are 68 MSI-X vectors for the 17 vpaths in the chip. In
  multi-function configurations, non-contiguous vpaths are configured to
  represent a function. For instance, vpaths 0 and 8 can be configured to
  represent function zero. (The resulting vector numbering is sketched below.)

- If pci_enable_msix fails for the requested number of vectors, retry with
  fewer vectors by reducing the vpath count.
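
The per-vpath vector numbering this change relies on can be summarized in a
short sketch (illustrative only, not part of the patch: the two constants are
the ones used in vxge-main.h, while the helper functions and their names are
hypothetical and merely restate the arithmetic used in the diff below):

	/* Each vpath owns a block of 4 MSI-X vectors inside the chip. */
	#define VXGE_HW_VPATH_MSIX_ACTIVE	4
	/* Offset of the alarm vector within a vpath's block of vectors. */
	#define VXGE_ALARM_MSIX_ID		2

	/* Tx TIM vector for a (possibly non-contiguous) hardware vpath id. */
	static inline int vxge_tx_vector(int vpath_device_id)
	{
		return vpath_device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
	}

	/* Rx TIM vector immediately follows the Tx vector. */
	static inline int vxge_rx_vector(int vpath_device_id)
	{
		return vpath_device_id * VXGE_HW_VPATH_MSIX_ACTIVE + 1;
	}

	/* One alarm vector per function, derived from its first vpath id. */
	static inline int vxge_alarm_vector(int first_vp_id)
	{
		return first_vp_id * VXGE_HW_VPATH_MSIX_ACTIVE + VXGE_ALARM_MSIX_ID;
	}
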
Signed-off-by: Sreenivasa Honnur <sreenivasa.honnur@neterion.com>
Signed-off-by: Ramkrishna Vepa <ram.vepa@neterion.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4c5f3b21
@@ -1363,28 +1363,26 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
 void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
 {
 	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
-	int msix_id, alarm_msix_id;
-	int tim_msix_id[4] = {[0 ...3] = 0};
+	int msix_id = 0;
+	int tim_msix_id[4] = {0, 1, 0, 0};
+	int alarm_msix_id = VXGE_ALARM_MSIX_ID;
 	vxge_hw_vpath_intr_enable(vpath->handle);
 	if (vdev->config.intr_type == INTA)
 		vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
 	else {
-		msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
-		alarm_msix_id =
-			VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
-		tim_msix_id[0] = msix_id;
-		tim_msix_id[1] = msix_id + 1;
 		vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
 			alarm_msix_id);
+		msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
 		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
 		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);
 		/* enable the alarm vector */
-		vxge_hw_vpath_msix_unmask(vpath->handle, alarm_msix_id);
+		msix_id = (vpath->handle->vpath->hldev->first_vp_id *
+			VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
+		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
 	}
 }
@@ -1405,12 +1403,13 @@ void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
 	if (vdev->config.intr_type == INTA)
 		vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
 	else {
-		msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
+		msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
 		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
 		vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);
 		/* disable the alarm vector */
-		msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
+		msix_id = (vpath->handle->vpath->hldev->first_vp_id *
+			VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
 		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
 	}
 }
@@ -2223,19 +2222,18 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
 	enum vxge_hw_status status;
 	struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
 	struct vxgedev *vdev = vpath->vdev;
-	int alarm_msix_id =
-		VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
+	int msix_id = (vpath->handle->vpath->vp_id *
+		VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
 	for (i = 0; i < vdev->no_of_vpath; i++) {
-		vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle,
-			alarm_msix_id);
+		vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
 		status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
 			vdev->exec_mode);
 		if (status == VXGE_HW_OK) {
 			vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
-				alarm_msix_id);
+				msix_id);
 			continue;
 		}
 		vxge_debug_intr(VXGE_ERR,
@@ -2248,18 +2246,17 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
 static int vxge_alloc_msix(struct vxgedev *vdev)
 {
 	int j, i, ret = 0;
-	int intr_cnt = 0;
-	int alarm_msix_id = 0, msix_intr_vect = 0;
+	int msix_intr_vect = 0, temp;
 	vdev->intr_cnt = 0;
+start:
 	/* Tx/Rx MSIX Vectors count */
 	vdev->intr_cnt = vdev->no_of_vpath * 2;
 	/* Alarm MSIX Vectors count */
 	vdev->intr_cnt++;
-	intr_cnt = (vdev->max_vpath_supported * 2) + 1;
-	vdev->entries = kzalloc(intr_cnt * sizeof(struct msix_entry),
+	vdev->entries = kzalloc(vdev->intr_cnt * sizeof(struct msix_entry),
 				GFP_KERNEL);
 	if (!vdev->entries) {
 		vxge_debug_init(VXGE_ERR,
@@ -2268,7 +2265,8 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
 		return -ENOMEM;
 	}
-	vdev->vxge_entries = kzalloc(intr_cnt * sizeof(struct vxge_msix_entry),
+	vdev->vxge_entries =
+		kzalloc(vdev->intr_cnt * sizeof(struct vxge_msix_entry),
 				GFP_KERNEL);
 	if (!vdev->vxge_entries) {
 		vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
@@ -2277,9 +2275,7 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
 		return -ENOMEM;
 	}
-	/* Last vector in the list is used for alarm */
-	alarm_msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
-	for (i = 0, j = 0; i < vdev->max_vpath_supported; i++) {
+	for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
 		msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
@@ -2297,47 +2293,31 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
 	}
 	/* Initialize the alarm vector */
-	vdev->entries[j].entry = alarm_msix_id;
-	vdev->vxge_entries[j].entry = alarm_msix_id;
+	vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
+	vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
 	vdev->vxge_entries[j].in_use = 0;
-	ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
-	/* if driver request exceeeds available irq's, request with a small
-	 * number.
-	 */
-	if (ret > 0) {
-		vxge_debug_init(VXGE_ERR,
-			"%s: MSI-X enable failed for %d vectors, available: %d",
-			VXGE_DRIVER_NAME, intr_cnt, ret);
-		vdev->max_vpath_supported = vdev->no_of_vpath;
-		intr_cnt = (vdev->max_vpath_supported * 2) + 1;
-		/* Reset the alarm vector setting */
-		vdev->entries[j].entry = 0;
-		vdev->vxge_entries[j].entry = 0;
-		/* Initialize the alarm vector with new setting */
-		vdev->entries[intr_cnt - 1].entry = alarm_msix_id;
-		vdev->vxge_entries[intr_cnt - 1].entry = alarm_msix_id;
-		vdev->vxge_entries[intr_cnt - 1].in_use = 0;
-		ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
-		if (!ret)
-			vxge_debug_init(VXGE_ERR,
-				"%s: MSI-X enabled for %d vectors",
-				VXGE_DRIVER_NAME, intr_cnt);
-	}
-	if (ret) {
+	ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
+	if (ret > 0) {
 		vxge_debug_init(VXGE_ERR,
 			"%s: MSI-X enable failed for %d vectors, ret: %d",
-			VXGE_DRIVER_NAME, intr_cnt, ret);
+			VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
 		kfree(vdev->entries);
 		kfree(vdev->vxge_entries);
 		vdev->entries = NULL;
 		vdev->vxge_entries = NULL;
+		if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3))
 			return -ENODEV;
-	}
+		/* Try with less no of vector by reducing no of vpaths count */
+		temp = (ret - 1)/2;
+		vxge_close_vpaths(vdev, temp);
+		vdev->no_of_vpath = temp;
+		goto start;
+	} else if (ret < 0)
+		return -ENODEV;
 	return 0;
 }
@@ -2345,43 +2325,26 @@ static int vxge_enable_msix(struct vxgedev *vdev)
 {
 	int i, ret = 0;
-	enum vxge_hw_status status;
 	/* 0 - Tx, 1 - Rx */
-	int tim_msix_id[4];
-	int alarm_msix_id = 0, msix_intr_vect = 0;
+	int tim_msix_id[4] = {0, 1, 0, 0};
 	vdev->intr_cnt = 0;
 	/* allocate msix vectors */
 	ret = vxge_alloc_msix(vdev);
 	if (!ret) {
-		/* Last vector in the list is used for alarm */
-		alarm_msix_id =
-			VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
 		for (i = 0; i < vdev->no_of_vpath; i++) {
 			/* If fifo or ring are not enabled
 			   the MSIX vector for that should be set to 0
 			   Hence initializeing this array to all 0s.
 			*/
-			memset(tim_msix_id, 0, sizeof(tim_msix_id));
-			msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
-			tim_msix_id[0] = msix_intr_vect;
-			tim_msix_id[1] = msix_intr_vect + 1;
-			vdev->vpaths[i].ring.rx_vector_no = tim_msix_id[1];
-			status = vxge_hw_vpath_msix_set(
-				vdev->vpaths[i].handle,
-				tim_msix_id, alarm_msix_id);
-			if (status != VXGE_HW_OK) {
-				vxge_debug_init(VXGE_ERR,
-					"vxge_hw_vpath_msix_set "
-					"failed with status : %x", status);
-				kfree(vdev->entries);
-				kfree(vdev->vxge_entries);
-				pci_disable_msix(vdev->pdev);
-				return -ENODEV;
-			}
+			vdev->vpaths[i].ring.rx_vector_no =
+				(vdev->vpaths[i].device_id *
+					VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
+			vxge_hw_vpath_msix_set(vdev->vpaths[i].handle,
+					tim_msix_id, VXGE_ALARM_MSIX_ID);
 		}
 	}
@@ -2392,7 +2355,7 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
 {
 	int intr_cnt;
-	for (intr_cnt = 0; intr_cnt < (vdev->max_vpath_supported * 2 + 1);
+	for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
 		intr_cnt++) {
 		if (vdev->vxge_entries[intr_cnt].in_use) {
 			synchronize_irq(vdev->entries[intr_cnt].vector);
@@ -2457,9 +2420,10 @@ static int vxge_add_isr(struct vxgedev *vdev)
 		switch (msix_idx) {
 		case 0:
 			snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
-				"%s:vxge fn: %d vpath: %d Tx MSI-X: %d",
-				vdev->ndev->name, pci_fun, vp_idx,
-				vdev->entries[intr_cnt].entry);
+				"%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
+				vdev->ndev->name,
+				vdev->entries[intr_cnt].entry,
+				pci_fun, vp_idx);
 			ret = request_irq(
 				vdev->entries[intr_cnt].vector,
 				vxge_tx_msix_handle, 0,
@@ -2471,9 +2435,10 @@ static int vxge_add_isr(struct vxgedev *vdev)
 			break;
 		case 1:
 			snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
-				"%s:vxge fn: %d vpath: %d Rx MSI-X: %d",
-				vdev->ndev->name, pci_fun, vp_idx,
-				vdev->entries[intr_cnt].entry);
+				"%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
+				vdev->ndev->name,
+				vdev->entries[intr_cnt].entry,
+				pci_fun, vp_idx);
 			ret = request_irq(
 				vdev->entries[intr_cnt].vector,
 				vxge_rx_msix_napi_handle,
@@ -2501,9 +2466,11 @@ static int vxge_add_isr(struct vxgedev *vdev)
 			if (irq_req) {
 				/* We requested for this msix interrupt */
 				vdev->vxge_entries[intr_cnt].in_use = 1;
+				msix_idx += vdev->vpaths[vp_idx].device_id *
+					VXGE_HW_VPATH_MSIX_ACTIVE;
 				vxge_hw_vpath_msix_unmask(
 					vdev->vpaths[vp_idx].handle,
-					intr_idx);
+					msix_idx);
 				intr_cnt++;
 			}
@@ -2513,16 +2480,17 @@ static int vxge_add_isr(struct vxgedev *vdev)
 			vp_idx++;
 		}
-		intr_cnt = vdev->max_vpath_supported * 2;
+		intr_cnt = vdev->no_of_vpath * 2;
 		snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
-			"%s:vxge Alarm fn: %d MSI-X: %d",
-			vdev->ndev->name, pci_fun,
-			vdev->entries[intr_cnt].entry);
+			"%s:vxge:MSI-X %d - Alarm - fn:%d",
+			vdev->ndev->name,
+			vdev->entries[intr_cnt].entry,
+			pci_fun);
 		/* For Alarm interrupts */
 		ret = request_irq(vdev->entries[intr_cnt].vector,
 					vxge_alarm_msix_handle, 0,
 					vdev->desc[intr_cnt],
-					&vdev->vpaths[vp_idx]);
+					&vdev->vpaths[0]);
 		if (ret) {
 			vxge_debug_init(VXGE_ERR,
 				"%s: MSIX - %d Registration failed",
@@ -2535,16 +2503,19 @@ static int vxge_add_isr(struct vxgedev *vdev)
 			goto INTA_MODE;
 		}
+		msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
+			VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
 		vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
-					intr_idx - 2);
+					msix_idx);
 		vdev->vxge_entries[intr_cnt].in_use = 1;
-		vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[vp_idx];
+		vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
 	}
 INTA_MODE:
 #endif
-	snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name);
 	if (vdev->config.intr_type == INTA) {
+		snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
+			"%s:vxge:INTA", vdev->ndev->name);
 		vxge_hw_device_set_intr_type(vdev->devh,
 			VXGE_HW_INTR_MODE_IRQLINE);
 		vxge_hw_vpath_tti_ci_set(vdev->devh,
......
@@ -31,6 +31,7 @@
 #define PCI_DEVICE_ID_TITAN_UNI		0x5833
 #define VXGE_USE_DEFAULT		0xffffffff
 #define VXGE_HW_VPATH_MSIX_ACTIVE	4
+#define VXGE_ALARM_MSIX_ID		2
 #define VXGE_HW_RXSYNC_FREQ_CNT		4
 #define VXGE_LL_WATCH_DOG_TIMEOUT	(15 * HZ)
 #define VXGE_LL_RX_COPY_THRESHOLD	256
......
@@ -231,8 +231,7 @@ void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
 {
 	__vxge_hw_pio_mem_write32_upper(
-		(u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
-			0, 32),
+		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
 		&channel->common_reg->set_msix_mask_vect[msix_id%4]);
 	return;
@@ -252,8 +251,7 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
 {
 	__vxge_hw_pio_mem_write32_upper(
-		(u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
-			0, 32),
+		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
 		&channel->common_reg->clear_msix_mask_vect[msix_id%4]);
 	return;
@@ -2220,29 +2218,24 @@ enum vxge_hw_status vxge_hw_vpath_alarm_process(
  * This API will associate a given MSIX vector numbers with the four TIM
  * interrupts and alarm interrupt.
  */
-enum vxge_hw_status
+void
 vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
 			int alarm_msix_id)
 {
 	u64 val64;
 	struct __vxge_hw_virtualpath *vpath = vp->vpath;
 	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
-	u32 first_vp_id = vpath->hldev->first_vp_id;
+	u32 vp_id = vp->vpath->vp_id;
 	val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
-		(first_vp_id * 4) + tim_msix_id[0]) |
+		(vp_id * 4) + tim_msix_id[0]) |
 		VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
-		(first_vp_id * 4) + tim_msix_id[1]) |
-		VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(
-		(first_vp_id * 4) + tim_msix_id[2]);
-	val64 |= VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(
-		(first_vp_id * 4) + tim_msix_id[3]);
+		(vp_id * 4) + tim_msix_id[1]);
 	writeq(val64, &vp_reg->interrupt_cfg0);
 	writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
-		(first_vp_id * 4) + alarm_msix_id),
+		(vpath->hldev->first_vp_id * 4) + alarm_msix_id),
 		&vp_reg->interrupt_cfg2);
 	if (vpath->hldev->config.intr_mode ==
@@ -2263,7 +2256,7 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
 			0, 32), &vp_reg->one_shot_vect3_en);
 	}
-	return VXGE_HW_OK;
+	return;
 }
 /**
@@ -2283,8 +2276,7 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
 {
 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
 	__vxge_hw_pio_mem_write32_upper(
-		(u32) vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
-			(msix_id / 4)), 0, 32),
+		(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
 		&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
 	return;
@@ -2309,14 +2301,12 @@ vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
 	if (hldev->config.intr_mode ==
 			VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
 		__vxge_hw_pio_mem_write32_upper(
-			(u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
-				(msix_id/4)), 0, 32),
+			(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
 			&hldev->common_reg->
 				clr_msix_one_shot_vec[msix_id%4]);
 	} else {
 		__vxge_hw_pio_mem_write32_upper(
-			(u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
-				(msix_id/4)), 0, 32),
+			(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
 			&hldev->common_reg->
 				clear_msix_mask_vect[msix_id%4]);
 	}
@@ -2341,8 +2331,7 @@ vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
 {
 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
 	__vxge_hw_pio_mem_write32_upper(
-		(u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
-			(msix_id/4)), 0, 32),
+		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
 		&hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
 	return;
......
@@ -2376,7 +2376,7 @@ enum vxge_hw_status vxge_hw_vpath_alarm_process(
 	struct __vxge_hw_vpath_handle *vpath_handle,
 	u32 skip_alarms);
-enum vxge_hw_status
+void
 vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vpath_handle,
 	int *tim_msix_id, int alarm_msix_id);
......