Commit 284a83a0 authored by David S. Miller

Merge branch 'defxx-next'

Maciej W. Rozycki says:

====================
defxx: Fixes for 64-bit host support

 This mini patch series addresses issues with 64-bit host support for the
FDDI interface boards handled by the defxx driver, where DMA mapping
synchronisation is required on swiotlb systems.  While the PDQ, the DMA
engine chip used with these boards, supports 48-bit addressing, which would
normally suffice for typical 64-bit systems in existence, the host bus
interface chips used by individual implementations have their own
limitations, as follows:

* DEFTA or DEC FDDIcontroller/TURBOchannel -- there's no host bus
  interface chip; the PDQ connects to TURBOchannel directly.  TURBOchannel
  supports DMA addressing of up to 16GB (34-bit addressing); however, no
  TURBOchannel system has ever been made that supports more than 1GB of
  RAM, so in reality no remapping is ever required,

* DEFEA or DEC FDDIcontroller/EISA -- the ESIC EISA interface chip only
  supports 32-bit addressing, so all accesses beyond 4GB have to be
  remapped,

* DEFPA or DEC FDDIcontroller/PCI -- the PFI PCI interface chip rev. 1 & 2
  only support 32-bit addressing; they have only 32 AD lines both on the
  PDQ and the PCI side, and consequently no Dual Address Cycle support, so
  all accesses beyond 4GB have to be remapped; the range of addressing
  supported by PFI rev. 3 is currently not certain, however the chip is
  backwards compatible with earlier revisions and will work with code that
  supports them.
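
 As a rough illustration of the DMA API pattern these limitations call
for -- a sketch only, not code from this series; the helper name, the
device pointers and the buffer length below are made up -- a driver
constrains such a board to 32-bit addressing, checks dma_map_single()
for failure, and synchronises the mapping before the CPU reads data the
device has written, so that any swiotlb bounce buffer is copied back:

	/*
	 * Illustrative sketch only; not part of the defxx driver.  Only
	 * the dma_* and skb calls are real kernel APIs.
	 */
	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	#define EXAMPLE_RX_BUF_LEN	4608	/* arbitrary buffer size */

	static int example_rx_dma_cycle(struct device *dmadev,
					struct net_device *netdev)
	{
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		/*
		 * Boards whose bus interface only drives 32 address lines
		 * must not be handed addresses above 4GB; with a 32-bit
		 * mask set (normally done once at probe time), swiotlb
		 * bounce-buffers any higher buffers on the driver's behalf.
		 */
		if (dma_set_mask_and_coherent(dmadev, DMA_BIT_MASK(32)))
			return -EIO;

		skb = netdev_alloc_skb(netdev, EXAMPLE_RX_BUF_LEN);
		if (!skb)
			return -ENOMEM;

		/* Streaming mapping for device-to-host (receive) DMA. */
		dma_addr = dma_map_single(dmadev, skb->data,
					  EXAMPLE_RX_BUF_LEN,
					  DMA_FROM_DEVICE);
		/* Mapping can fail, e.g. on swiotlb exhaustion. */
		if (dma_mapping_error(dmadev, dma_addr)) {
			dev_kfree_skb(skb);
			return -ENOMEM;
		}

		/* ... hand dma_addr to the device; it fills the buffer ... */

		/*
		 * Before the CPU inspects what the device wrote, sync the
		 * mapping so any bounce buffer is copied back to skb->data.
		 */
		dma_sync_single_for_cpu(dmadev, dma_addr, EXAMPLE_RX_BUF_LEN,
					DMA_FROM_DEVICE);

		/* ... examine the descriptor/frame data in skb->data ... */

		dma_unmap_single(dmadev, dma_addr, EXAMPLE_RX_BUF_LEN,
				 DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
		return 0;
	}

 Where no bouncing or cache maintenance is needed these sync calls cost
next to nothing, which is why missing synchronisation only shows up on
hosts that actually have to remap.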

Some other issues discovered in the course of correcting 64-bit support
have been fixed as well.  Each of the patches is functionally
self-contained and can be applied independently, although there may be
mechanical dependencies making it necessary to apply patches in order.

 The driver suffers from non-standard formatting and while I did my best
with these bug fixes to follow our coding style, I found some pieces
hopeless, so checkpatch.pl will complain.  I plan to reformat the whole
driver, which will inevitably require factoring out some pieces into
separate functions, but that's going to be a major effort and therefore I
want to do this separately, with no functional changes made at the same
time.  If anyone has specific suggestions as to how to reformat any of the
pieces submitted here for a better layout, then I'll be happy to take them
into account.

 And last but not least, many thanks to Robert Coerver, who was the most
recent person to report this problem with the driver and was kind enough
to patiently try a few revisions of the driver update on his system as I
was finding and addressing issues.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 90fb5679 8848761f
@@ -196,6 +196,7 @@
* 14 Jun 2005 macro Use irqreturn_t.
* 23 Oct 2006 macro Big-endian host support.
* 14 Dec 2006 macro TURBOchannel support.
* 01 Jul 2014 macro Fixes for DMA on 64-bit hosts.
*/
/* Include files */
@@ -224,8 +225,8 @@
/* Version information string should be updated prior to each new release! */
#define DRV_NAME "defxx"
#define DRV_VERSION "v1.10"
#define DRV_RELDATE "2006/12/14"
#define DRV_VERSION "v1.11"
#define DRV_RELDATE "2014/07/01"
static char version[] =
DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
@@ -2923,21 +2924,35 @@ static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
{
struct sk_buff *newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE, GFP_NOIO);
struct sk_buff *newskb;
dma_addr_t dma_addr;
newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE,
GFP_NOIO);
if (!newskb)
return -ENOMEM;
bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
/*
* align to 128 bytes for compatibility with
* the old EISA boards.
*/
my_skb_align(newskb, 128);
bp->descr_block_virt->rcv_data[i + j].long_1 =
(u32)dma_map_single(bp->bus_dev, newskb->data,
NEW_SKB_SIZE,
dma_addr = dma_map_single(bp->bus_dev,
newskb->data,
PI_RCV_DATA_K_SIZE_MAX,
DMA_FROM_DEVICE);
if (dma_mapping_error(bp->bus_dev, dma_addr)) {
dev_kfree_skb(newskb);
return -ENOMEM;
}
bp->descr_block_virt->rcv_data[i + j].long_0 =
(u32)(PI_RCV_DESCR_M_SOP |
((PI_RCV_DATA_K_SIZE_MAX /
PI_ALIGN_K_RCV_DATA_BUFF) <<
PI_RCV_DESCR_V_SEG_LEN));
bp->descr_block_virt->rcv_data[i + j].long_1 =
(u32)dma_addr;
/*
* p_rcv_buff_va is only used inside the
* kernel so we put the skb pointer here.
@@ -3004,7 +3019,7 @@ static void dfx_rcv_queue_process(
PI_TYPE_2_CONSUMER *p_type_2_cons; /* ptr to rcv/xmt consumer block register */
char *p_buff; /* ptr to start of packet receive buffer (FMC descriptor) */
u32 descr, pkt_len; /* FMC descriptor field and packet length */
struct sk_buff *skb; /* pointer to a sk_buff to hold incoming packet data */
struct sk_buff *skb = NULL; /* pointer to a sk_buff to hold incoming packet data */
/* Service all consumed LLC receive frames */
@@ -3012,7 +3027,7 @@ static void dfx_rcv_queue_process(
while (bp->rcv_xmt_reg.index.rcv_comp != p_type_2_cons->index.rcv_cons)
{
/* Process any errors */
dma_addr_t dma_addr;
int entry;
entry = bp->rcv_xmt_reg.index.rcv_comp;
@@ -3021,6 +3036,11 @@ static void dfx_rcv_queue_process(
#else
p_buff = bp->p_rcv_buff_va[entry];
#endif
dma_addr = bp->descr_block_virt->rcv_data[entry].long_1;
dma_sync_single_for_cpu(bp->bus_dev,
dma_addr + RCV_BUFF_K_DESCR,
sizeof(u32),
DMA_FROM_DEVICE);
memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));
if (descr & PI_FMC_DESCR_M_RCC_FLUSH)
@@ -3042,31 +3062,46 @@ static void dfx_rcv_queue_process(
bp->rcv_length_errors++;
else{
#ifdef DYNAMIC_BUFFERS
struct sk_buff *newskb = NULL;
if (pkt_len > SKBUFF_RX_COPYBREAK) {
struct sk_buff *newskb;
dma_addr_t new_dma_addr;
newskb = dev_alloc_skb(NEW_SKB_SIZE);
newskb = netdev_alloc_skb(bp->dev,
NEW_SKB_SIZE);
if (newskb){
my_skb_align(newskb, 128);
new_dma_addr = dma_map_single(
bp->bus_dev,
newskb->data,
PI_RCV_DATA_K_SIZE_MAX,
DMA_FROM_DEVICE);
if (dma_mapping_error(
bp->bus_dev,
new_dma_addr)) {
dev_kfree_skb(newskb);
newskb = NULL;
}
}
if (newskb) {
rx_in_place = 1;
my_skb_align(newskb, 128);
skb = (struct sk_buff *)bp->p_rcv_buff_va[entry];
dma_unmap_single(bp->bus_dev,
bp->descr_block_virt->rcv_data[entry].long_1,
NEW_SKB_SIZE,
dma_addr,
PI_RCV_DATA_K_SIZE_MAX,
DMA_FROM_DEVICE);
skb_reserve(skb, RCV_BUFF_K_PADDING);
bp->p_rcv_buff_va[entry] = (char *)newskb;
bp->descr_block_virt->rcv_data[entry].long_1 =
(u32)dma_map_single(bp->bus_dev,
newskb->data,
NEW_SKB_SIZE,
DMA_FROM_DEVICE);
} else
skb = NULL;
} else
bp->descr_block_virt->rcv_data[entry].long_1 = (u32)new_dma_addr;
}
}
if (!newskb)
#endif
skb = dev_alloc_skb(pkt_len+3); /* alloc new buffer to pass up, add room for PRH */
/* Alloc new buffer to pass up,
* add room for PRH. */
skb = netdev_alloc_skb(bp->dev,
pkt_len + 3);
if (skb == NULL)
{
printk("%s: Could not allocate receive buffer. Dropping packet.\n", bp->dev->name);
@@ -3079,6 +3114,12 @@ static void dfx_rcv_queue_process(
#endif
{
/* Receive buffer allocated, pass receive packet up */
dma_sync_single_for_cpu(
bp->bus_dev,
dma_addr +
RCV_BUFF_K_PADDING,
pkt_len + 3,
DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb,
p_buff + RCV_BUFF_K_PADDING,
@@ -3181,6 +3222,7 @@ static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
u8 prod; /* local transmit producer index */
PI_XMT_DESCR *p_xmt_descr; /* ptr to transmit descriptor block entry */
XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */
dma_addr_t dma_addr;
unsigned long flags;
netif_stop_queue(dev);
@@ -3228,6 +3270,20 @@ static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
}
}
/* Write the three PRH bytes immediately before the FC byte */
skb_push(skb, 3);
skb->data[0] = DFX_PRH0_BYTE; /* these byte values are defined */
skb->data[1] = DFX_PRH1_BYTE; /* in the Motorola FDDI MAC chip */
skb->data[2] = DFX_PRH2_BYTE; /* specification */
dma_addr = dma_map_single(bp->bus_dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (dma_mapping_error(bp->bus_dev, dma_addr)) {
skb_pull(skb, 3);
return NETDEV_TX_BUSY;
}
spin_lock_irqsave(&bp->lock, flags);
/* Get the current producer and the next free xmt data descriptor */
@@ -3248,13 +3304,6 @@ static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[prod++]); /* also bump producer index */
/* Write the three PRH bytes immediately before the FC byte */
skb_push(skb,3);
skb->data[0] = DFX_PRH0_BYTE; /* these byte values are defined */
skb->data[1] = DFX_PRH1_BYTE; /* in the Motorola FDDI MAC chip */
skb->data[2] = DFX_PRH2_BYTE; /* specification */
/*
* Write the descriptor with buffer info and bump producer
*
@@ -3283,8 +3332,7 @@ static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
*/
p_xmt_descr->long_0 = (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN));
p_xmt_descr->long_1 = (u32)dma_map_single(bp->bus_dev, skb->data,
skb->len, DMA_TO_DEVICE);
p_xmt_descr->long_1 = (u32)dma_addr;
/*
* Verify that descriptor is actually available
@@ -3447,8 +3495,13 @@ static void dfx_rcv_flush( DFX_board_t *bp )
{
struct sk_buff *skb;
skb = (struct sk_buff *)bp->p_rcv_buff_va[i+j];
if (skb)
if (skb) {
dma_unmap_single(bp->bus_dev,
bp->descr_block_virt->rcv_data[i+j].long_1,
PI_RCV_DATA_K_SIZE_MAX,
DMA_FROM_DEVICE);
dev_kfree_skb(skb);
}
bp->p_rcv_buff_va[i+j] = NULL;
}