Commit bfeedc98 authored by Linus Torvalds

Import 0.99.14u

parent 50a32c2c
VERSION = 0.99
PATCHLEVEL = 14
ALPHA = t
ALPHA = u
all: Version zImage
......
......@@ -3,13 +3,15 @@
* to implement 5380 SCSI drivers under Linux with a non-trantor
* architecture.
*
* Note that these routines also work with NCR53c400 family chips.
*
* Copyright 1993, Drew Eckhardt
* Visionary Computing
* (Unix and Linux consulting and custom programming)
* drew@colorado.edu
* +1 (303) 440-4894
* +1 (303) 666-5836
*
* DISTRIBUTION RELEASE 3.
* DISTRIBUTION RELEASE 4.
*
* For more information, please consult
*
......@@ -26,18 +28,41 @@
/*
* $Log: NCR5380.c,v $
* Revision 1.5 1994/01/19 09:14:57 drew
* Fixed udelay() hack that was being used on DATAOUT phases
* instead of a proper wait for the final handshake.
*
* Revision 1.4 1994/01/19 06:44:25 drew
* *** empty log message ***
*
* Revision 1.3 1994/01/19 05:24:40 drew
* Added support for TCR LAST_BYTE_SENT bit.
*
* Revision 1.2 1994/01/15 06:14:11 drew
* REAL DMA support, bug fixes.
*
* Revision 1.1 1994/01/15 06:00:54 drew
* Initial revision
*
*/
/*
* Further development / testing that should be done :
* 1. Test USLEEP code
* 2. Test SCSI-II tagged queueing (I have no devices which support
* 1. Cleanup the NCR5380_transfer_dma function and DMA operation complete
* code so that everything does the same thing that's done at the
* end of a pseudo-DMA read operation.
*
* 2. Fix REAL_DMA (interrupt driven, polled works fine) -
* basically, transfer size needs to be reduced by one
* and the last byte read as is done with PSEUDO_DMA.
*
* 3. Test USLEEP code
*
* 4. Test SCSI-II tagged queueing (I have no devices which support
* tagged queueing)
* 3. Flesh out REAL_DMA code (my sample board doesn't support this)
* Perhaps some of the 680x0 porting crew can help out here?
* 4. Test linked command handling code after Eric is ready with
*
* 5. Test linked command handling code after Eric is ready with
* the high level code.
* 5. Tie instance handling into Eric's multi-host patches
*/
#ifndef notyet
......@@ -46,6 +71,11 @@
#undef REAL_DMA
#endif
#ifdef REAL_DMA_POLL
#undef READ_OVERRUNS
#define READ_OVERRUNS
#endif
/*
* Design
* Issues :
......@@ -168,6 +198,10 @@
*
* REAL_DMA - if defined, REAL DMA is used during the data transfer phases.
*
* REAL_DMA_POLL - if defined, REAL DMA is used but the driver doesn't
* rely on phase mismatch and EOP interrupts to determine end
* of phase.
*
* SCSI2 - if defined, SCSI-2 tagged queuing is used where possible
*
* UNSAFE - leave interrupts enabled during pseudo-DMA transfers. You
......@@ -208,9 +242,16 @@
* Either real DMA *or* pseudo DMA may be implemented
* REAL functions :
* NCR5380_REAL_DMA should be defined if real DMA is to be used.
* Note that the DMA setup functions should return the number of bytes
* that they were able to program the controller for.
*
* Also note that generic i386/PC versions of these macros are
* available as NCR5380_i386_dma_write_setup,
* NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual.
*
* NCR5380_dma_write_setup(instance, src, count) - initialize
* NCR5380_dma_read_setup(instance, dst, count) - initialize
* NCR5380_dma_residual(); - residual count
* NCR5380_dma_residual(instance); - residual count
*
* PSEUDO functions :
* NCR5380_pwrite(instance, src, count)
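As an illustration only (not part of this commit), a board-specific header for a PC-class card could satisfy these hooks by mapping them onto the generic i386 helpers named above; the file name and the option chosen below are hypothetical:

/* myboard.h -- hypothetical board glue, a sketch rather than shipped code */
#define REAL_DMA                /* pick one transfer strategy for this board */

/* Route the driver's DMA hooks to the generic ISA DMA helpers declared in
 * NCR5380.h; each setup call returns the byte count actually programmed. */
#define NCR5380_dma_write_setup(instance, src, count) \
        NCR5380_i386_dma_write_setup(instance, src, count)
#define NCR5380_dma_read_setup(instance, dst, count) \
        NCR5380_i386_dma_read_setup(instance, dst, count)
#define NCR5380_dma_residual(instance) \
        NCR5380_i386_dma_residual(instance)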
......@@ -624,9 +665,12 @@ static void NCR5380_print_options (struct Scsi_Host *instance) {
#ifdef DIFFERENTIAL
" DIFFERENTIAL"
#endif
#ifdef REALDMA
#ifdef REAL_DMA
" REAL DMA"
#endif
#ifdef REAL_DMA_POLL
" REAL DMA POLL"
#endif
#ifdef PARITY
" PARITY"
#endif
......@@ -680,6 +724,7 @@ static void NCR5380_init (struct Scsi_Host *instance) {
hostdata->connected = NULL;
hostdata->issue_queue = NULL;
hostdata->disconnected_queue = NULL;
hostdata->flags = FLAG_CHECK_LAST_BYTE_SENT;
if (!the_template) {
the_template = instance->hostt;
......@@ -917,6 +962,7 @@ static void NCR5380_intr (int irq) {
NCR5380_local_declare();
struct Scsi_Host *instance;
int done;
unsigned char basr;
#if (NDEBUG & NDEBUG_INTR)
printk("scsi : NCR5380 irq %d triggered\n", irq);
#endif
......@@ -928,8 +974,9 @@ static void NCR5380_intr (int irq) {
/* Look for pending interrupts */
NCR5380_setup(instance);
basr = NCR5380_read(BUS_AND_STATUS_REG);
/* XXX dispatch to appropriate routine if found and done=0 */
if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_IRQ) {
if (basr & BASR_IRQ) {
#if (NDEBUG & NDEBUG_INTR)
NCR5380_print(instance);
#endif
......@@ -942,7 +989,7 @@ static void NCR5380_intr (int irq) {
#endif
NCR5380_reselect(instance);
(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
} else if (NCR5380_read(BUS_AND_STATUS_REG) &
} else if (basr &
BASR_PARITY_ERROR) {
#if (NDEBUG & NDEBUG_INTR)
printk("scsi%d : PARITY interrupt\n", instance->host_no);
......@@ -955,10 +1002,39 @@ static void NCR5380_intr (int irq) {
*/
#if defined(REAL_DMA)
/*
* We should only get PHASE MISMATCH and EOP interrupts
* if we have DMA enabled, so do a sanity check based on
* the current setting of the MODE register.
*/
if ((NCR5380_read(MODE_REG) & MR_DMA) && ((basr &
BASR_END_DMA_TRANSFER) ||
!(basr & BASR_PHASE_MATCH))) {
int transferred;
if (!hostdata->connected)
panic("scsi%d : received end of DMA interrupt with no connected cmd\n",
instance->host_no);
transferred = (hostdata->dmalen - NCR5380_dma_residual(instance));
hostdata->connected->SCp.this_residual -= transferred;
hostdata->connected->SCp.ptr += transferred;
hostdata->dmalen = 0;
(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
while (NCR5380_read(BUS_AND_STATUS_REG) &
BASR_ACK);
NCR5380_write(MODE_REG, MR_BASE);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
}
#else
#if (NDEBUG & NDEBUG_INTR)
printk("scsi : unknown interrupt\n");
printk("scsi : unknown interrupt, BASR 0x%X, MR 0x%X, SR 0x%x\n", basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG));
#endif
(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
#endif
}
} /* if BASR_IRQ */
......@@ -1352,7 +1428,7 @@ static int NCR5380_transfer_pio (struct Scsi_Host *instance,
return -1;
}
#if defined(REAL_DMA) | defined(PSEUDO_DMA)
#if defined(REAL_DMA) || defined(PSEUDO_DMA) || defined (REAL_DMA_POLL)
/*
* Function : int NCR5380_transfer_dma (struct Scsi_Host *instance,
* unsigned char *phase, int *count, unsigned char **data)
......@@ -1381,26 +1457,42 @@ static int NCR5380_transfer_dma (struct Scsi_Host *instance,
register unsigned char *d = *data;
unsigned char tmp;
int foo;
NCR5380_setup(instance);
#ifdef REAL_DMA
instance->dmalen = c;
if (p & SR_IO)
NCR5380_dma_read_setup(d, c);
else
NCR5380_dma_write_setup(d, c);
#if defined(REAL_DMA_POLL)
int cnt, toPIO;
unsigned char saved_data = 0, overrun = 0, residue;
#endif
struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
instance->hostdata;
NCR5380_setup(instance);
if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) {
*phase = tmp;
return -1;
}
#if defined(REAL_DMA) || defined(REAL_DMA_POLL)
#ifdef READ_OVERRUNS
if (p & SR_IO) {
c -= 2;
}
#endif
#if (NDEBUG & NDEBUG_DMA)
printk("scsi%d : initializing DMA channel %d for %s, %d bytes %s %0x\n",
instance->host_no, instance->dma_channel, (p & SR_IO) ? "reading" :
"writing", c, (p & SR_IO) ? "to" : "from", (unsigned) d);
#endif
hostdata->dma_len = (p & SR_IO) ?
NCR5380_dma_read_setup(instance, d, c) :
NCR5380_dma_write_setup(instance, d, c);
#endif
NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
#ifdef REAL_DMA
NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_EOP_INTR | MR_MONITOR_BSY);
#elif defined(REAL_DMA_POLL)
NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE);
#else
/*
* Note : on my sample board, watch-dog timeouts occurred when interrupts
......@@ -1408,10 +1500,14 @@ static int NCR5380_transfer_dma (struct Scsi_Host *instance,
* before the setting of DMA mode to after transfer of the last byte.
*/
#ifndef UNSAFE
#if defined(PSEUDO_DMA) && !defined(UNSAFE)
cli();
#endif
NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE);
#endif /* def REAL_DMA */
#if (NDEBUG & NDEBUG_DMA) & 0
printk("scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG));
#endif
if (p & SR_IO)
......@@ -1421,19 +1517,200 @@ static int NCR5380_transfer_dma (struct Scsi_Host *instance,
NCR5380_write(START_DMA_SEND_REG, 0);
}
#ifdef REAL_DMA
#if defined(REAL_DMA_POLL)
do {
tmp = NCR5380_read(BUS_AND_STATUS_REG);
} while ((tmp & BASR_PHASE_MATCH) && !(tmp & (BASR_BUSY_ERROR |
BASR_END_DMA_TRANSFER)));
/*
At this point, either we've completed DMA, or we have a phase mismatch,
or we've unexpectedly lost BUSY (which is a real error).
For write DMAs, we want to wait until the last byte has been
transferred out over the bus before we turn off DMA mode. Alas, there
seems to be no terribly good way of doing this on a 5380 under all
conditions. For non-scatter-gather operations, we can wait until REQ
and ACK both go false, or until a phase mismatch occurs. Gather-writes
are nastier, since the device will be expecting more data than we
are prepared to send it, and REQ will remain asserted. On a 53C8[01] we
could test LAST BIT SENT to assure transfer (I imagine this is precisely
why this signal was added to the newer chips) but on the older 538[01]
this signal does not exist. The workaround for this lack is a watchdog;
we bail out of the wait-loop after a modest amount of wait-time if
the usual exit conditions are not met. Not a terribly clean or
correct solution :-%
Reads are equally tricky due to a nasty characteristic of the NCR5380.
If the chip is in DMA mode for a READ, it will respond to a target's
REQ by latching the SCSI data into the INPUT DATA register and asserting
ACK, even if it has _already_ been notified by the DMA controller that
the current DMA transfer has completed! If the NCR5380 is then taken
out of DMA mode, this already-acknowledged byte is lost.
This is not a problem for "one DMA transfer per command" reads, because
the situation will never arise... either all of the data is DMA'ed
properly, or the target switches to MESSAGE IN phase to signal a
disconnection (either operation bringing the DMA to a clean halt).
However, in order to handle scatter-reads, we must work around the
problem. The chosen fix is to DMA N-2 bytes, then check for the
condition before taking the NCR5380 out of DMA mode. One or two extra
bytes are transferred via PIO as necessary to fill out the original
request.
*/
if (p & SR_IO) {
#ifdef READ_OVERRUNS
udelay(10);
if (((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH|BASR_ACK)) ==
(BASR_PHASE_MATCH | BASR_ACK))) {
saved_data = NCR5380_read(INPUT_DATA_REG);
overrun = 1;
}
#endif
} else {
int limit = 100;
while (((tmp = NCR5380_read(BUS_AND_STATUS_REG)) & BASR_ACK) ||
(NCR5380_read(STATUS_REG) & SR_REQ)) {
if (!(tmp & BASR_PHASE_MATCH)) break;
if (--limit < 0) break;
}
}
#if (NDEBUG & NDEBUG_DMA)
printk("scsi%d : polled DMA transfer complete, basr 0x%X, sr 0x%X\n",
instance->host_no, tmp, NCR5380_read(STATUS_REG));
#endif
NCR5380_write(MODE_REG, MR_BASE);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
residue = NCR5380_dma_residual(instance);
c -= residue;
*count -= c;
*data += c;
*phase = NCR5380_read(STATUS_REG) & PHASE_MASK;
#ifdef READ_OVERRUNS
if (*phase == p && (p & SR_IO) && residue == 0) {
if (overrun) {
#if (NDEBUG & NDEBUG_DMA)
printk("Got an input overrun, using saved byte\n");
#endif
**data = saved_data;
*data += 1;
*count -= 1;
cnt = toPIO = 1;
} else {
printk("No overrun??\n");
cnt = toPIO = 2;
}
#if (NDEBUG & NDEBUG_DMA)
printk("Doing %d-byte PIO to 0x%X\n", cnt, *data);
#endif
NCR5380_transfer_pio(instance, phase, &cnt, data);
*count -= toPIO - cnt;
}
#endif
#if (NDEBUG & NDEBUG_DMA)
printk("Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n",
*data, *count, *(*data+*count-1), *(*data+*count));
#endif
return 0;
#elif defined(REAL_DMA)
return 0;
#else /* defined(REAL_DMA_POLL) */
if (p & SR_IO) {
if (!(foo = NCR5380_pread(instance, d, c - 1))) {
/*
* We can't disable DMA mode after successfully transfering
* what we plan to be the last byte, since that would open up
* a race condition where if the target asserted REQ before
* we got the DMA mode reset, the NCR5380 would have latched
* an additional byte into the INPUT DATA register and we'd
* have dropped it.
*
* The workaround was to transfer one fewer byte than we
* intended to with the pseudo-DMA read function, wait for
* the chip to latch the last byte, read it, and then disable
* pseudo-DMA mode.
*
* After REQ is asserted, the NCR5380 asserts DRQ and ACK.
* REQ is deasserted when ACK is asserted, and not reasserted
* until ACK goes false. Since the NCR5380 won't lower ACK
* until DACK is asserted, which won't happen unless we twiddle
* the DMA port or we take the NCR5380 out of DMA mode, we
* can guarantee that we won't handshake another extra
* byte.
*/
while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ));
/* Wait for clean handshake */
while (NCR5380_read(STATUS_REG) & SR_REQ);
d[c - 1] = NCR5380_read(INPUT_DATA_REG);
}
} else {
int timeout;
if (!(foo = NCR5380_pwrite(instance, d, c))) {
/*
* Wait for the last byte to be sent. If REQ is being asserted for
* the byte we're interested in, we'll ACK it and it will go false.
*/
if (!(hostdata->flags & FLAG_HAS_LAST_BYTE_SENT)) {
timeout = 20000;
#if 1
#if 1
while (!(NCR5380_read(BUS_AND_STATUS_REG) &
BASR_DRQ) && (NCR5380_read(BUS_AND_STATUS_REG) &
BASR_PHASE_MATCH));
#else
foo = ((p & SR_IO) ? NCR5380_pread(instance, d, c) :
NCR5380_pwrite(instance, d, c));
NCR5380_write(MODE_REG, MR_BASE);
if (NCR5380_read(STATUS_REG) & SR_REQ) {
for (; timeout &&
!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_ACK);
--timeout);
for (; timeout && (NCR5380_read(STATUS_REG) & SR_REQ);
--timeout);
}
#endif
#if (NDEBUG & NDEBUG_LAST_BYTE_SENT)
if (!timeout)
printk("scsi%d : timed out on last byte\n",
instance->host_no);
#endif
if (hostdata->flags & FLAG_CHECK_LAST_BYTE_SENT) {
hostdata->flags &= ~FLAG_CHECK_LAST_BYTE_SENT;
if (NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT) {
hostdata->flags |= FLAG_HAS_LAST_BYTE_SENT;
#if (NDEBUG & NDEBUG_LAST_BYTE_SENT)
printk("scsi%d : last bit sent works\n",
instance->host_no);
#endif
}
}
} else
while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT));
#else
udelay (5);
#endif
}
}
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
NCR5380_write(MODE_REG, MR_BASE);
*data = d + c;
*count = 0;
*phase = (NCR5380_read(STATUS_REG) & PHASE_MASK);
#ifndef UNSAFE
#if defined(PSEUDO_DMA) && !defined(UNSAFE)
sti();
#endif
#endif /* defined(REAL_DMA_POLL) */
return foo;
#endif /* def REAL_DMA */
}
......@@ -1461,7 +1738,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) {
struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
instance->hostdata;
unsigned char msgout = NOP;
int len;
int len, transfersize;
unsigned char *data;
unsigned char phase, tmp, old_phase=0xff;
Scsi_Cmnd *cmd = (Scsi_Cmnd *) hostdata->connected;
......@@ -1515,11 +1792,17 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) {
* in an unconditional loop.
*/
#if (defined(PSEUDO_DMA))
if (!scsi_devices[cmd->index].borken && cmd->transfersize &&
#if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL)
#ifdef NCR5380_dma_xfer_len
if (!scsi_devices[cmd->index].borken &&
(transfersize = NCR5380_dma_xfer_len(instance, cmd)) != 0) {
#else
if (!scsi_devices[cmd->index].borken &&
(transfersize = cmd->transfersize) &&
cmd->SCp.this_residual && !(cmd->SCp.this_residual %
cmd->transfersize)) {
len = cmd->transfersize;
transfersize)) {
#endif
len = transfersize;
if (NCR5380_transfer_dma(instance, &phase,
&len, (unsigned char **) &cmd->SCp.ptr)) {
/*
......@@ -1533,9 +1816,9 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) {
ICR_ASSERT_ATN);
msgout = ABORT;
} else
cmd->SCp.this_residual -= cmd->transfersize;
cmd->SCp.this_residual -= transfersize - len;
} else
#endif /* (defined(REAL_DMA) || defined(PSEUDO_DMA)) */
#endif /* defined(REAL_DMA) || defined(REAL_DMA_POLL) */
NCR5380_transfer_pio(instance, &phase,
(int *) &cmd->SCp.this_residual, (unsigned char **)
&cmd->SCp.ptr);
......@@ -1547,7 +1830,6 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) {
* for tagged queuing, and the host should initiate any
* negotiations for sync. SCSI, etc.
*/
len = 1;
data = &tmp;
NCR5380_transfer_pio(instance, &phase, &len, &data);
......@@ -1608,14 +1890,26 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) {
hostdata->busy[cmd->target] &= ~(1 << cmd->lun);
/*
* Use the status and message bytes from the original
* command.
* I'm not sure what the correct thing to do here is :
*
* If the command that just executed is NOT a request
* sense, the obvious thing to do is to set the result
* code to the values of the stored parameters.
*
* If it was a REQUEST SENSE command, we need some way
* to differentiate between the failure code of the original
* and the failure code of the REQUEST sense - the obvious
* case is success, where we fall through and leave the result
* code unchanged.
*
* The non-obvious place is where the REQUEST SENSE failed
*/
if (cmd->cmnd[0] != REQUEST_SENSE)
cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
else if (cmd->SCp.Status != GOOD)
cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
#ifdef AUTOSENSE
if ((cmd->cmnd[0] != REQUEST_SENSE) &&
(cmd->SCp.Status == CHECK_CONDITION)) {
......@@ -1648,6 +1942,9 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) {
cmd->scsi_done(cmd);
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
while ((NCR5380_read(STATUS_REG) & SR_BSY) &&
!hostdata->connected);
return;
case MESSAGE_REJECT:
switch (hostdata->last_message) {
......@@ -1676,6 +1973,9 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) {
/* Enable reselect interrupts */
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
/* Wait for bus free to avoid nasty timeouts */
while ((NCR5380_read(STATUS_REG) & SR_BSY) &&
!hostdata->connected);
return;
/*
* The SCSI data pointer is *IMPLICITLY* saved on a disconnect
......@@ -1696,6 +1996,8 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) {
* since ATN should be raised before ACK goes false when we reject a message
*/
printk("Unknown message!\n");
#ifdef notyet
/*
* If we get something weird that we aren't expecting,
......@@ -1819,6 +2121,7 @@ static void NCR5380_reselect (struct Scsi_Host *instance) {
*/
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
while (NCR5380_read(STATUS_REG) & SR_SEL);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
......@@ -1943,7 +2246,7 @@ static void NCR5380_dma_complete (NCR5380_instance *instance) {
* not available on the 5380/5381 (only the various CMOS chips)
*/
while (NCR5380_read(STATUS_REG) & SR_ACK);
while (NCR5380_read(BUS_AND_STATUS_REG) & BASR_ACK);
NCR5380_write(MODE_REG, MR_BASE);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
......@@ -1955,7 +2258,7 @@ static void NCR5380_dma_complete (NCR5380_instance *instance) {
*/
if (!(hostdata->connected->SCp.phase & SR_CD)) {
transferred = instance->dma_len - NCR5380_dma_residual();
transferred = instance->dmalen - NCR5380_dma_residual();
hostdata->connected->SCp.this_residual -= transferred;
hostdata->connected->SCp.ptr += transferred;
}
......@@ -1996,6 +2299,8 @@ int NCR5380_abort (Scsi_Cmnd *cmd, int code) {
#if (NDEBUG & NDEBUG_ABORT)
printk("scsi%d : abort called\n", instance->host_no);
printk(" basr 0x%X, sr 0x%X\n",
NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG));
#endif
/*
* Case 1 : If the command hasn't been issued yet, we simply remove it
......@@ -2117,28 +2422,26 @@ int NCR5380_abort (Scsi_Cmnd *cmd, int code) {
/*
* Function : int NCR5380_reset (struct Scsi_Cmnd *)
* Function : int NCR5380_reset (Scsi_Cmnd *cmd)
*
* Purpose : reset the SCSI bus.
*
* Returns : 0
*
*/
#ifndef NCR5380_reset
static
#endif
int NCR5380_reset (Scsi_Cmnd * SCpnt) {
int NCR5380_reset (Scsi_Cmnd *cmd) {
NCR5380_local_declare();
struct Scsi_Host *instance;
cli();
NCR5380_setup(cmd->host);
instance = SCpnt->host;
NCR5380_setup(instance);
cli();
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST);
udelay(1);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
sti();
if (SCpnt) SCpnt->flags |= NEEDS_JUMPSTART;
return 0;
}
......@@ -5,9 +5,9 @@
* Visionary Computing
* (Unix consulting and custom programming)
* drew@colorado.edu
* +1 (303) 440-4894
* +1 (303) 666-5836
*
* DISTRIBUTION RELEASE 3
* DISTRIBUTION RELEASE 4
*
* For more information, please consult
*
......@@ -23,12 +23,23 @@
/*
* $Log: NCR5380.h,v $
* Revision 1.3 1994/01/19 05:24:40 drew
* Added support for TCR LAST_BYTE_SENT bit.
*
* Revision 1.3 1994/01/19 05:24:40 drew
* Added support for TCR LAST_BYTE_SENT bit.
*
* Revision 1.2 1994/01/15 06:14:11 drew
* REAL DMA support, bug fixes.
*
* Revision 1.1 1994/01/15 06:00:54 drew
* Initial revision
*/
#ifndef NCR5380_H
#define NCR5380_H
#define NCR5380_PUBLIC_RELEASE 3
#define NCR5380_PUBLIC_RELEASE 4
#define NDEBUG_ARBITRATION 0x1
#define NDEBUG_AUTOSENSE 0x2
......@@ -47,6 +58,7 @@
#define NDEBUG_RESELECTION 0x4000
#define NDEBUG_SELECTION 0x8000
#define NDEBUG_USLEEP 0x10000
#define NDEBUG_LAST_BYTE_SENT 0x20000
/*
* The contents of the OUTPUT DATA register are asserted on the bus when
......@@ -141,7 +153,7 @@
* Used in DMA transfer mode, data is latched from the SCSI bus on
* the falling edge of REQ (ini) or ACK (tgt)
*/
#define INPUT_DATA_REGISTER 6 /* ro */
#define INPUT_DATA_REG 6 /* ro */
/* Write any value to this register to start a DMA receive */
#define START_DMA_TARGET_RECIEVE_REG 6 /* wo */
......@@ -192,26 +204,31 @@
*/
/*
* These are "special" values for the irq field of the instance structure
* and returns from NCR5380_probe_irq.
* These are "special" values for the irq and dma_channel fields of the
* Scsi_Host structure
*/
#define IRQ_NONE 255
#define DMA_NONE 255
#define IRQ_AUTO 254
#define DMA_AUTO 254
#define FLAG_HAS_LAST_BYTE_SENT 1 /* NCR53c81 or better */
#define FLAG_CHECK_LAST_BYTE_SENT 2 /* Only test once */
#ifndef ASM
struct NCR5380_hostdata {
NCR5380_implementation_fields; /* implementation specific */
unsigned char id_mask, id_higher_mask; /* 1 << id, all bits greater */
volatile unsigned char busy[8]; /* index = target, bit = lun */
#ifdef REAL_DMA
#if defined(REAL_DMA) || defined(REAL_DMA_POLL)
volatile int dma_len; /* requested length of DMA */
#endif
volatile unsigned char last_message; /* last message OUT */
volatile Scsi_Cmnd *connected; /* currently connected command */
volatile Scsi_Cmnd *issue_queue; /* waiting to be issued */
volatile Scsi_Cmnd *disconnected_queue; /* waiting for reconnect */
int flags;
#ifdef USLEEP
unsigned long time_expires; /* in jiffies, set prior to sleeping */
struct Scsi_Host *next_timer;
......@@ -236,7 +253,7 @@ int NCR5380_abort (Scsi_Cmnd *cmd, int code);
#ifndef NCR5380_reset
static
#endif
int NCR5380_reset (Scsi_Cmnd *);
int NCR5380_reset (Scsi_Cmnd *cmd);
#ifndef NCR5380_queue_command
static
#endif
......@@ -245,13 +262,62 @@ int NCR5380_queue_command (Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *));
static void NCR5380_reselect (struct Scsi_Host *instance);
static int NCR5380_select (struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag);
#if defined(PSEUDO_DMA) || defined(REAL_DMA)
#if defined(PSEUDO_DMA) || defined(REAL_DMA) || defined(REAL_DMA_POLL)
static int NCR5380_transfer_dma (struct Scsi_Host *instance,
unsigned char *phase, int *count, unsigned char **data);
#endif
static int NCR5380_transfer_pio (struct Scsi_Host *instance,
unsigned char *phase, int *count, unsigned char **data);
#if (defined(REAL_DMA) || defined(REAL_DMA_POLL)) && defined(i386)
static __inline__ int NCR5380_i386_dma_setup (struct Scsi_Host *instance,
unsigned char *ptr, unsigned int count, unsigned char mode) {
unsigned limit;
if (instance->dma_channel <=3) {
if (count > 65536)
count = 65536;
limit = 65536 - (((unsigned) ptr) & 0xFFFF);
} else {
if (count > 65536 * 2)
count = 65536 * 2;
limit = 65536 * 2 - (((unsigned) ptr) & 0x1FFFF);
}
if (count > limit) count = limit;
if ((count & 1) || (((unsigned) ptr) & 1))
panic ("scsi%d : attmpted unaligned DMA transfer\n", instance->host_no);
cli();
disable_dma(instance->dma_channel);
clear_dma_ff(instance->dma_channel);
set_dma_addr(instance->dma_channel, (unsigned int) ptr);
set_dma_count(instance->dma_channel, count);
set_dma_mode(instance->dma_channel, mode);
enable_dma(instance->dma_channel);
sti();
return count;
}
static __inline__ int NCR5380_i386_dma_write_setup (struct Scsi_Host *instance,
unsigned char *src, unsigned int count) {
return NCR5380_i386_dma_setup (instance, src, count, DMA_MODE_WRITE);
}
static __inline__ int NCR5380_i386_dma_read_setup (struct Scsi_Host *instance,
unsigned char *src, unsigned int count) {
return NCR5380_i386_dma_setup (instance, src, count, DMA_MODE_READ);
}
static __inline__ int NCR5380_i386_dma_residual (struct Scsi_Host *instance) {
register int tmp;
cli();
clear_dma_ff(instance->dma_channel);
tmp = get_dma_residue(instance->dma_channel);
sti();
return tmp;
}
#endif /* (defined(REAL_DMA) || defined(REAL_DMA_POLL)) && defined(i386) */
#endif /* __KERNEL__ */
#endif /* ndef ASM */
#endif /* NCR5380_H */
......@@ -722,7 +722,10 @@ static int aha1542_query(int base_io, int * transl)
printk("aha1542.c: Emulation mode not supported for AHA 174N hardware.\n");
return 1;
};
if (inquiry_result[0] == 0x44) { /* Detect 1542C */
/* 1542C returns 0x44, 1542CF returns 0x45 */
if (inquiry_result[0] == 0x44 || inquiry_result[0] == 0x45)
{ /* Detect 1542C */
*transl = aha1542_mbenable(base_io);
};
return 0;
......
/* fdomain.c -- Future Domain TMC-16x0 SCSI driver
* Created: Sun May 3 18:53:19 1992 by faith@cs.unc.edu
* Revised: Tue Jan 4 20:43:57 1994 by faith@cs.unc.edu
* Revised: Sun Jan 23 08:59:04 1994 by faith@cs.unc.edu
* Author: Rickard E. Faith, faith@cs.unc.edu
* Copyright 1992, 1993, 1994 Rickard E. Faith
*
* $Id: fdomain.c,v 5.8 1994/01/05 01:44:16 root Exp $
* $Id: fdomain.c,v 5.9 1994/01/23 13:59:14 root Exp $
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
......@@ -144,7 +144,7 @@
#include <linux/string.h>
#include <linux/ioport.h>
#define VERSION "$Revision: 5.8 $"
#define VERSION "$Revision: 5.9 $"
/* START OF USER DEFINABLE OPTIONS */
......@@ -305,6 +305,7 @@ struct signature {
/* 1 2 3 4 5 6 */
/* 123456789012345678901234567890123456789012345678901234567890 */
{ "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.07/28/89", 5, 50, 2, 0 },
{ "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V1.07/28/89", 5, 50, 2, 0 },
{ "FUTURE DOMAIN CORP. (C) 1992 V3.00.004/02/92", 5, 44, 3, 0 },
{ "FUTURE DOMAIN TMC-18XX (C) 1993 V3.203/12/93", 5, 44, 3, 2 },
{ "FUTURE DOMAIN TMC-18XX", 5, 22, -1, -1 },
......
......@@ -977,8 +977,10 @@ int ultrastor_biosparam(int size, int dev, int * dkinfo)
dkinfo[0] = config.heads;
dkinfo[1] = config.sectors;
dkinfo[2] = size / s; /* Ignore partial cylinders */
#if 0
if (dkinfo[2] > 1024)
dkinfo[2] = 1024;
#endif
return 0;
}
......
/* $Id: $
/* $Id: wd7000.c,v 1.2 1994/01/15 06:02:32 drew Exp $
* linux/kernel/wd7000.c
*
* Copyright (C) 1992 Thomas Wuensche
......@@ -508,7 +508,8 @@ void wd7000_revision(void)
}
static const char *wd_bases[] = {(char *)0xce000};
static const char *wd_bases[] = {(char *)0xce000,(char *)0xd8000};
typedef struct {
char * signature;
unsigned offset;
......
-bad
-bap
-fca
-fc1
-cdb
-sc
-bl
-psl
-di16
-lp
-ip5
......@@ -22,5 +22,6 @@
asmlinkage void iABI_emulate(struct pt_regs * regs)
{
printk("lcall 7,xxx: eax = %08lx\n",regs->eax);
printk("iBCS2 binaries not supported yet\n");
do_exit(SIGSEGV);
}
......@@ -43,12 +43,13 @@ struct linger {
#define MSG_OOB 1
#define MSG_PEEK 2
/* Setsockoptions(2) level. */
/* Setsockoptions(2) level. Thanks to BSD these must match IPPROTO_xxx */
#define SOL_SOCKET 1
#define SOL_IP 2
#define SOL_IPX 3
#define SOL_AX25 4
#define SOL_TCP 5
#define SOL_IP 0
#define SOL_IPX 256
#define SOL_AX25 257
#define SOL_TCP 6
#define SOL_UDP 17
/* For setsockoptions(2) */
#define SO_DEBUG 1
......@@ -64,19 +65,23 @@ struct linger {
#define SO_NO_CHECK 11
#define SO_PRIORITY 12
#define SO_LINGER 13
/* IP options */
#define IP_TOS 1
#define IPTOS_LOWDELAY 0x10
#define IPTOS_THROUGHPUT 0x08
#define IPTOS_RELIABILITY 0x04
#define IP_TTL 2
/* IPX options */
#define IPX_TYPE 1
/* AX.25 options */
#define AX25_WINDOW 1
/* TCP options */
#define TCP_MSS 1
#define TCP_NODELAY 2
/* TCP options - this way around because someone left a set in the c library includes */
#define TCP_NODELAY 1
#define TCP_MAXSEG 2
/* The various priorities. */
#define SOPRI_INTERACTIVE 0
......
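For context, here is a minimal user-space sketch (not part of this commit) of how the renumbered levels and renamed TCP options would be used. The includes and the standalone program are assumptions about the surrounding libc; the constants themselves come from the header above, where SOL_TCP now equals IPPROTO_TCP and TCP_MSS has become TCP_MAXSEG:

/* sketch.c -- hypothetical example, assuming a libc that exposes these
 * constants; error handling is deliberately minimal. */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

int main(void)
{
        int one = 1, mss = 536;
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        /* SOL_TCP matches IPPROTO_TCP (6) after this change, so either works. */
        if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one)) < 0)
                perror("TCP_NODELAY");
        /* tcp_setsockopt() in this commit accepts 200..2048, capped at sk->mtu. */
        if (setsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss)) < 0)
                perror("TCP_MAXSEG");
        return 0;
}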
......@@ -94,7 +94,7 @@ asmlinkage void alignment_check(void);
printk("ds: %04x es: %04x fs: %04x gs: %04x\n",
regs->ds, regs->es, regs->fs, regs->gs);
store_TR(i);
printk("Pid: %d, process nr: %d\n", current->pid, 0xffff & i);
printk("Pid: %d, process nr: %d (%s)\n", current->pid, 0xffff & i, current->comm);
for(i=0;i<20;i++)
printk("%02x ",0xff & get_seg_byte(regs->cs,(i+(char *)regs->eip)));
printk("\n");
......
......@@ -67,6 +67,7 @@ static int free_area_pages(unsigned long dindex, unsigned long index, unsigned l
set_pgdir(dindex,0);
mem_map[MAP_NR(page)] = 1;
free_page(page);
invalidate();
return 0;
}
......@@ -98,6 +99,7 @@ static int alloc_area_pages(unsigned long dindex, unsigned long index, unsigned
*pte = pg | PAGE_SHARED;
pte++;
} while (--nr);
invalidate();
return 0;
}
......
......@@ -529,7 +529,7 @@ arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
tbl->last_used = jiffies;
} else {
memcpy(&dst, ptr + (arp->ar_hln * 2) + arp->ar_pln, arp->ar_pln);
if (chk_addr(dst) != IS_MYADDR) {
if (chk_addr(dst) != IS_MYADDR && arp_proxies == 0) {
kfree_skb(skb, FREE_READ);
return(0);
} else {
......@@ -798,6 +798,7 @@ arp_get_info(char *buffer)
req->arp_ha.sa_family = apt->htype;
memcpy((char *) req->arp_ha.sa_data,
(char *) &apt->ha, apt->hlen);
req->arp_flags = apt->flags;
}
pos += sizeof(struct arpreq);
cli();
......
......@@ -119,7 +119,7 @@ packet_sendto(struct sock *sk, unsigned char *from, int len,
return -EMSGSIZE;
/* Now allocate the buffer, knowing 4K page limits won't break this line */
skb = (struct sk_buff *) sk->prot->wmalloc(sk, len+sizeof(*skb), 0, GFP_KERNEL);
skb = sk->prot->wmalloc(sk, len+sizeof(*skb), 0, GFP_KERNEL);
/* This shouldn't happen, but it could. */
if (skb == NULL) {
......
......@@ -187,7 +187,7 @@ raw_sendto(struct sock *sk, unsigned char *from, int len,
return(err);
}
skb = (struct sk_buff *) sk->prot->wmalloc(sk,
skb = sk->prot->wmalloc(sk,
len+sizeof(*skb) + sk->prot->max_header,
0, GFP_KERNEL);
if (skb == NULL) {
......
......@@ -205,6 +205,7 @@ void rt_add(short flags, unsigned long dst, unsigned long mask,
rt->rt_dev = dev;
rt->rt_gateway = gw;
rt->rt_mask = mask;
rt->rt_mtu = dev->mtu;
rt_print(rt);
/*
* What we have to do is loop though this until we have
......
......@@ -28,10 +28,11 @@ struct rtable {
unsigned long rt_dst;
unsigned long rt_mask;
unsigned long rt_gateway;
u_char rt_flags;
u_char rt_metric;
unsigned char rt_flags;
unsigned char rt_metric;
short rt_refcnt;
u_long rt_use;
unsigned long rt_use;
unsigned short rt_mss, rt_mtu;
struct device *rt_dev;
};
......
......@@ -1569,7 +1569,7 @@ inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
}
void *
struct sk_buff *
sock_wmalloc(struct sock *sk, unsigned long size, int force,
int priority)
{
......@@ -1588,7 +1588,7 @@ sock_wmalloc(struct sock *sk, unsigned long size, int force,
}
void *
struct sk_buff *
sock_rmalloc(struct sock *sk, unsigned long size, int force, int priority)
{
if (sk) {
......
......@@ -160,10 +160,10 @@ struct sock {
};
struct proto {
void *(*wmalloc)(struct sock *sk,
struct sk_buff * (*wmalloc)(struct sock *sk,
unsigned long size, int force,
int priority);
void *(*rmalloc)(struct sock *sk,
struct sk_buff * (*rmalloc)(struct sock *sk,
unsigned long size, int force,
int priority);
void (*wfree)(struct sock *sk, void *mem,
......@@ -192,7 +192,7 @@ struct proto {
struct options *opt, int len, int tos, int ttl);
int (*connect)(struct sock *sk,
struct sockaddr_in *usin, int addr_len);
struct sock *(*accept) (struct sock *sk, int flags);
struct sock * (*accept) (struct sock *sk, int flags);
void (*queue_xmit)(struct sock *sk,
struct device *dev, struct sk_buff *skb,
int free);
......@@ -215,7 +215,7 @@ struct proto {
char *optval, int *option);
unsigned short max_header;
unsigned long retransmits;
struct sock *sock_array[SOCK_ARRAY_SIZE];
struct sock * sock_array[SOCK_ARRAY_SIZE];
char name[80];
};
......@@ -224,6 +224,7 @@ struct proto {
#define TIME_KEEPOPEN 3
#define TIME_DESTROY 4
#define TIME_DONE 5 /* used to absorb those last few packets */
#define TIME_PROBE0 6
#define SOCK_DESTROY_TIME 1000 /* about 10 seconds */
#define PROT_SOCK 1024 /* Sockets 0-1023 can't be bound to unless you are superuser */
......@@ -241,10 +242,10 @@ extern struct sock *get_sock(struct proto *, unsigned short,
unsigned long, unsigned short,
unsigned long);
extern void print_sk(struct sock *);
extern void *sock_wmalloc(struct sock *sk,
extern struct sk_buff *sock_wmalloc(struct sock *sk,
unsigned long size, int force,
int priority);
extern void *sock_rmalloc(struct sock *sk,
extern struct sk_buff *sock_rmalloc(struct sock *sk,
unsigned long size, int force,
int priority);
extern void sock_wfree(struct sock *sk, void *mem,
......
......@@ -175,7 +175,7 @@ diff(unsigned long seq1, unsigned long seq2)
static int tcp_select_window(struct sock *sk)
{
int new_window=sk->prot->rspace(sk)/2;
int new_window = sk->prot->rspace(sk);
/* Enforce RFC793 - we've offered it we must live with it */
if(new_window<sk->window)
......@@ -592,25 +592,47 @@ tcp_send_check(struct tcphdr *th, unsigned long saddr,
return;
}
static struct sk_buff * dequeue_partial(struct sock * sk)
{
struct sk_buff * skb;
unsigned long flags;
static void
tcp_send_partial(struct sock *sk)
save_flags(flags);
cli();
skb = sk->send_tmp;
if (skb) {
sk->send_tmp = skb->next;
skb->next = NULL;
}
restore_flags(flags);
return skb;
}
static void enqueue_partial(struct sk_buff * skb, struct sock * sk)
{
struct sk_buff *skb;
unsigned long flags;
if (sk == NULL || sk->send_tmp == NULL) return;
save_flags(flags);
cli();
skb->next = sk->send_tmp;
sk->send_tmp = skb;
restore_flags(flags);
}
skb = sk->send_tmp;
static void tcp_send_partial(struct sock *sk)
{
struct sk_buff *skb;
if (sk == NULL)
return;
while ((skb = dequeue_partial(sk)) != NULL) {
/* If we have queued a header size packet.. */
if(skb->len-(unsigned long)skb->h.th + (unsigned long)skb->data == sizeof(struct tcphdr))
{
if(skb->len-(unsigned long)skb->h.th + (unsigned long)skb->data == sizeof(struct tcphdr)) {
/* If its got a syn or fin its notionally included in the size..*/
if(!skb->h.th->syn && !skb->h.th->fin)
{
if(!skb->h.th->syn && !skb->h.th->fin) {
printk("tcp_send_partial: attempt to queue a bogon.\n");
kfree_skb(skb,FREE_WRITE);
sk->send_tmp=NULL;
return;
}
}
......@@ -636,10 +658,15 @@ tcp_send_partial(struct sock *sk)
sk->wback->next = skb;
}
sk->wback = skb;
if (before(sk->window_seq, sk->wfront->h.seq) &&
sk->send_head == NULL &&
sk->ack_backlog == 0)
reset_timer(sk, TIME_PROBE0,
backoff(sk->backoff) * (2 * sk->mdev + sk->rtt));
} else {
sk->prot->queue_xmit(sk, skb->dev, skb,0);
}
sk->send_tmp = NULL;
}
}
......@@ -660,7 +687,7 @@ tcp_send_ack(unsigned long sequence, unsigned long ack,
* We need to grab some memory, and put together an ack,
* and then put it into the queue to be sent.
*/
buff = (struct sk_buff *) sk->prot->wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
buff = sk->prot->wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
if (buff == NULL) {
/* Force it to send an ack. */
sk->ack_backlog++;
......@@ -749,7 +776,6 @@ tcp_build_header(struct tcphdr *th, struct sock *sk, int push)
return(sizeof(*th));
}
/*
* This routine copies from a user buffer into a socket,
* and starts the transmit system.
......@@ -762,6 +788,7 @@ tcp_write(struct sock *sk, unsigned char *from,
int copy;
int tmp;
struct sk_buff *skb;
struct sk_buff *send_tmp;
unsigned char *buff;
struct proto *prot;
struct device *dev = NULL;
......@@ -842,11 +869,9 @@ tcp_write(struct sock *sk, unsigned char *from,
}
/* Now we need to check if we have a half built packet. */
if (sk->send_tmp != NULL) {
if ((skb = dequeue_partial(sk)) != NULL) {
int hdrlen;
skb = sk->send_tmp;
/* IP header + TCP header */
hdrlen = ((unsigned long)skb->h.th - (unsigned long)skb->data)
+ sizeof(struct tcphdr);
......@@ -869,7 +894,7 @@ tcp_write(struct sock *sk, unsigned char *from,
len -= copy;
sk->send_seq += copy;
}
enqueue_partial(skb, sk);
if ((skb->len - hdrlen) >= sk->mtu || (flags & MSG_OOB)) {
tcp_send_partial(sk);
}
......@@ -895,22 +920,15 @@ tcp_write(struct sock *sk, unsigned char *from,
if (sk->packets_out && copy < sk->mtu && !(flags & MSG_OOB)) {
/* We will release the socket in case we sleep here. */
release_sock(sk);
skb = (struct sk_buff *) prot->wmalloc(sk,
sk->mtu + 128 + prot->max_header +
sizeof(*skb), 0, GFP_KERNEL);
skb = prot->wmalloc(sk, sk->mtu + 128 + prot->max_header + sizeof(*skb), 0, GFP_KERNEL);
sk->inuse = 1;
sk->send_tmp = skb;
if (skb != NULL)
skb->mem_len = sk->mtu + 128 + prot->max_header + sizeof(*skb);
send_tmp = skb;
} else {
/* We will release the socket in case we sleep here. */
release_sock(sk);
skb = (struct sk_buff *) prot->wmalloc(sk,
copy + prot->max_header +
sizeof(*skb), 0, GFP_KERNEL);
skb = prot->wmalloc(sk, copy + prot->max_header + sizeof(*skb), 0, GFP_KERNEL);
sk->inuse = 1;
if (skb != NULL)
skb->mem_len = copy+prot->max_header + sizeof(*skb);
send_tmp = NULL;
}
/* If we didn't get any memory, we need to sleep. */
......@@ -943,7 +961,6 @@ tcp_write(struct sock *sk, unsigned char *from,
continue;
}
skb->mem_addr = skb;
skb->len = 0;
skb->sk = sk;
skb->free = 0;
......@@ -990,7 +1007,10 @@ tcp_write(struct sock *sk, unsigned char *from,
skb->free = 0;
sk->send_seq += copy;
if (sk->send_tmp != NULL) continue;
if (send_tmp != NULL) {
enqueue_partial(send_tmp, sk);
continue;
}
tcp_send_check((struct tcphdr *)buff, sk->saddr, sk->daddr,
copy + sizeof(struct tcphdr), sk);
......@@ -1011,6 +1031,11 @@ tcp_write(struct sock *sk, unsigned char *from,
sk->wback->next = skb;
}
sk->wback = skb;
if (before(sk->window_seq, sk->wfront->h.seq) &&
sk->send_head == NULL &&
sk->ack_backlog == 0)
reset_timer(sk, TIME_PROBE0,
backoff(sk->backoff) * (2 * sk->mdev + sk->rtt));
} else {
prot->queue_xmit(sk, dev, skb,0);
}
......@@ -1075,7 +1100,7 @@ tcp_read_wakeup(struct sock *sk)
* We need to grab some memory, and put together an ack,
* and then put it into the queue to be sent.
*/
buff = (struct sk_buff *) sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
if (buff == NULL) {
/* Try again real soon. */
reset_timer(sk, TIME_WRITE, 10);
......@@ -1550,7 +1575,7 @@ tcp_shutdown(struct sock *sk, int how)
prot =(struct proto *)sk->prot;
th =(struct tcphdr *)&sk->dummy_th;
release_sock(sk); /* in case the malloc sleeps. */
buff = (struct sk_buff *) prot->wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
buff = prot->wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
if (buff == NULL) return;
sk->inuse = 1;
......@@ -1659,7 +1684,7 @@ tcp_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
* We need to grab some memory, and put together an RST,
* and then put it into the queue to be sent.
*/
buff = (struct sk_buff *) prot->wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
buff = prot->wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
if (buff == NULL)
return;
......@@ -1891,7 +1916,7 @@ tcp_conn_request(struct sock *sk, struct sk_buff *skb,
/* this will min with what arrived in the packet */
tcp_options(newsk,skb->h.th);
buff = (struct sk_buff *) newsk->prot->wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
buff = newsk->prot->wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
if (buff == NULL) {
sk->err = -ENOMEM;
newsk->dead = 1;
......@@ -2039,7 +2064,7 @@ tcp_close(struct sock *sk, int timeout)
case TCP_SYN_RECV:
prot =(struct proto *)sk->prot;
th =(struct tcphdr *)&sk->dummy_th;
buff = (struct sk_buff *) prot->wmalloc(sk, MAX_FIN_SIZE, 1, GFP_ATOMIC);
buff = prot->wmalloc(sk, MAX_FIN_SIZE, 1, GFP_ATOMIC);
if (buff == NULL) {
/* This will force it to try again later. */
/* Or it would have if someone released the socket
......@@ -2288,7 +2313,8 @@ tcp_ack(struct sock *sk, struct tcphdr *th, unsigned long saddr, int len)
sk->window_seq = ack + ntohs(th->window);
/* We don't want too many packets out there. */
if (sk->cong_window < 2048 && ack != sk->rcv_ack_seq) {
if (sk->timeout == TIME_WRITE &&
sk->cong_window < 2048 && ack != sk->rcv_ack_seq) {
if (sk->exp_growth) sk->cong_window *= 2;
else sk->cong_window++;
}
......@@ -2296,6 +2322,19 @@ tcp_ack(struct sock *sk, struct tcphdr *th, unsigned long saddr, int len)
DPRINTF((DBG_TCP, "tcp_ack: Updating rcv ack sequence.\n"));
sk->rcv_ack_seq = ack;
/*
* if this ack opens up a zero window, clear backoff. It was
* being used to time the probes, and is probably far higher than
* it needs to be for normal retransmission
*/
if (sk->timeout == TIME_PROBE0) {
if (sk->wfront != NULL && /* should always be non-null */
! before (sk->window_seq, sk->wfront->h.seq)) {
sk->retransmits = 0;
sk->backoff = 0;
}
}
/* See if we can take anything off of the retransmit queue. */
while(sk->send_head != NULL) {
/* Check for a bug. */
......@@ -2348,6 +2387,20 @@ tcp_ack(struct sock *sk, struct tcphdr *th, unsigned long saddr, int len)
if (/* sk->retransmits == 0 && */ !(flag&2)) {
long abserr, rtt = jiffies - oskb->when;
/*
* Berkeley's code puts these limits on a separate timeout
* field, not on the RTT estimate itself. However the way this
* code is done, that would complicate things. If we're going
* to clamp the values, we have to do so before calculating
* the mdev, or we'll get unreasonably large mdev's. Experience
* shows that with a minimum rtt of .1 sec, we get spurious
* retransmits, due to delayed acks on some hosts. Berkeley uses
* 1 sec, so why not?
*/
if (rtt < 100) rtt = 100; /* 1 sec */
if (rtt > 12000) rtt = 12000; /* 2 min - max rtt allowed by protocol */
if (sk->state == TCP_SYN_SENT || sk->state == TCP_SYN_RECV) {
/* first ack, so nothing else to average with */
sk->rtt = rtt;
......@@ -2361,10 +2414,6 @@ tcp_ack(struct sock *sk, struct tcphdr *th, unsigned long saddr, int len)
sk->backoff = 0;
}
flag |= (2|4);
/* no point retransmitting faster than .1 sec */
/* 2 minutes is max legal rtt for Internet */
if (sk->rtt < 10) sk->rtt = 10;
if (sk->rtt > 12000) sk->rtt = 12000;
cli();
......@@ -2398,6 +2447,12 @@ tcp_ack(struct sock *sk, struct tcphdr *th, unsigned long saddr, int len)
&& sk->packets_out < sk->cong_window) {
flag |= 1;
tcp_write_xmit(sk);
} else if (before(sk->window_seq, sk->wfront->h.seq) &&
sk->send_head == NULL &&
sk->ack_backlog == 0 &&
sk->state != TCP_TIME_WAIT) {
reset_timer(sk, TIME_PROBE0,
backoff(sk->backoff) * (2 * sk->mdev + sk->rtt));
}
} else {
if (sk->send_head == NULL && sk->ack_backlog == 0 &&
......@@ -2884,7 +2939,7 @@ tcp_connect(struct sock *sk, struct sockaddr_in *usin, int addr_len)
sk->dummy_th.dest = sin.sin_port;
release_sock(sk);
buff = (struct sk_buff *) sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
buff = sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
if (buff == NULL) {
return(-ENOMEM);
}
......@@ -3459,7 +3514,7 @@ tcp_write_wakeup(struct sock *sk)
if (sk -> state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) return;
buff = (struct sk_buff *) sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
if (buff == NULL) return;
buff->mem_addr = buff;
......@@ -3507,6 +3562,90 @@ tcp_write_wakeup(struct sock *sk)
sk->prot->queue_xmit(sk, dev, buff, 1);
}
/*
* This routine probes a zero window. It makes a copy of the first
* packet in the write queue, but with just one byte of data.
*/
void
tcp_send_probe0(struct sock *sk)
{
unsigned char *raw;
struct iphdr *iph;
struct sk_buff *skb2, *skb;
int len, hlen, data;
struct tcphdr *t1;
struct device *dev;
if (sk->zapped)
return; /* After a valid reset we can send no more */
if (sk -> state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT &&
sk -> state != TCP_FIN_WAIT1 && sk->state != TCP_FIN_WAIT2)
return;
skb = sk->wfront;
if (skb == NULL)
return;
dev = skb->dev;
/* I know this can't happen but as it does.. */
if(dev==NULL)
{
printk("tcp_send_probe0: NULL device bug!\n");
return;
}
IS_SKB(skb);
raw = skb->data;
iph = (struct iphdr *) (raw + dev->hard_header_len);
hlen = (iph->ihl * sizeof(unsigned long)) + dev->hard_header_len;
data = skb->len - hlen - sizeof(struct tcphdr);
len = hlen + sizeof(struct tcphdr) + (data ? 1 : 0);
/* Allocate buffer. */
if ((skb2 = alloc_skb(sizeof(struct sk_buff) + len,GFP_KERNEL)) == NULL) {
/* printk("alloc failed raw %x th %x hlen %d data %d len %d\n",
raw, skb->h.th, hlen, data, len); */
reset_timer (sk, TIME_PROBE0, 10); /* try again real soon */
return;
}
skb2->arp = skb->arp;
skb2->len = len;
skb2->h.raw = (char *)(skb2->data);
sk->wmem_alloc += skb2->mem_len;
/* Copy the packet header into the new buffer. */
memcpy(skb2->h.raw, raw, len);
skb2->h.raw += hlen; /* it's now h.th -- pointer to the tcp header */
t1 = skb2->h.th;
/* source, dest, seq, from existing packet */
t1->ack_seq = ntohl(sk->acked_seq);
t1->res1 = 0;
/* doff, fin, from existing packet. Fin is safe because Linux always
* sends fin in a separate packet
* syn, rst, had better be zero in original */
t1->ack = 1;
t1->urg = 0; /* urgent pointer might be beyond this fragment */
t1->res2 = 0;
t1->window = ntohs(tcp_select_window(sk)/*sk->prot->rspace(sk)*/);
t1->urg_ptr = 0;
tcp_send_check(t1, sk->saddr, sk->daddr, len - hlen, sk);
/* Send it and free it.
* This will prevent the timer from automatically being restarted.
*/
sk->prot->queue_xmit(sk, dev, skb2, 1);
sk->backoff++;
reset_timer (sk, TIME_PROBE0,
backoff (sk->backoff) * (2 * sk->mdev + sk->rtt));
sk->retransmits++;
sk->prot->retransmits ++;
}
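To make the rescheduling arithmetic above concrete, here is a standalone sketch (not kernel code). The shape of backoff() is an assumption, since its real definition lives elsewhere in tcp.c, and the rtt/mdev values are invented, with jiffies read at HZ = 100:

/* probe_interval.c -- hypothetical illustration of
 *     backoff(sk->backoff) * (2 * sk->mdev + sk->rtt)
 * assuming backoff() behaves like a capped power of two and HZ = 100. */
#include <stdio.h>

static unsigned long backoff(int n)      /* assumed shape, not the kernel's */
{
        return (n <= 4) ? (1UL << n) : 16;
}

int main(void)
{
        unsigned long rtt = 300, mdev = 50;  /* example estimates, in jiffies */
        int n;

        for (n = 0; n < 8; n++) {
                unsigned long t = backoff(n) * (2 * mdev + rtt);
                printf("probe %d: timer = %lu jiffies (%.1f s)\n", n, t, t / 100.0);
        }
        return 0;
}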
/*
* Socket option code for TCP.
*/
......@@ -3529,7 +3668,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval, int op
switch(optname)
{
case TCP_MSS:
case TCP_MAXSEG:
if(val<200||val>2048 || val>sk->mtu)
return -EINVAL;
sk->mss=val;
......@@ -3551,7 +3690,7 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval, int *o
switch(optname)
{
case TCP_MSS:
case TCP_MAXSEG:
val=sk->mss;
break;
case TCP_NODELAY:
......
......@@ -41,8 +41,8 @@
* 90 minutes to time out.
*/
#define TCP_TIMEOUT_LEN 5000 /* should be about 5 mins */
#define TCP_TIMEWAIT_LEN 1000 /* how long to wait to successfully
#define TCP_TIMEOUT_LEN (5*60*HZ) /* should be about 5 mins */
#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to successfully
* close the socket, about 60 seconds */
#define TCP_ACK_TIME 3000 /* time to delay before sending an ACK */
#define TCP_DONE_TIME 250 /* maximum time to wait before actually
......
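A quick check of what the old and new constants mean in wall-clock time, assuming HZ = 100 (the i386 tick rate of this era); the snippet is illustrative and not part of the commit:

/* timeouts.c -- sketch converting jiffies to seconds at HZ = 100 */
#include <stdio.h>

#define HZ 100

int main(void)
{
        printf("old TCP_TIMEOUT_LEN  = 5000 jiffies = %d s\n", 5000 / HZ);
        printf("new TCP_TIMEOUT_LEN  = 5*60*HZ      = %d s\n", (5 * 60 * HZ) / HZ);
        printf("old TCP_TIMEWAIT_LEN = 1000 jiffies = %d s\n", 1000 / HZ);
        printf("new TCP_TIMEWAIT_LEN = 60*HZ        = %d s\n", (60 * HZ) / HZ);
        return 0;
}

With HZ = 100 the old literals worked out to 50 s and 10 s, so expressing them through HZ brings the values in line with their comments.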
......@@ -148,6 +148,10 @@ net_timer (unsigned long data)
reset_timer (sk, TIME_DESTROY, TCP_DONE_TIME);
release_sock (sk);
break;
case TIME_PROBE0:
tcp_send_probe0(sk);
release_sock (sk);
break;
case TIME_WRITE: /* try to retransmit. */
/* It could be we got here because we needed to send an ack.
* So we need to check for that.
......
......@@ -234,7 +234,7 @@ udp_send(struct sock *sk, struct sockaddr_in *sin,
/* Allocate a copy of the packet. */
size = sizeof(struct sk_buff) + sk->prot->max_header + len;
skb = (struct sk_buff *) sk->prot->wmalloc(sk, size, 0, GFP_KERNEL);
skb = sk->prot->wmalloc(sk, size, 0, GFP_KERNEL);
if (skb == NULL) return(-ENOMEM);
skb->mem_addr = skb;
......