Commit ca3dd564 authored by Ben Collins, committed by Linus Torvalds

[PATCH] IEEE1394 updates

This is against 2.5.25 and brings things in line with that release. Lots of
fixes; without them I doubt the subsystem works at all. Merged in changes
from the current 2.5.x source.
parent 3617d270
......@@ -9,22 +9,33 @@ if [ "$CONFIG_PCI" = "y" -a "$CONFIG_EXPERIMENTAL" = "y" ]; then
if [ "$CONFIG_IEEE1394" != "n" ]; then
comment "Device Drivers"
dep_tristate ' Texas Instruments PCILynx support' CONFIG_IEEE1394_PCILYNX $CONFIG_IEEE1394
if [ "$CONFIG_IEEE1394_PCILYNX" != "n" ]; then
bool ' Use PCILynx local RAM' CONFIG_IEEE1394_PCILYNX_LOCALRAM
bool ' Support for non-IEEE1394 local ports' CONFIG_IEEE1394_PCILYNX_PORTS
if [ "$CONFIG_I2C" = "n" -o "$CONFIG_I2C_ALGOBIT" = "n" ]; then
comment ' Texas Instruments PCILynx requires I2C bit-banging'
else
dep_tristate ' Texas Instruments PCILynx support' CONFIG_IEEE1394_PCILYNX $CONFIG_IEEE1394 $CONFIG_I2C $CONFIG_I2C_ALGOBIT
fi
# Non-maintained pcilynx options
# if [ "$CONFIG_IEEE1394_PCILYNX" != "n" ]; then
# bool ' Use PCILynx local RAM' CONFIG_IEEE1394_PCILYNX_LOCALRAM
# bool ' Support for non-IEEE1394 local ports' CONFIG_IEEE1394_PCILYNX_PORTS
# fi
dep_tristate ' OHCI-1394 support' CONFIG_IEEE1394_OHCI1394 $CONFIG_IEEE1394
comment "Protocol Drivers"
dep_tristate ' OHCI-1394 Video support' CONFIG_IEEE1394_VIDEO1394 $CONFIG_IEEE1394_OHCI1394
dep_tristate ' SBP-2 support (Harddisks etc.)' CONFIG_IEEE1394_SBP2 $CONFIG_SCSI $CONFIG_IEEE1394
if [ "$CONFIG_IEEE1394_SBP2" != "n" ]; then
bool ' Enable Phys DMA support for SBP2 (Debug)' CONFIG_IEEE1394_SBP2_PHYS_DMA
fi
dep_tristate ' Ethernet over 1394' CONFIG_IEEE1394_ETH1394 $CONFIG_IEEE1394
dep_tristate ' OHCI-DV I/O support' CONFIG_IEEE1394_DV1394 $CONFIG_IEEE1394_OHCI1394
dep_tristate ' Raw IEEE1394 I/O support' CONFIG_IEEE1394_RAWIO $CONFIG_IEEE1394
dep_tristate ' IEC61883-1 Plug support' CONFIG_IEEE1394_CMP $CONFIG_IEEE1394
if [ "$CONFIG_IEEE1394_PCILYNX" != "n" ]; then
if [ "$CONFIG_IEEE1394_CMP" != "n" ]; then
dep_tristate ' IEC61883-6 (Audio transmission) support' CONFIG_IEEE1394_AMDTP $CONFIG_IEEE1394_OHCI1394 $CONFIG_IEEE1394_CMP
fi
......
......@@ -2,8 +2,11 @@
# Makefile for the Linux IEEE 1394 implementation
#
O_TARGET := ieee1394drv.o
export-objs := ieee1394_core.o ohci1394.o cmp.o
list-multi := ieee1394.o
ieee1394-objs := ieee1394_core.o ieee1394_transactions.o hosts.o \
highlevel.o csr.o nodemgr.o
......@@ -19,3 +22,6 @@ obj-$(CONFIG_IEEE1394_AMDTP) += amdtp.o
obj-$(CONFIG_IEEE1394_CMP) += cmp.o
include $(TOPDIR)/Rules.make
ieee1394.o: $(ieee1394-objs)
$(LD) -r -o $@ $(ieee1394-objs)
......@@ -1110,7 +1110,7 @@ MODULE_LICENSE("GPL");
static int __init amdtp_init_module (void)
{
if (ieee1394_register_chardev(IEEE1394_MINOR_BLOCK_EXPERIMENTAL,
if (ieee1394_register_chardev(IEEE1394_MINOR_BLOCK_AMDTP,
THIS_MODULE, &amdtp_fops)) {
HPSB_ERR("amdtp: unable to get minor device block");
return -EIO;
......@@ -1120,7 +1120,7 @@ static int __init amdtp_init_module (void)
&amdtp_highlevel_ops);
if (amdtp_highlevel == NULL) {
HPSB_ERR("amdtp: unable to register highlevel ops");
ieee1394_unregister_chardev(IEEE1394_MINOR_BLOCK_EXPERIMENTAL);
ieee1394_unregister_chardev(IEEE1394_MINOR_BLOCK_AMDTP);
return -EIO;
}
......@@ -1132,7 +1132,7 @@ static int __init amdtp_init_module (void)
static void __exit amdtp_exit_module (void)
{
hpsb_unregister_highlevel(amdtp_highlevel);
ieee1394_unregister_chardev(IEEE1394_MINOR_BLOCK_EXPERIMENTAL);
ieee1394_unregister_chardev(IEEE1394_MINOR_BLOCK_AMDTP);
HPSB_INFO("Unloaded AMDTP driver");
}
......
......@@ -460,7 +460,7 @@ struct video_card {
This is a regular int, but use test_and_set_bit() (on bit zero)
for atomicity.
*/
int open;
unsigned long open;
/*
2) the spinlock - this provides mutual exclusion between the interrupt
......
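The field changes from int to unsigned long in the hunk above because test_and_set_bit()
and the other kernel bitops operate on unsigned long words. A minimal sketch of the
exclusive-open pattern the comment describes; the function names are illustrative and
this is not the driver's actual open path:

#include <linux/bitops.h>
#include <linux/errno.h>

/* Sketch only: guard exclusive access with bit 0 of video->open.
 * test_and_set_bit() atomically sets the bit and returns its old
 * value, so only the first caller gets past this check. */
static int example_video_open(struct video_card *video)
{
	if (test_and_set_bit(0, &video->open))
		return -EBUSY;		/* someone already has the device open */

	/* ... per-open initialization would go here ... */
	return 0;
}

static void example_video_release(struct video_card *video)
{
	/* ... per-open teardown ... */
	clear_bit(0, &video->open);
}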
......@@ -53,6 +53,7 @@
via pci_alloc_consistent()
DONE:
- restart IT DMA after a bus reset
- safely obtain and release ISO Tx channels in cooperation with OHCI driver
- map received DIF blocks to their proper location in DV frame (ensure
recovery if dropped packet)
......@@ -192,71 +193,19 @@ static inline struct video_card* file_to_video_card(struct file *file)
/* Memory management functions */
/*******************************/
#define MDEBUG(x) do { } while(0) /* Debug memory management */
/* [DaveM] I've recoded most of this so that:
* 1) It's easier to tell what is happening
* 2) It's more portable, especially for translating things
* out of vmalloc mapped areas in the kernel.
* 3) Less unnecessary translations happen.
*
* The code used to assume that the kernel vmalloc mappings
* existed in the page tables of every process, this is simply
* not guarenteed. We now use pgd_offset_k which is the
* defined way to get at the kernel page tables.
*/
/* Given PGD from the address space's page table, return the kernel
* virtual mapping of the physical memory mapped at ADR.
*/
static inline struct page *uvirt_to_page(pgd_t *pgd, unsigned long adr)
{
pmd_t *pmd;
pte_t *ptep, pte;
struct page *ret = NULL;
if (!pgd_none(*pgd)) {
pmd = pmd_offset(pgd, adr);
if (!pmd_none(*pmd)) {
ptep = pte_offset_kernel(pmd, adr);
pte = *ptep;
if(pte_present(pte))
ret = pte_page(pte);
}
}
return ret;
}
/* Here we want the physical address of the memory.
* This is used when initializing the contents of the
* area and marking the pages as reserved, and for
* handling page faults on the rvmalloc()ed buffer
*/
static inline unsigned long kvirt_to_pa(unsigned long adr)
{
unsigned long va, kva, ret;
va = VMALLOC_VMADDR(adr);
kva = (unsigned long) page_address(uvirt_to_page(pgd_offset_k(va), va));
kva |= adr & (PAGE_SIZE-1); /* restore the offset */
ret = __pa(kva);
MDEBUG(printk("kv2pa(%lx-->%lx)", adr, ret));
return ret;
}
static void * rvmalloc(unsigned long size)
{
void * mem;
unsigned long adr, page;
unsigned long adr;
size = PAGE_ALIGN(size);
mem=vmalloc_32(size);
if (mem) {
memset(mem, 0, size); /* Clear the ram out,
no junk to the user */
adr=(unsigned long) mem;
while (size > 0) {
page = kvirt_to_pa(adr);
mem_map_reserve(virt_to_page(__va(page)));
mem_map_reserve(vmalloc_to_page((void *)adr));
adr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
......@@ -266,13 +215,12 @@ static void * rvmalloc(unsigned long size)
static void rvfree(void * mem, unsigned long size)
{
unsigned long adr, page;
unsigned long adr;
if (mem) {
adr=(unsigned long) mem;
while (size > 0) {
page = kvirt_to_pa(adr);
mem_map_unreserve(virt_to_page(__va(page)));
mem_map_unreserve(vmalloc_to_page((void *)adr));
adr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
......@@ -1166,9 +1114,9 @@ static int do_dv1394_init(struct video_card *video, struct dv1394_init *init)
/* fill the sglist with the kernel addresses of pages in the non-contiguous buffer */
for(i = 0; i < video->user_dma.n_pages; i++) {
unsigned long va = VMALLOC_VMADDR( (unsigned long) video->user_buf + i * PAGE_SIZE );
unsigned long va = (unsigned long) video->user_buf + i * PAGE_SIZE;
video->user_dma.sglist[i].page = uvirt_to_page(pgd_offset_k(va), va);
video->user_dma.sglist[i].page = vmalloc_to_page((void *)va);
video->user_dma.sglist[i].length = PAGE_SIZE;
}
......@@ -1492,7 +1440,7 @@ static int do_dv1394_shutdown(struct video_card *video, int free_user_buf)
static struct page * dv1394_nopage(struct vm_area_struct * area, unsigned long address, int write_access)
{
unsigned long offset;
unsigned long page, kernel_virt_addr;
unsigned long kernel_virt_addr;
struct page *ret = NOPAGE_SIGBUS;
struct video_card *video = (struct video_card*) area->vm_private_data;
......@@ -1510,10 +1458,7 @@ static struct page * dv1394_nopage(struct vm_area_struct * area, unsigned long a
offset = address - area->vm_start;
kernel_virt_addr = (unsigned long) video->user_buf + offset;
page = kvirt_to_pa(kernel_virt_addr);
ret = virt_to_page(__va(page));
ret = vmalloc_to_page((void *)kernel_virt_addr);
get_page(ret);
out:
......@@ -2936,9 +2881,129 @@ static void dv1394_add_host (struct hpsb_host *host)
dv1394_init(ohci, DV1394_PAL, MODE_TRANSMIT);
}
/* Bus reset handler. In the event of a bus reset, we may need to
re-start the DMA contexts - otherwise the user program would
end up waiting forever.
*/
static void dv1394_host_reset(struct hpsb_host *host)
{
struct ti_ohci *ohci;
struct video_card *video = NULL;
unsigned long flags;
struct list_head *lh;
/* We only work with the OHCI-1394 driver */
if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME))
return;
ohci = (struct ti_ohci *)host->hostdata;
/* find the corresponding video_cards */
spin_lock_irqsave(&dv1394_cards_lock, flags);
if(!list_empty(&dv1394_cards)) {
list_for_each(lh, &dv1394_cards) {
video = list_entry(lh, struct video_card, list);
if((video->id >> 2) == ohci->id)
break;
}
}
spin_unlock_irqrestore(&dv1394_cards_lock, flags);
if(!video)
return;
/* check IT context */
if(video->ohci_it_ctx != -1) {
u32 ctx;
spin_lock_irqsave(&video->spinlock, flags);
ctx = reg_read(video->ohci, video->ohci_IsoXmitContextControlSet);
/* if(RUN but not ACTIVE) */
if( (ctx & (1<<15)) &&
!(ctx & (1<<10)) ) {
debug_printk("dv1394: IT context stopped due to bus reset; waking it up\n");
/* to be safe, assume a frame has been dropped. User-space programs
should handle this condition like an underflow. */
video->dropped_frames++;
/* for some reason you must clear, then re-set the RUN bit to restart DMA */
/* clear RUN */
reg_write(video->ohci, video->ohci_IsoXmitContextControlClear, (1 << 15));
flush_pci_write(video->ohci);
/* set RUN */
reg_write(video->ohci, video->ohci_IsoXmitContextControlSet, (1 << 15));
flush_pci_write(video->ohci);
/* set the WAKE bit (just in case; this isn't strictly necessary) */
reg_write(video->ohci, video->ohci_IsoXmitContextControlSet, (1 << 12));
flush_pci_write(video->ohci);
irq_printk("dv1394: AFTER IT restart ctx 0x%08x ptr 0x%08x\n",
reg_read(video->ohci, video->ohci_IsoXmitContextControlSet),
reg_read(video->ohci, video->ohci_IsoXmitCommandPtr));
}
spin_unlock_irqrestore(&video->spinlock, flags);
}
/* check IR context */
if(video->ohci_ir_ctx != -1) {
u32 ctx;
spin_lock_irqsave(&video->spinlock, flags);
ctx = reg_read(video->ohci, video->ohci_IsoRcvContextControlSet);
/* if(RUN but not ACTIVE) */
if( (ctx & (1<<15)) &&
!(ctx & (1<<10)) ) {
debug_printk("dv1394: IR context stopped due to bus reset; waking it up\n");
/* to be safe, assume a frame has been dropped. User-space programs
should handle this condition like an overflow. */
video->dropped_frames++;
/* for some reason you must clear, then re-set the RUN bit to restart DMA */
/* XXX this doesn't work for me, I can't get IR DMA to restart :[ */
/* clear RUN */
reg_write(video->ohci, video->ohci_IsoRcvContextControlClear, (1 << 15));
flush_pci_write(video->ohci);
/* set RUN */
reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, (1 << 15));
flush_pci_write(video->ohci);
/* set the WAKE bit (just in case; this isn't strictly necessary) */
reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, (1 << 12));
flush_pci_write(video->ohci);
irq_printk("dv1394: AFTER IR restart ctx 0x%08x ptr 0x%08x\n",
reg_read(video->ohci, video->ohci_IsoRcvContextControlSet),
reg_read(video->ohci, video->ohci_IsoRcvCommandPtr));
}
spin_unlock_irqrestore(&video->spinlock, flags);
}
/* wake readers/writers/ioctl'ers */
wake_up_interruptible(&video->waitq);
}
static struct hpsb_highlevel_ops hl_ops = {
add_host: dv1394_add_host,
remove_host: dv1394_remove_host,
host_reset: dv1394_host_reset,
};
......
......@@ -123,6 +123,67 @@
frame 0. Then call DV1394_SUBMIT_FRAMES to inform the device that
it may transmit the new frames.
ERROR HANDLING
An error (buffer underflow/overflow or a break in the DV stream due
to a 1394 bus reset) can be detected by checking the dropped_frames
field of struct dv1394_status (obtained through the
DV1394_GET_STATUS ioctl).
The best way to recover from such an error is to re-initialize
dv1394, either by using the DV1394_INIT ioctl call, or closing the
file descriptor and opening it again. (note that you must unmap all
ringbuffer mappings when closing the file descriptor, or else
dv1394 will still be considered 'in use').
MAIN LOOP
For maximum efficiency and robustness against bus errors, you are
advised to model the main loop of your application after the
following pseudo-code example:
(checks of system call return values omitted for brevity; always
check return values in your code!)
while( frames left ) {
struct pollfd *pfd = ...;
pfd->fd = dv1394_fd;
pfd->revents = 0;
pfd->events = POLLOUT | POLLIN; (OUT for transmit, IN for receive)
(add other sources of I/O here)
poll(pfd, 1, -1); (or select(); add a timeout if you want)
if(pfd->revents) {
struct dv1394_status status;
ioctl(dv1394_fd, DV1394_GET_STATUS, &status);
if(status.dropped_frames > 0) {
reset_dv1394();
} else {
for(int i = 0; i < status.n_clear_frames; i++) {
copy_DV_frame();
}
}
}
}
where copy_DV_frame() reads or writes on the dv1394 file descriptor
(read/write mode) or copies data to/from the mmap ringbuffer and
then calls ioctl(DV1394_SUBMIT_FRAMES) to notify dv1394 that new
frames are available (mmap mode).
reset_dv1394() is called in the event of a buffer
underflow/overflow or a halt in the DV stream (e.g. due to a 1394
bus reset). To guarantee recovery from the error, this function
should close the dv1394 file descriptor (and munmap() all
ringbuffer mappings, if you are using them), then re-open the
dv1394 device (and re-map the ringbuffer).
*/
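As a companion to the pseudo-code above, one possible user-space rendering of that loop
in plain C. It is only a sketch: the file descriptor is assumed to have been opened and
initialized by the caller, and copy_DV_frame()/reset_dv1394() are the user-supplied
routines described in the text, while DV1394_GET_STATUS, struct dv1394_status,
dropped_frames and n_clear_frames are the interfaces documented in this header.

#include <poll.h>
#include <sys/ioctl.h>
#include "dv1394.h"

extern void copy_DV_frame(int fd);   /* read()/write() or mmap copy + DV1394_SUBMIT_FRAMES */
extern void reset_dv1394(int *fd);   /* close, re-open and re-init, as described above */

void transmit_loop(int fd, int frames_left)
{
	while (frames_left > 0) {
		struct pollfd pfd = { fd, POLLOUT, 0 };	/* use POLLIN for receive */
		struct dv1394_status status;
		unsigned int i;

		if (poll(&pfd, 1, -1) <= 0)
			break;			/* real code should check errno */

		if (!pfd.revents)
			continue;

		if (ioctl(fd, DV1394_GET_STATUS, &status) < 0)
			break;

		if (status.dropped_frames > 0) {
			reset_dv1394(&fd);
		} else {
			for (i = 0; i < status.n_clear_frames && frames_left > 0; i++) {
				copy_DV_frame(fd);
				frames_left--;
			}
		}
	}
}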
......@@ -218,6 +279,13 @@ struct dv1394_init {
unsigned int syt_offset;
};
/* NOTE: you may only allocate the DV frame ringbuffer once each time
you open the dv1394 device. DV1394_INIT will fail if you call it a
second time with different 'n_frames' or 'format' arguments (which
would imply a different size for the ringbuffer). If you need a
different buffer size, simply close and re-open the device, then
initialize it with your new settings. */
/* Q: What are cip_n and cip_d? */
/*
......@@ -262,8 +330,9 @@ struct dv1394_status {
ready to be filled with data */
unsigned int n_clear_frames;
/* how many times the DV output has underflowed
since the last call to DV1394_GET_STATUS */
/* how many times the DV stream has underflowed, overflowed,
or otherwise encountered an error, since the previous call
to DV1394_GET_STATUS */
unsigned int dropped_frames;
/* N.B. The dropped_frames counter is only a lower bound on the actual
......
......@@ -124,7 +124,7 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra)
h = kmalloc(sizeof(struct hpsb_host) + extra, SLAB_KERNEL);
if (!h) return NULL;
memset(h, 0, sizeof(struct hpsb_host));
memset(h, 0, sizeof(struct hpsb_host) + extra);
h->hostdata = h + 1;
h->driver = drv;
......
......@@ -31,6 +31,7 @@ struct hpsb_host {
u32 tlabel_pool[2];
struct semaphore tlabel_count;
spinlock_t tlabel_lock;
u32 tlabel_current;
unsigned char iso_listen_count[64];
......
......@@ -740,7 +740,8 @@ void abort_requests(struct hpsb_host *host)
host->ops->devctl(host, CANCEL_REQUESTS, 0);
spin_lock_irqsave(&host->pending_pkt_lock, flags);
list_splice_init(&host->pending_packets, &llist);
list_splice(&host->pending_packets, &llist);
INIT_LIST_HEAD(&host->pending_packets);
spin_unlock_irqrestore(&host->pending_pkt_lock, flags);
list_for_each(lh, &llist) {
......@@ -905,17 +906,16 @@ static int ieee1394_dispatch_open(struct inode *inode, struct file *file)
/* printk("ieee1394_dispatch_open(%d)", blocknum); */
/* lock the whole kernel here, to prevent a driver from
being unloaded between the file_ops lookup and the open */
lock_kernel();
read_lock(&ieee1394_chardevs_lock);
file_ops = ieee1394_chardevs[blocknum].file_ops;
module = ieee1394_chardevs[blocknum].module;
/* bump the reference count of the driver that
will receive the open() */
INCREF(module);
file_ops = ieee1394_chardevs[blocknum].file_ops;
read_unlock(&ieee1394_chardevs_lock);
if(file_ops == NULL) {
DECREF(module);
goto out_fail;
}
......@@ -923,10 +923,6 @@ static int ieee1394_dispatch_open(struct inode *inode, struct file *file)
own file_operations */
file->f_op = file_ops;
/* bump the reference count of the driver that
will receive the open() */
INCREF(module);
/* at this point BOTH ieee1394 and the task-specific driver have
an extra reference */
......@@ -955,7 +951,6 @@ static int ieee1394_dispatch_open(struct inode *inode, struct file *file)
and will be dropped by the VFS when the file is
released. */
unlock_kernel();
return 0;
}
......@@ -965,7 +960,6 @@ static int ieee1394_dispatch_open(struct inode *inode, struct file *file)
function returns. */
file->f_op = &ieee1394_chardev_ops;
unlock_kernel();
return retval;
#undef INCREF
......
......@@ -182,6 +182,7 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
#define IEEE1394_MINOR_BLOCK_RAW1394 0
#define IEEE1394_MINOR_BLOCK_VIDEO1394 1
#define IEEE1394_MINOR_BLOCK_DV1394 2
#define IEEE1394_MINOR_BLOCK_AMDTP 3
#define IEEE1394_MINOR_BLOCK_EXPERIMENTAL 15
/* return the index (within a minor number block) of a file */
......
......@@ -169,8 +169,9 @@ void fill_phy_packet(struct hpsb_packet *packet, quadlet_t data)
*/
int get_tlabel(struct hpsb_host *host, nodeid_t nodeid, int wait)
{
int tlabel;
int tlabel = 0;
unsigned long flags;
int found_tlabel = 0;
if (wait) {
down(&host->tlabel_count);
......@@ -180,13 +181,16 @@ int get_tlabel(struct hpsb_host *host, nodeid_t nodeid, int wait)
spin_lock_irqsave(&host->tlabel_lock, flags);
if (host->tlabel_pool[0] != ~0) {
tlabel = ffz(host->tlabel_pool[0]);
while (!found_tlabel) {
tlabel = host->tlabel_current;
if (tlabel < 32 && !(host->tlabel_pool[0] & 1 << tlabel)) {
host->tlabel_pool[0] |= 1 << tlabel;
} else {
tlabel = ffz(host->tlabel_pool[1]);
host->tlabel_pool[1] |= 1 << tlabel;
tlabel += 32;
found_tlabel = 1;
} else if (!(host->tlabel_pool[1] & 1 << (tlabel - 32))) {
host->tlabel_pool[1] |= 1 << (tlabel - 32);
found_tlabel = 1;
}
host->tlabel_current = (host->tlabel_current + 1) % 64;
}
spin_unlock_irqrestore(&host->tlabel_lock, flags);
......
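The rewritten allocator above walks the 64-entry label space starting at tlabel_current
instead of always taking the lowest free bit, so a label that has just been released is
not reused immediately. A simplified, standalone sketch of the same idea follows; it is
not the driver's exact code, which keeps the pool in two u32 words and relies on the
tlabel_count semaphore to guarantee that a free label exists:

/* Illustrative round-robin transaction-label allocator. */
#include <stdint.h>

static uint64_t tlabel_pool;		/* bit set => label in use */
static unsigned int tlabel_current;	/* next position to try */

int alloc_tlabel(void)
{
	unsigned int i, t;

	for (i = 0; i < 64; i++) {
		t = (tlabel_current + i) % 64;
		if (!(tlabel_pool & ((uint64_t)1 << t))) {
			tlabel_pool |= (uint64_t)1 << t;
			tlabel_current = (t + 1) % 64;
			return t;
		}
	}
	return -1;	/* all 64 labels in use; the driver would block instead */
}

void free_tlabel(int t)
{
	tlabel_pool &= ~((uint64_t)1 << t);
}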
......@@ -7,7 +7,6 @@
#include <linux/version.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/string.h>
#include <asm/byteorder.h>
......
......@@ -177,7 +177,7 @@ static int nodemgr_read_quadlet(struct hpsb_host *host,
for (i = 0; i < 3; i++) {
ret = hpsb_read(host, nodeid, generation, address, quad, 4);
if (ret != -EAGAIN)
if (!ret)
break;
}
*quad = be32_to_cpu(*quad);
......@@ -985,13 +985,27 @@ static int read_businfo_block(struct hpsb_host *host, nodeid_t nodeid, unsigned
HPSB_INFO("Initiating ConfigROM request for node " NODE_BUS_FMT,
NODE_BUS_ARGS(nodeid));
#endif
/*
* Must retry a few times if config rom read returns zero (how long?). Will
* not normally occur, but we should do the right thing. For example, with
* some sbp2 devices, the bridge chipset cannot return valid config rom reads
* immediately after power-on, since they need to detect the type of
* device attached (disk or CD-ROM).
*/
for (i = 0; i < 4; i++) {
if (nodemgr_read_quadlet(host, nodeid, generation,
addr, &buffer[0]) < 0) {
HPSB_ERR("ConfigROM quadlet transaction error for node "
NODE_BUS_FMT, NODE_BUS_ARGS(nodeid));
return -1;
}
if (buffer[0])
break;
set_current_state(TASK_INTERRUPTIBLE);
if (schedule_timeout (HZ/4))
return -1;
}
header_size = buffer[0] >> 24;
addr += 4;
......@@ -1051,10 +1065,15 @@ static void nodemgr_node_probe_one(struct hpsb_host *host,
return;
if (buffer[1] != IEEE1394_BUSID_MAGIC) {
/* This isn't a 1394 device */
HPSB_ERR("Node " NODE_BUS_FMT " isn't an IEEE 1394 device",
NODE_BUS_ARGS(nodeid));
return;
/* This isn't a 1394 device, but we let it slide. There
* was a report of a device with broken firmware which
* reported '2394' instead of '1394', which is obviously a
* mistake. One would hope that a non-1394 device never
* gets connected to Firewire bus. If someone does, we
* shouldn't be held responsible, so we'll allow it with a
* warning. */
HPSB_WARN("Node " NODE_BUS_FMT " has invalid busID magic [0x%08x]",
NODE_BUS_ARGS(nodeid), buffer[1]);
}
guid = ((u64)buffer[3] << 32) | buffer[4];
......@@ -1103,11 +1122,11 @@ static void nodemgr_node_probe(struct hpsb_host *host)
nodeid_t nodeid = LOCAL_BUS;
unsigned int generation;
/* Pause for 1 second, to make sure things settle down. If
/* Pause for 1/4 second, to make sure things settle down. If
* schedule_timeout returns non-zero, it means we caught a signal
* and need to return. */
set_current_state(TASK_INTERRUPTIBLE);
if (schedule_timeout (HZ))
if (schedule_timeout (HZ/4))
return;
/* Now get the generation in which the node ID's we collect
......
......@@ -37,7 +37,7 @@
* . DMA error recovery
*
* Known bugs:
* . Apple PowerBook detected but not working yet (still true?)
* . devctl BUS_RESET arg confusion (reset type or root holdoff?)
*/
/*
......@@ -77,12 +77,6 @@
* . Config ROM generation
*/
/* Issues:
*
* - devctl BUS_RESET should treat arg as reset type
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/list.h>
......@@ -160,17 +154,21 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
printk(level "%s_%d: " fmt "\n" , OHCI1394_DRIVER_NAME, card , ## args)
static char version[] __devinitdata =
"$Revision: 1.101 $ Ben Collins <bcollins@debian.org>";
"$Rev: 504 $ Ben Collins <bcollins@debian.org>";
/* Module Parameters */
MODULE_PARM(attempt_root,"i");
MODULE_PARM_DESC(attempt_root, "Attempt to make the host root.");
MODULE_PARM_DESC(attempt_root, "Attempt to make the host root (default = 0).");
static int attempt_root = 0;
MODULE_PARM(phys_dma,"i");
MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
static int phys_dma = 1;
static void dma_trm_tasklet(unsigned long data);
static void dma_trm_reset(struct dma_trm_ctx *d);
static void __devexit ohci1394_pci_remove(struct pci_dev *pdev);
static void ohci1394_pci_remove(struct pci_dev *pdev);
static inline void ohci1394_run_irq_hooks(struct ti_ohci *ohci,
quadlet_t isoRecvEvent,
......@@ -224,7 +222,7 @@ static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
spin_lock_irqsave (&ohci->phy_reg_lock, flags);
reg_write(ohci, OHCI1394_PhyControl, (((u16)addr << 8) & 0x00000f00) | 0x00008000);
reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);
for (i = 0; i < OHCI_LOOP_COUNT; i++) {
if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
......@@ -252,7 +250,7 @@ static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
spin_lock_irqsave (&ohci->phy_reg_lock, flags);
reg_write(ohci, OHCI1394_PhyControl, 0x00004000 | (((u16)addr << 8) & 0x00000f00) | data);
reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
for (i = 0; i < OHCI_LOOP_COUNT; i++) {
r = reg_read(ohci, OHCI1394_PhyControl);
......@@ -291,8 +289,6 @@ static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
size_t size;
quadlet_t q0, q1;
mdelay(10);
/* Check status of self-id reception */
if (ohci->selfid_swap)
......@@ -363,13 +359,6 @@ static void ohci_soft_reset(struct ti_ohci *ohci) {
break;
mdelay(1);
}
/* Now reenable LPS, since that's usually what we want after a
* softreset anyway. Wait 50msec to make sure we have full link
* enabled. */
reg_write(ohci, OHCI1394_HCControlSet, 0x00080000);
mdelay(50);
DBGMSG (ohci->id, "Soft reset finished");
}
......@@ -488,14 +477,42 @@ static void ohci_initialize(struct ti_ohci *ohci)
{
quadlet_t buf;
/* Start off with a soft reset, to clear everything to a sane
* state. */
ohci_soft_reset(ohci);
/* Now enable LPS, which we need in order to start accessing
* most of the registers. In fact, on some cards (ALI M5251),
* accessing registers in the SClk domain without LPS enabled
* will lock up the machine. Wait 50msec to make sure we have
* full link enabled. */
reg_write(ohci, OHCI1394_HCControlSet, 0x00080000);
mdelay(50);
/* Determine the number of available IR and IT contexts. */
ohci->nb_iso_rcv_ctx =
get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
DBGMSG(ohci->id, "%d iso receive contexts available",
ohci->nb_iso_rcv_ctx);
ohci->nb_iso_xmit_ctx =
get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
DBGMSG(ohci->id, "%d iso transmit contexts available",
ohci->nb_iso_xmit_ctx);
/* Set the usage bits for non-existent contexts so they can't
* be allocated */
ohci->ir_ctx_usage |= ~0 << ohci->nb_iso_rcv_ctx;
ohci->it_ctx_usage |= ~0 << ohci->nb_iso_xmit_ctx;
spin_lock_init(&ohci->phy_reg_lock);
spin_lock_init(&ohci->event_lock);
/* Put some defaults to these undefined bus options */
buf = reg_read(ohci, OHCI1394_BusOptions);
buf |= 0x60000000; /* Enable CMC and ISC */
buf |= 0xE0000000; /* Enable IRMC, CMC and ISC */
buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
buf &= ~0x98000000; /* Disable PMC, IRMC and BMC */
buf &= ~0x18000000; /* Disable PMC and BMC */
reg_write(ohci, OHCI1394_BusOptions, buf);
/* Set the bus number */
......@@ -507,8 +524,10 @@ static void ohci_initialize(struct ti_ohci *ohci)
/* Clear link control register */
reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
/* Enable cycle timer and cycle master */
/* Enable cycle timer and cycle master and set the IRM
* contender bit in our self ID packets. */
reg_write(ohci, OHCI1394_LinkControlSet, 0x00300000);
set_phy_reg_mask(ohci, 4, 0xc0);
/* Clear interrupt registers */
reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
......@@ -610,7 +629,7 @@ static void ohci_initialize(struct ti_ohci *ohci)
((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), ohci->dev->irq,
pci_resource_start(ohci->dev, 0),
pci_resource_start(ohci->dev, 0) + pci_resource_len(ohci->dev, 0),
pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE,
ohci->max_packet_size);
}
......@@ -1242,15 +1261,6 @@ static void ohci_irq_handler(int irq, void *dev_id,
0xffffffff);
reg_write(ohci,OHCI1394_AsReqFilterLoSet,
0xffffffff);
/* Turn on phys dma reception. We should
* probably manage the filtering somehow,
* instead of blindly turning it on. */
reg_write(ohci,OHCI1394_PhyReqFilterHiSet,
0xffffffff);
reg_write(ohci,OHCI1394_PhyReqFilterLoSet,
0xffffffff);
reg_write(ohci,OHCI1394_PhyUpperBound,
0xffff0000);
} else
PRINT(KERN_ERR, ohci->id,
"SelfID received outside of bus reset sequence");
......@@ -1262,6 +1272,31 @@ static void ohci_irq_handler(int irq, void *dev_id,
reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
spin_unlock_irqrestore(&ohci->event_lock, flags);
event &= ~OHCI1394_selfIDComplete;
/* Turn on phys dma reception. We should
* probably manage the filtering somehow,
* instead of blindly turning it on. */
/*
* CAUTION!
* Some chips (TI TSB43AB22) won't take a value in
* the PhyReqFilter register until after the IntEvent
* is cleared for bus reset, and even then a short
* delay is required.
*/
if (phys_dma) {
mdelay(1);
reg_write(ohci,OHCI1394_PhyReqFilterHiSet,
0xffffffff);
reg_write(ohci,OHCI1394_PhyReqFilterLoSet,
0xffffffff);
reg_write(ohci,OHCI1394_PhyUpperBound,
0xffff0000);
}
DBGMSG(ohci->id, "PhyReqFilter=%08x%08x\n",
reg_read(ohci,OHCI1394_PhyReqFilterHiSet),
reg_read(ohci,OHCI1394_PhyReqFilterLoSet));
}
/* Make sure we handle everything, just in case we accidentally
......@@ -1831,7 +1866,7 @@ static u16 ohci_crc16 (u32 *ptr, int length)
crc = 0;
for (; length > 0; length--) {
data = *ptr++;
data = be32_to_cpu(*ptr++);
for (shift = 28; shift >= 0; shift -= 4) {
sum = ((crc >> 12) ^ (data >> shift)) & 0x000f;
crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ sum;
......@@ -2023,7 +2058,7 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
struct hpsb_host *host;
struct ti_ohci *ohci; /* shortcut to currently handled device */
unsigned long ohci_base, ohci_len;
unsigned long ohci_base;
int i;
if (version_printed++ == 0)
......@@ -2041,6 +2076,7 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
ohci->id = card_id_counter++;
ohci->dev = dev;
ohci->host = host;
ohci->init_state = OHCI_INIT_ALLOC_HOST;
host->pdev = dev;
pci_set_drvdata(dev, ohci);
......@@ -2063,6 +2099,29 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
ohci->selfid_swap = 1;
#endif
/* We hardwire the MMIO length, since some CardBus adaptors
* fail to report the right length. Anyway, the ohci spec
* clearly says it's 2kb, so this shouldn't be a problem. */
ohci_base = pci_resource_start(dev, 0);
if (pci_resource_len(dev, 0) != OHCI1394_REGISTER_SIZE)
PRINT(KERN_WARNING, ohci->id, "Unexpected PCI resource length of %lx!",
pci_resource_len(dev, 0));
/* Seems PCMCIA handles this internally. Not sure why. Seems
* pretty bogus to force a driver to special case this. */
#ifndef PCMCIA
if (!request_mem_region (ohci_base, OHCI1394_REGISTER_SIZE, OHCI1394_DRIVER_NAME))
FAIL(-ENOMEM, "MMIO resource (0x%lx - 0x%lx) unavailable",
ohci_base, ohci_base + OHCI1394_REGISTER_SIZE);
#endif
ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
if (ohci->registers == NULL)
FAIL(-ENXIO, "Failed to remap registers - card not accessible");
ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
DBGMSG(ohci->id, "Remapped memory spaces reg 0x%p", ohci->registers);
/* csr_config rom allocation */
ohci->csr_config_rom_cpu =
pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
......@@ -2070,62 +2129,17 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
OHCI_DMA_ALLOC("consistent csr_config_rom");
if (ohci->csr_config_rom_cpu == NULL)
FAIL(-ENOMEM, "Failed to allocate buffer config rom");
ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
ohci_base = pci_resource_start(dev, 0);
ohci_len = pci_resource_len(dev, 0);
if (!request_mem_region (ohci_base, ohci_len, OHCI1394_DRIVER_NAME))
FAIL(-ENOMEM, "MMIO resource (0x%lx@0x%lx) unavailable, aborting.",
ohci_base, ohci_len);
ohci->registers = ioremap(ohci_base, ohci_len);
if (ohci->registers == NULL)
FAIL(-ENXIO, "Failed to remap registers - card not accessible");
DBGMSG(ohci->id, "Remapped memory spaces reg 0x%p", ohci->registers);
/* Start off with a softreset, to clear everything to a sane
* state. This will also set Link Power State (LPS), which we
* need in order to start accessing most of the registers. */
ohci_soft_reset(ohci);
/* determinte the number of available IR and IT contexts right away,
because they need to be known for alloc_dma_*_ctx() */
ohci->nb_iso_rcv_ctx =
get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
DBGMSG(ohci->id, "%d iso receive contexts available",
ohci->nb_iso_rcv_ctx);
ohci->ir_ctx_usage = 0;
/* set the usage bits for non-existent contexts so they can't be allocated */
for(i = ohci->nb_iso_rcv_ctx; i < sizeof(ohci->ir_ctx_usage)*8; i++)
__set_bit(i, &ohci->ir_ctx_usage);
ohci->nb_iso_xmit_ctx =
get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
DBGMSG(ohci->id, "%d iso transmit contexts available",
ohci->nb_iso_xmit_ctx);
ohci->it_ctx_usage = 0;
/* set the usage bits for non-existent contexts so they can't be allocated */
for(i = ohci->nb_iso_xmit_ctx; i < sizeof(ohci->it_ctx_usage)*8; i++)
__set_bit(i, &ohci->it_ctx_usage);
/*
* self-id dma buffer allocation
*/
/* self-id dma buffer allocation */
ohci->selfid_buf_cpu =
pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
&ohci->selfid_buf_bus);
OHCI_DMA_ALLOC("consistent selfid_buf");
if (ohci->selfid_buf_cpu == NULL)
FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
PRINT(KERN_INFO, ohci->id, "SelfID buffer %p is not aligned on "
......@@ -2135,6 +2149,7 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
/* No self-id errors at startup */
ohci->self_id_errors = 0;
ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
/* AR DMA request context allocation */
ohci->ar_req_context =
alloc_dma_rcv_ctx(ohci, DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
......@@ -2169,6 +2184,9 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
if (ohci->at_resp_context == NULL)
FAIL(-ENOMEM, "Failed to allocate AT Resp context");
ohci->ir_ctx_usage = 0;
ohci->it_ctx_usage = 0;
/* IR DMA context */
ohci->ir_context =
alloc_dma_rcv_ctx(ohci, DMA_CTX_ISO, 0, IR_NUM_DESC,
......@@ -2199,30 +2217,36 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
OHCI1394_DRIVER_NAME, ohci))
FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);
ohci->init_state = OHCI_INIT_HAVE_IRQ;
ohci_initialize(ohci);
/* Tell the highlevel this host is ready */
hpsb_add_host(host);
ohci->init_state = OHCI_INIT_DONE;
return 0;
#undef FAIL
}
static void __devexit ohci1394_pci_remove(struct pci_dev *pdev)
static void ohci1394_pci_remove(struct pci_dev *pdev)
{
struct ti_ohci *ohci;
quadlet_t buf;
ohci = pci_get_drvdata(pdev);
if (!ohci)
return;
if (ohci->host)
switch (ohci->init_state) {
case OHCI_INIT_DONE:
hpsb_remove_host(ohci->host);
/* Soft reset before we start */
case OHCI_INIT_HAVE_IRQ:
/* Soft reset before we start - this disables
* interrupts and clears linkEnable and LPS. */
ohci_soft_reset(ohci);
free_irq(ohci->dev->irq, ohci);
case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
/* Free AR dma */
free_dma_rcv_ctx(&ohci->ar_req_context);
free_dma_rcv_ctx(&ohci->ar_resp_context);
......@@ -2237,47 +2261,32 @@ static void __devexit ohci1394_pci_remove(struct pci_dev *pdev)
/* Free IT dma */
free_dma_trm_ctx(&ohci->it_context);
/* Disable all interrupts */
reg_write(ohci, OHCI1394_IntMaskClear, 0x80000000);
free_irq(ohci->dev->irq, ohci);
/* Free self-id buffer */
if (ohci->selfid_buf_cpu) {
case OHCI_INIT_HAVE_SELFID_BUFFER:
pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
ohci->selfid_buf_cpu,
ohci->selfid_buf_bus);
OHCI_DMA_FREE("consistent selfid_buf");
}
/* Free config rom */
if (ohci->csr_config_rom_cpu) {
case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
ohci->csr_config_rom_cpu,
ohci->csr_config_rom_bus);
OHCI_DMA_FREE("consistent csr_config_rom");
}
/* Disable our bus options */
buf = reg_read(ohci, OHCI1394_BusOptions);
buf &= ~0xf8000000;
buf |= 0x00ff0000;
reg_write(ohci, OHCI1394_BusOptions, buf);
/* Clear LinkEnable and LPS */
reg_write(ohci, OHCI1394_HCControlClear, 0x000a0000);
if (ohci->registers)
case OHCI_INIT_HAVE_IOMAPPING:
iounmap(ohci->registers);
release_mem_region (pci_resource_start(ohci->dev, 0),
pci_resource_len(ohci->dev, 0));
case OHCI_INIT_HAVE_MEM_REGION:
#ifndef PCMCIA
release_mem_region(pci_resource_start(ohci->dev, 0),
OHCI1394_REGISTER_SIZE);
#endif
#ifdef CONFIG_ALL_PPC
/* On UniNorth, power down the cable and turn off the
* chip clock when the module is removed to save power
* on laptops. Turning it back ON is done by the arch
* code when pci_enable_device() is called
*/
/* On UniNorth, power down the cable and turn off the chip
* clock when the module is removed to save power on
* laptops. Turning it back ON is done by the arch code when
* pci_enable_device() is called */
{
struct device_node* of_node;
......@@ -2289,8 +2298,10 @@ static void __devexit ohci1394_pci_remove(struct pci_dev *pdev)
}
#endif /* CONFIG_ALL_PPC */
case OHCI_INIT_ALLOC_HOST:
pci_set_drvdata(ohci->dev, NULL);
hpsb_unref_host(ohci->host);
}
}
#define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)
......@@ -2313,7 +2324,7 @@ static struct pci_driver ohci1394_pci_driver = {
name: OHCI1394_DRIVER_NAME,
id_table: ohci1394_pci_tbl,
probe: ohci1394_pci_probe,
remove: __devexit_p(ohci1394_pci_remove),
remove: ohci1394_pci_remove,
};
......
......@@ -21,6 +21,8 @@
#ifndef _OHCI1394_H
#define _OHCI1394_H
#include <asm/io.h>
#include "ieee1394_types.h"
#include <asm/io.h>
......@@ -144,7 +146,16 @@ struct ti_ohci {
struct pci_dev *dev;
u32 state;
enum {
OHCI_INIT_ALLOC_HOST,
OHCI_INIT_HAVE_MEM_REGION,
OHCI_INIT_HAVE_IOMAPPING,
OHCI_INIT_HAVE_CONFIG_ROM_BUFFER,
OHCI_INIT_HAVE_SELFID_BUFFER,
OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE,
OHCI_INIT_HAVE_IRQ,
OHCI_INIT_DONE,
} init_state;
/* remapped memory spaces */
void *registers;
......
......@@ -2,6 +2,7 @@
* ti_pcilynx.c - Texas Instruments PCILynx driver
* Copyright (C) 1999,2000 Andreas Bombe <andreas.bombe@munich.netsurf.de>,
* Stephan Linz <linz@mazet.de>
* Manfred Weihs <weihs@ict.tuwien.ac.at>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
......@@ -41,6 +42,8 @@
#include "highlevel.h"
#include "pcilynx.h"
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
......@@ -56,9 +59,85 @@
#endif
/* Module Parameters */
MODULE_PARM(skip_eeprom,"i");
MODULE_PARM_DESC(skip_eeprom, "Do not try to read bus info block from serial eeprom, but use a generic one (default = 0).");
static int skip_eeprom = 0;
static struct hpsb_host_driver *lynx_driver;
static unsigned int card_id;
/*
* I2C stuff
*/
/* the i2c stuff was inspired by i2c-philips-par.c */
static void bit_setscl(void *data, int state)
{
if (state) {
((struct ti_lynx *) data)->i2c_driven_state |= 0x00000040;
} else {
((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000040;
}
reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
}
static void bit_setsda(void *data, int state)
{
if (state) {
((struct ti_lynx *) data)->i2c_driven_state |= 0x00000010;
} else {
((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000010;
}
reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
}
static int bit_getscl(void *data)
{
return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000040;
}
static int bit_getsda(void *data)
{
return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000010;
}
static int bit_reg(struct i2c_client *client)
{
return 0;
}
static int bit_unreg(struct i2c_client *client)
{
return 0;
}
static struct i2c_algo_bit_data bit_data = {
NULL,
bit_setsda,
bit_setscl,
bit_getsda,
bit_getscl,
5, 5, 100, /* waits, timeout */
};
static struct i2c_adapter bit_ops = {
"PCILynx I2C adapter",
0xAA, //FIXME: probably we should get an id in i2c-id.h
NULL,
NULL,
NULL,
NULL,
bit_reg,
bit_unreg,
};
/*
* PCL handling functions.
*/
......@@ -880,7 +959,7 @@ static ssize_t mem_read(struct file *file, char *buffer, size_t count,
retval = copy_to_user(buffer, md->lynx->mem_dma_buffer, count);
up(&md->lynx->mem_dma_mutex);
if (retval) return -EFAULT;
if (retval < 0) return retval;
*offset += count;
return count;
}
......@@ -1232,6 +1311,11 @@ static int __devinit add_card(struct pci_dev *dev,
int i;
int error;
/* needed for i2c communication with serial eeprom */
struct i2c_adapter i2c_adapter;
struct i2c_algo_bit_data i2c_adapter_data;
int got_valid_bus_info_block = 0; /* set to 1, if we were able to get a valid bus info block from serial eeprom */
error = -ENXIO;
......@@ -1492,6 +1576,94 @@ static int __devinit add_card(struct pci_dev *dev,
if (i != -1) set_phy_reg(lynx, 4, i | 0x40);
}
if (!skip_eeprom)
{
i2c_adapter = bit_ops;
i2c_adapter_data = bit_data;
i2c_adapter.algo_data = &i2c_adapter_data;
i2c_adapter_data.data = lynx;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
PRINT(KERN_DEBUG, lynx->id,"original eeprom control: %d",reg_read(lynx,SERIAL_EEPROM_CONTROL));
#endif
/* reset hardware to sane state */
lynx->i2c_driven_state = 0x00000070;
reg_write(lynx, SERIAL_EEPROM_CONTROL, lynx->i2c_driven_state);
if (i2c_bit_add_bus(&i2c_adapter) < 0)
{
PRINT(KERN_ERR, lynx->id, "unable to register i2c");
}
else
{
/* do i2c stuff */
unsigned char i2c_cmd = 0x10;
struct i2c_msg msg[2] = { { 0x50, 0, 1, &i2c_cmd },
{ 0x50, I2C_M_RD, 20, (unsigned char*) lynx->config_rom }
};
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
union i2c_smbus_data data;
if (i2c_smbus_xfer(&i2c_adapter, 80, 0, I2C_SMBUS_WRITE, 0, I2C_SMBUS_BYTE,NULL))
PRINT(KERN_ERR, lynx->id,"eeprom read start has failed");
else
{
u16 addr;
for (addr=0x00; addr < 0x100; addr++) {
if (i2c_smbus_xfer(&i2c_adapter, 80, 0, I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE,& data)) {
PRINT(KERN_ERR, lynx->id, "unable to read i2c %x", addr);
break;
}
else
PRINT(KERN_DEBUG, lynx->id,"got serial eeprom data at %x: %x",addr, data.byte);
}
}
#endif
/* we use i2c_transfer, because i2c_smbus_read_block_data does not work properly and we
do it more efficiently in one transaction rather than using several reads */
if (i2c_transfer(&i2c_adapter, msg, 2) < 0) {
PRINT(KERN_ERR, lynx->id, "unable to read bus info block from i2c");
} else {
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
int i;
#endif
PRINT(KERN_INFO, lynx->id, "got bus info block from serial eeprom");
/* FIXME: probably we should rewrite the max_rec, max_ROM(1394a), generation(1394a) and link_spd(1394a) fields
and recalculate the CRC */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
for (i=0; i < 5 ; i++)
PRINT(KERN_DEBUG, lynx->id, "Businfo block quadlet %i: %08x",i, be32_to_cpu(lynx->config_rom[i]));
#endif
/* info_length, crc_length and 1394 magic number to check, if it is really a bus info block */
if (((be32_to_cpu(lynx->config_rom[0]) & 0xffff0000) == 0x04040000) &&
(lynx->config_rom[1] == __constant_cpu_to_be32(0x31333934)))
{
PRINT(KERN_DEBUG, lynx->id, "read a valid bus info block from");
got_valid_bus_info_block = 1;
} else {
PRINT(KERN_WARNING, lynx->id, "read something from serial eeprom, but it does not seem to be a valid bus info block");
}
}
i2c_bit_del_bus(&i2c_adapter);
}
}
if (got_valid_bus_info_block) {
memcpy(lynx->config_rom+5,lynx_csr_rom+5,sizeof(lynx_csr_rom)-20);
} else {
PRINT(KERN_INFO, lynx->id, "since we did not get a bus info block from serial eeprom, we use a generic one with a hard coded GUID");
memcpy(lynx->config_rom,lynx_csr_rom,sizeof(lynx_csr_rom));
}
hpsb_add_host(host);
lynx->state = is_host;
......@@ -1503,7 +1675,8 @@ static int __devinit add_card(struct pci_dev *dev,
static size_t get_lynx_rom(struct hpsb_host *host, const quadlet_t **ptr)
{
*ptr = lynx_csr_rom;
struct ti_lynx *lynx = host->hostdata;
*ptr = lynx->config_rom;
return sizeof(lynx_csr_rom);
}
......
......@@ -25,6 +25,8 @@
#define CHANNEL_ASYNC_SEND 3
#define CHANNEL_ISO_SEND 4
#define PCILYNX_CONFIG_ROM_LENGTH 1024
typedef int pcl_t;
struct ti_lynx {
......@@ -48,7 +50,7 @@ struct ti_lynx {
void *local_rom;
void *local_ram;
void *aux_port;
quadlet_t config_rom[PCILYNX_CONFIG_ROM_LENGTH/4];
#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
atomic_t aux_intr_seen;
......@@ -109,6 +111,8 @@ struct ti_lynx {
struct tasklet_struct tq;
spinlock_t lock;
} iso_rcv;
u32 i2c_driven_state; /* the state we currently drive the Serial EEPROM Control register */
};
/* the per-file data structure for mem space access */
......@@ -156,6 +160,8 @@ static inline void reg_clear_bits(const struct ti_lynx *lynx, int offset,
#define MISC_CONTROL 0x40
#define MISC_CONTROL_SWRESET (1<<0)
#define SERIAL_EEPROM_CONTROL 0x44
#define PCI_INT_STATUS 0x48
#define PCI_INT_ENABLE 0x4c
/* status and enable have identical bit numbers */
......
......@@ -189,6 +189,8 @@ static void remove_host(struct hpsb_host *host)
}
kfree(hi);
atomic_inc(&internal_generation);
}
static void host_reset(struct hpsb_host *host)
......@@ -223,8 +225,6 @@ static void host_reset(struct hpsb_host *host)
}
}
spin_unlock_irqrestore(&host_info_lock, flags);
atomic_inc(&internal_generation);
}
static void iso_receive(struct hpsb_host *host, int channel, quadlet_t *data,
......
......@@ -86,6 +86,9 @@
* sbp2_max_sectors, - Change max sectors per I/O supported (default = 255)
* sbp2_max_outstanding_cmds - Change max outstanding concurrent commands (default = 8)
* sbp2_max_cmds_per_lun - Change max concurrent commands per sbp2 device (default = 1)
* sbp2_exclusive_login - Set to zero if you'd like to allow multiple hosts the ability
* to log in at the same time. Sbp2 device must support this,
* and you must know what you're doing (default = 1)
*
* (e.g. insmod sbp2 sbp2_serialize_io = 1)
*
......@@ -135,11 +138,14 @@
* - Error Handling: SCSI aborts and bus reset requests are handled somewhat
* but the code needs additional debugging.
*
* - The SBP-2 driver is currently only supported as a module. It would not take
* - Module: The SBP-2 driver is currently only supported as a module. It would not take
* much work to allow it to be compiled into the kernel, but you'd have to
* add some init code to the kernel to support this... and modules are much
* more flexible anyway. ;-)
*
* - Hot-plugging: Interaction with the SCSI stack and support for hot-plugging could
* stand some improvement.
*
*
* History:
*
......@@ -265,7 +271,7 @@
* which do not support requests of 128KB or greater. Now use
* max_sectors scsi host entry to limit transfer sizes.
* * Change status fifo address from a single address to a set of addresses,
* with each sbp2 device having it's own status fifo address. This makes
* with each sbp2 device having its own status fifo address. This makes
* it easier to match the status write to the sbp2 device instance.
* * Minor change to use lun when logging into sbp2 devices. First step in
* supporting multi-lun devices such as CD/DVD changer devices.
......@@ -279,6 +285,19 @@
* Needed to bump down max commands per lun because of the !%@&*^# QPS CDRW
* drive I have, which doesn't seem to get along with other sbp2 devices
* (or handle linked commands well).
* 04/21/02 - Added some additional debug capabilities:
* * Able to handle phys dma requests directly, if host controller has phys
* dma disabled (e.g. insmod ohci1394 phys_dma=0). Undefine CONFIG_IEEE1394_SBP2_PHYS_DMA
* if you'd like to disable sbp2 driver from registering for phys address range.
* * New packet dump debug define (CONFIG_IEEE1394_SBP2_PACKET_DUMP) which allows
* dumping of all sbp2 related packets sent and received. Especially effective
* when phys dma is disabled on ohci controller (e.g. insmod ohci1394 phys_dma=0).
* * Added new sbp2 module load option (sbp2_exclusive_login) for allowing
* non-exclusive login to sbp2 device, for special multi-host applications.
* 04/23/02 - Fix for Sony CD-ROM drives. Only send fetch agent reset to sbp2 device if it
* returns the dead bit in status. Thanks to Chandan (chandan@toad.net) for this one.
* 04/27/02 - Fix sbp2 login problem on SMP systems, enable real spinlocks by default. (JSG)
* 06/09/02 - Don't force 36-byte SCSI inquiry, but leave in a define for badly behaved devices. (JSG)
*/
......@@ -305,6 +324,7 @@
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
......@@ -329,6 +349,9 @@
#include "ieee1394_hotplug.h"
#include "sbp2.h"
static char version[] __devinitdata =
"$Rev: 507 $ James Goodwin <jamesg@filanet.com>";
/*
* Module load parameter definitions
*/
......@@ -387,6 +410,18 @@ MODULE_PARM(sbp2_max_cmds_per_lun,"i");
MODULE_PARM_DESC(sbp2_max_cmds_per_lun, "Change max concurrent commands per sbp2 device (default = 1)");
static int sbp2_max_cmds_per_lun = SBP2SCSI_MAX_CMDS_PER_LUN;
/*
* Exclusive login to sbp2 device? In most cases, the sbp2 driver should do an exclusive login, as it's
* generally unsafe to have two hosts talking to a single sbp2 device at the same time (filesystem
* coherency, etc.). If you're running an sbp2 device that supports multiple logins, and you're either
* running read-only filesystems or some sort of special filesystem supporting multiple hosts, then
* set sbp2_exclusive_login to zero. Note: The Oxsemi OXFW911 sbp2 chipset supports up to four
* concurrent logins.
*/
MODULE_PARM(sbp2_exclusive_login,"i");
MODULE_PARM_DESC(sbp2_exclusive_login, "Exclusive login to sbp2 device (default = 1)");
static int sbp2_exclusive_login = 1;
/*
* Export information about protocols/devices supported by this driver.
......@@ -411,6 +446,7 @@ MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
/* #define CONFIG_IEEE1394_SBP2_DEBUG_DMA */
/* #define CONFIG_IEEE1394_SBP2_DEBUG 1 */
/* #define CONFIG_IEEE1394_SBP2_DEBUG 2 */
/* #define CONFIG_IEEE1394_SBP2_PACKET_DUMP */
#ifdef CONFIG_IEEE1394_SBP2_DEBUG_ORBS
#define SBP2_ORB_DEBUG(fmt, args...) HPSB_ERR("sbp2(%s): "fmt, __FUNCTION__, ## args)
......@@ -456,10 +492,10 @@ static u32 global_outstanding_dmas = 0;
#define SBP2_ERR(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
/*
* Spinlock debugging stuff. I'm playing it safe until the driver has been
* debugged on SMP. (JSG)
* Spinlock debugging stuff.
*/
/* #define SBP2_USE_REAL_SPINLOCKS */
#define SBP2_USE_REAL_SPINLOCKS
#ifdef SBP2_USE_REAL_SPINLOCKS
#define sbp2_spin_lock(lock, flags) spin_lock_irqsave(lock, flags)
#define sbp2_spin_unlock(lock, flags) spin_unlock_irqrestore(lock, flags);
......@@ -469,6 +505,14 @@ static spinlock_t sbp2_host_info_lock = SPIN_LOCK_UNLOCKED;
#define sbp2_spin_unlock(lock, flags) do {restore_flags(flags);} while (0)
#endif
/*
* SCSI inquiry hack for really badly behaved sbp2 devices. Turn this on if your sbp2 device
* is not properly handling the SCSI inquiry command. This hack makes the inquiry look more
* like a typical MS Windows inquiry.
*/
/* #define SBP2_FORCE_36_BYTE_INQUIRY */
/*
* Globals
*/
......@@ -490,6 +534,13 @@ static struct hpsb_address_ops sbp2_ops = {
write: sbp2_handle_status_write
};
#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
static struct hpsb_address_ops sbp2_physdma_ops = {
read: sbp2_handle_physdma_read,
write: sbp2_handle_physdma_write,
};
#endif
static struct hpsb_protocol_driver sbp2_driver = {
name: "SBP2 Driver",
id_table: sbp2_id_table,
......@@ -498,7 +549,6 @@ static struct hpsb_protocol_driver sbp2_driver = {
update: sbp2_update
};
/**************************************
* General utility functions
......@@ -537,6 +587,56 @@ static __inline__ void sbp2util_cpu_to_be32_buffer(void *buffer, int length)
#define sbp2util_cpu_to_be32_buffer(x,y)
#endif
#ifdef CONFIG_IEEE1394_SBP2_PACKET_DUMP
/*
* Debug packet dump routine. Length is in bytes.
*/
static void sbp2util_packet_dump(void *buffer, int length, char *dump_name, u32 dump_phys_addr)
{
int i;
unsigned char *dump = buffer;
if (!dump || !length || !dump_name)
return;
if (dump_phys_addr)
printk("[%s, 0x%x]", dump_name, dump_phys_addr);
else
printk("[%s]", dump_name);
for (i = 0; i < length; i++) {
if (i > 0x3f) {
printk("\n ...");
break;
}
if ((i & 0x3) == 0)
printk(" ");
if ((i & 0xf) == 0)
printk("\n ");
printk("%02x ", (int) dump[i]);
}
printk("\n");
return;
}
#else
#define sbp2util_packet_dump(w,x,y,z)
#endif
/*
* Goofy routine that basically does a down_timeout function.
*/
static int sbp2util_down_timeout(atomic_t *done, int timeout)
{
int i;
for (i = timeout; (i > 0 && atomic_read(done) == 0); i-= HZ/10) {
set_current_state(TASK_INTERRUPTIBLE);
if (schedule_timeout(HZ/10)) /* 100ms */
return(1);
}
return ((i > 0) ? 0:1);
}
/*
* This function is called to initially create a packet pool for use in
* sbp2 I/O requests. This packet pool is used when sending out sbp2
......@@ -665,7 +765,7 @@ sbp2util_allocate_write_request_packet(struct sbp2scsi_host_info *hi,
hpsb_node_fill_packet(ne, packet);
packet->tlabel = get_tlabel(hi->host, packet->node_id, 1);
packet->tlabel = get_tlabel(hi->host, packet->node_id, 0);
if (!data_size) {
fill_async_writequad(packet, addr, data);
......@@ -950,6 +1050,13 @@ int sbp2_init(void)
SBP2_STATUS_FIFO_ADDRESS +
SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(SBP2SCSI_MAX_SCSI_IDS+1));
/*
* Handle data movement if physical dma is not enabled/supported on host controller
*/
#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
hpsb_register_addrspace(sbp2_hl_handle, &sbp2_physdma_ops, 0x0ULL, 0xfffffffcULL);
#endif
hpsb_register_protocol(&sbp2_driver);
return 0;
......@@ -1241,7 +1348,7 @@ static int sbp2_start_device(struct sbp2scsi_host_info *hi, struct unit_director
scsi_id->max_payload_size = sbp2_speedto_maxrec[SPEED_100];
ud->driver_data = scsi_id;
init_waitqueue_head(&scsi_id->sbp2_login_wait);
atomic_set(&scsi_id->sbp2_login_complete, 0);
/*
* Initialize structures needed for the command orb pool.
......@@ -1379,12 +1486,55 @@ static void sbp2_remove_device(struct sbp2scsi_host_info *hi,
kfree(scsi_id);
}
#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
/*
* This function deals with physical dma write requests (for adapters that do not support
* physical dma in hardware). Mostly just here for debugging...
*/
static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid, int destid, quadlet_t *data,
u64 addr, unsigned int length)
{
/*
* Manually put the data in the right place.
*/
memcpy(bus_to_virt((u32)addr), data, length);
sbp2util_packet_dump(data, length, "sbp2 phys dma write by device", (u32)addr);
return(RCODE_COMPLETE);
}
/*
* This function deals with physical dma read requests (for adapters that do not support
* physical dma in hardware). Mostly just here for debugging...
*/
static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid, quadlet_t *data,
u64 addr, unsigned int length)
{
/*
* Grab data from memory and send a read response.
*/
memcpy(data, bus_to_virt((u32)addr), length);
sbp2util_packet_dump(data, length, "sbp2 phys dma read by device", (u32)addr);
return(RCODE_COMPLETE);
}
#endif
/**************************************
* SBP-2 protocol related section
**************************************/
/*
* This function determines if we should convert scsi commands for a particular sbp2 device type
*/
static __inline__ int sbp2_command_conversion_device_type(u8 device_type)
{
return (((device_type == TYPE_DISK) ||
(device_type == TYPE_SDAD) ||
(device_type == TYPE_ROM)) ? 1:0);
}
/*
* This function is called in order to login to a particular SBP-2 device,
* after a bus reset.
......@@ -1392,7 +1542,6 @@ static void sbp2_remove_device(struct sbp2scsi_host_info *hi,
static int sbp2_login_device(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id)
{
quadlet_t data[2];
unsigned long flags;
SBP2_DEBUG("sbp2_login_device");
......@@ -1412,7 +1561,7 @@ static int sbp2_login_device(struct sbp2scsi_host_info *hi, struct scsi_id_insta
scsi_id->login_orb->lun_misc = ORB_SET_FUNCTION(LOGIN_REQUEST);
scsi_id->login_orb->lun_misc |= ORB_SET_RECONNECT(0); /* One second reconnect time */
scsi_id->login_orb->lun_misc |= ORB_SET_EXCLUSIVE(1); /* Exclusive access to device */
scsi_id->login_orb->lun_misc |= ORB_SET_EXCLUSIVE(sbp2_exclusive_login); /* Exclusive access to device */
scsi_id->login_orb->lun_misc |= ORB_SET_NOTIFY(1); /* Notify us of login complete */
/* Set the lun if we were able to pull it from the device's unit directory */
if (scsi_id->sbp2_device_type_and_lun != SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) {
......@@ -1437,6 +1586,9 @@ static int sbp2_login_device(struct sbp2scsi_host_info *hi, struct scsi_id_insta
SBP2_DEBUG("sbp2_login_device: orb byte-swapped");
sbp2util_packet_dump(scsi_id->login_orb, sizeof(struct sbp2_login_orb),
"sbp2 login orb", scsi_id->login_orb_dma);
/*
* Initialize login response and status fifo
*/
......@@ -1452,35 +1604,28 @@ static int sbp2_login_device(struct sbp2scsi_host_info *hi, struct scsi_id_insta
data[1] = scsi_id->login_orb_dma;
sbp2util_cpu_to_be32_buffer(data, 8);
atomic_set(&scsi_id->sbp2_login_complete, 0);
SBP2_DEBUG("sbp2_login_device: prepared to write");
hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8);
SBP2_DEBUG("sbp2_login_device: written");
/*
* Wait for login status... but, only if the device has not
* already logged-in (some devices are fast)
* Wait for login status (up to 20 seconds)...
*/
save_flags(flags);
cli();
/* 20 second timeout */
if (scsi_id->status_block.ORB_offset_lo != scsi_id->login_orb_dma)
sleep_on_timeout(&scsi_id->sbp2_login_wait, 20*HZ);
restore_flags(flags);
SBP2_DEBUG("sbp2_login_device: initial check");
if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 20*HZ)) {
SBP2_ERR("Error logging into SBP-2 device - login timed-out");
return(-EIO);
}
/*
* Match status to the login orb. If they do not match, it's
* probably because the login timed-out.
* Sanity. Make sure status returned matches login orb.
*/
if (scsi_id->status_block.ORB_offset_lo != scsi_id->login_orb_dma) {
SBP2_ERR("Error logging into SBP-2 device - login timed-out");
return(-EIO);
}
SBP2_DEBUG("sbp2_login_device: second check");
/*
* Check status
*/
......@@ -1552,6 +1697,9 @@ static int sbp2_logout_device(struct sbp2scsi_host_info *hi, struct scsi_id_inst
*/
sbp2util_cpu_to_be32_buffer(scsi_id->logout_orb, sizeof(struct sbp2_logout_orb));
sbp2util_packet_dump(scsi_id->logout_orb, sizeof(struct sbp2_logout_orb),
"sbp2 logout orb", scsi_id->logout_orb_dma);
/*
* Ok, let's write to the target's management agent register
*/
......@@ -1559,10 +1707,12 @@ static int sbp2_logout_device(struct sbp2scsi_host_info *hi, struct scsi_id_inst
data[1] = scsi_id->logout_orb_dma;
sbp2util_cpu_to_be32_buffer(data, 8);
atomic_set(&scsi_id->sbp2_login_complete, 0);
hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8);
/* Wait for device to logout...1 second. */
sleep_on_timeout(&scsi_id->sbp2_login_wait, HZ);
sbp2util_down_timeout(&scsi_id->sbp2_login_complete, HZ);
SBP2_INFO("Logged out of SBP-2 device");
......@@ -1577,7 +1727,6 @@ static int sbp2_logout_device(struct sbp2scsi_host_info *hi, struct scsi_id_inst
static int sbp2_reconnect_device(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id)
{
quadlet_t data[2];
unsigned long flags;
SBP2_DEBUG("sbp2_reconnect_device");
......@@ -1607,6 +1756,9 @@ static int sbp2_reconnect_device(struct sbp2scsi_host_info *hi, struct scsi_id_i
*/
sbp2util_cpu_to_be32_buffer(scsi_id->reconnect_orb, sizeof(struct sbp2_reconnect_orb));
sbp2util_packet_dump(scsi_id->reconnect_orb, sizeof(struct sbp2_reconnect_orb),
"sbp2 reconnect orb", scsi_id->reconnect_orb_dma);
/*
* Initialize status fifo
*/
......@@ -1619,22 +1771,20 @@ static int sbp2_reconnect_device(struct sbp2scsi_host_info *hi, struct scsi_id_i
data[1] = scsi_id->reconnect_orb_dma;
sbp2util_cpu_to_be32_buffer(data, 8);
atomic_set(&scsi_id->sbp2_login_complete, 0);
hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8);
/*
* Wait for reconnect status... but, only if the device has not
* already reconnected (some devices are fast).
* Wait for reconnect status (up to 1 second)...
*/
save_flags(flags);
cli();
/* One second timout */
if (scsi_id->status_block.ORB_offset_lo != scsi_id->reconnect_orb_dma)
sleep_on_timeout(&scsi_id->sbp2_login_wait, HZ);
restore_flags(flags);
if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, HZ)) {
SBP2_ERR("Error reconnecting to SBP-2 device - reconnect timed-out");
return(-EIO);
}
/*
* Match status to the reconnect orb. If they do not match, it's
* probably because the reconnect timed-out.
* Sanity. Make sure status returned matches reconnect orb.
*/
if (scsi_id->status_block.ORB_offset_lo != scsi_id->reconnect_orb_dma) {
SBP2_ERR("Error reconnecting to SBP-2 device - reconnect timed-out");
......@@ -2002,6 +2152,10 @@ static int sbp2_create_command_orb(struct sbp2scsi_host_info *hi,
/* Number of page table (s/g) elements */
command_orb->misc |= ORB_SET_DATA_SIZE(sg_count);
sbp2util_packet_dump(scatter_gather_element,
(sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
"sbp2 s/g list", command->sge_dma);
/*
* Byte swap page tables if necessary
*/
......@@ -2081,6 +2235,10 @@ static int sbp2_create_command_orb(struct sbp2scsi_host_info *hi,
/* Number of page table (s/g) elements */
command_orb->misc |= ORB_SET_DATA_SIZE(sg_count);
sbp2util_packet_dump(scatter_gather_element,
(sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
"sbp2 s/g list", command->sge_dma);
/*
* Byte swap page tables if necessary
*/
......@@ -2219,13 +2377,11 @@ static int sbp2_send_command(struct sbp2scsi_host_info *hi, struct scsi_id_insta
{
unchar *cmd = (unchar *) SCpnt->cmnd;
unsigned int request_bufflen = SCpnt->request_bufflen;
u8 device_type
= SBP2_DEVICE_TYPE (scsi_id->sbp2_device_type_and_lun);
struct sbp2_command_info *command;
SBP2_DEBUG("sbp2_send_command");
SBP2_DEBUG("SCSI command:");
#if CONFIG_IEEE1394_SBP2_DEBUG >= 2
#if (CONFIG_IEEE1394_SBP2_DEBUG >= 2) || defined(CONFIG_IEEE1394_SBP2_PACKET_DUMP)
printk("[scsi command]\n ");
print_command (cmd);
#endif
SBP2_DEBUG("SCSI transfer size = %x", request_bufflen);
......@@ -2242,11 +2398,14 @@ static int sbp2_send_command(struct sbp2scsi_host_info *hi, struct scsi_id_insta
/*
* The scsi stack sends down a request_bufflen which does not match the
* length field in the scsi cdb. This causes some sbp2 devices to
* reject this inquiry command. Hack fix is to set both buff length and
* length field in cdb to 36. This gives best compatibility.
* reject this inquiry command. Fix the request_bufflen.
*/
if (*cmd == INQUIRY) {
#ifdef SBP2_FORCE_36_BYTE_INQUIRY
request_bufflen = cmd[4] = 0x24;
#else
request_bufflen = cmd[4];
#endif
}
/*
......@@ -2259,11 +2418,10 @@ static int sbp2_send_command(struct sbp2scsi_host_info *hi, struct scsi_id_insta
* Update our cdb if necessary (to handle sbp2 RBC command set
* differences). This is where the command set hacks go! =)
*/
if ((device_type == TYPE_DISK) ||
(device_type == TYPE_SDAD) ||
(device_type == TYPE_ROM)) {
sbp2_check_sbp2_command(command->command_orb.cdb);
}
sbp2_check_sbp2_command(scsi_id, command->command_orb.cdb);
sbp2util_packet_dump(&command->command_orb, sizeof(struct sbp2_command_orb),
"sbp2 command orb", command->command_orb_dma);
/*
* Initialize status fifo
......@@ -2283,9 +2441,10 @@ static int sbp2_send_command(struct sbp2scsi_host_info *hi, struct scsi_id_insta
* This function deals with command set differences between Linux scsi
* command set and sbp2 RBC command set.
*/
static void sbp2_check_sbp2_command(unchar *cmd)
static void sbp2_check_sbp2_command(struct scsi_id_instance_data *scsi_id, unchar *cmd)
{
unchar new_cmd[16];
u8 device_type = SBP2_DEVICE_TYPE (scsi_id->sbp2_device_type_and_lun);
SBP2_DEBUG("sbp2_check_sbp2_command");
......@@ -2293,6 +2452,8 @@ static void sbp2_check_sbp2_command(unchar *cmd)
case READ_6:
if (sbp2_command_conversion_device_type(device_type)) {
SBP2_DEBUG("Convert READ_6 to READ_10");
/*
......@@ -2311,10 +2472,14 @@ static void sbp2_check_sbp2_command(unchar *cmd)
memcpy(cmd, new_cmd, 10);
}
break;
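The per-command device-type tests are now folded into sbp2_command_conversion_device_type(), whose definition is outside these hunks. Judging only from the inline checks it replaces elsewhere in this patch (TYPE_DISK, TYPE_SDAD and TYPE_ROM), a plausible sketch is:

/* Sketch inferred from the inline tests this patch removes; the actual
 * helper may differ.  RBC-style devices need their 6-byte commands
 * converted to the 10-byte forms. */
static inline int sbp2_command_conversion_device_type(u8 device_type)
{
        return (device_type == TYPE_DISK ||
                device_type == TYPE_SDAD ||
                device_type == TYPE_ROM);
}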
case WRITE_6:
if (sbp2_command_conversion_device_type(device_type)) {
SBP2_DEBUG("Convert WRITE_6 to WRITE_10");
/*
......@@ -2333,11 +2498,15 @@ static void sbp2_check_sbp2_command(unchar *cmd)
memcpy(cmd, new_cmd, 10);
}
break;
case MODE_SENSE:
SBP2_DEBUG("Convert MODE_SENSE_6 to MOSE_SENSE_10");
if (sbp2_command_conversion_device_type(device_type)) {
SBP2_DEBUG("Convert MODE_SENSE_6 to MODE_SENSE_10");
/*
* Need to turn mode_sense_6 into mode_sense_10
......@@ -2355,6 +2524,8 @@ static void sbp2_check_sbp2_command(unchar *cmd)
memcpy(cmd, new_cmd, 10);
}
break;
case MODE_SELECT:
......@@ -2404,8 +2575,7 @@ static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense
* This function is called after a command is completed, in order to do any necessary SBP-2
* response data translations for the SCSI stack
*/
static void sbp2_check_sbp2_response(struct sbp2scsi_host_info *hi,
struct scsi_id_instance_data *scsi_id,
static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
Scsi_Cmnd *SCpnt)
{
u8 *scsi_buf = SCpnt->request_buffer;
......@@ -2451,9 +2621,7 @@ static void sbp2_check_sbp2_response(struct sbp2scsi_host_info *hi,
case MODE_SENSE:
if ((device_type == TYPE_DISK) ||
(device_type == TYPE_SDAD) ||
(device_type == TYPE_ROM)) {
if (sbp2_command_conversion_device_type(device_type)) {
SBP2_DEBUG("Modify mode sense response (10 byte version)");
......@@ -2495,6 +2663,8 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
SBP2_DEBUG("sbp2_handle_status_write");
sbp2util_packet_dump(data, length, "sbp2 status write by device", (u32)addr);
if (!host) {
SBP2_ERR("host is NULL - this is bad!");
return(RCODE_ADDRESS_ERROR);
......@@ -2566,20 +2736,20 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
/*
* Translate SBP-2 status to SCSI sense data
*/
SBP2_DEBUG("CHECK CONDITION");
scsi_status = sbp2_status_to_sense_data((unchar *)&scsi_id->status_block, SCpnt->sense_buffer);
}
/*
* Handle check conditions. If there is either SBP status or SCSI status
* then we'll do a fetch agent reset and note that a check condition
* occured.
* Check to see if the dead bit is set. If so, we'll have to initiate
* a fetch agent reset.
*/
if (STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc) ||
scsi_status) {
if (STATUS_GET_DEAD_BIT(scsi_id->status_block.ORB_offset_hi_misc)) {
/*
* Initiate a fetch agent reset.
*/
SBP2_DEBUG("CHECK CONDITION");
SBP2_DEBUG("Dead bit set - initiating fetch agent reset");
sbp2_agent_reset(hi, scsi_id, SBP2_SEND_NO_WAIT);
}
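The completion handler now triggers a fetch-agent reset only when the status block's dead bit is set, rather than on any non-zero SBP or SCSI status. The STATUS_GET_* accessors live in sbp2.h; assuming the usual SBP-2 status-block layout (dead bit and sbp_status packed into the first quadlet alongside ORB_offset_hi), they would extract roughly:

/* Assumed field extraction, per the SBP-2 status block layout;
 * the authoritative definitions are in sbp2.h. */
#define STATUS_GET_DEAD_BIT(v)          (((v) >> 27) & 0x1)
#define STATUS_GET_SBP_STATUS(v)        (((v) >> 16) & 0xff)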
......@@ -2602,10 +2772,19 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
scsi_id->last_orb = NULL;
}
} else {
/*
* It's probably a login/logout/reconnect status.
*/
if ((scsi_id->login_orb_dma == scsi_id->status_block.ORB_offset_lo) ||
(scsi_id->reconnect_orb_dma == scsi_id->status_block.ORB_offset_lo) ||
(scsi_id->logout_orb_dma == scsi_id->status_block.ORB_offset_lo)) {
atomic_set(&scsi_id->sbp2_login_complete, 1);
}
}
sbp2_spin_unlock(&hi->sbp2_command_lock, flags);
wake_up(&scsi_id->sbp2_login_wait);
return(RCODE_COMPLETE);
}
......@@ -2805,7 +2984,7 @@ static void sbp2scsi_complete_command(struct sbp2scsi_host_info *hi, struct scsi
* Take care of any sbp2 response data mucking here (RBC stuff, etc.)
*/
if (SCpnt->result == DID_OK) {
sbp2_check_sbp2_response(hi, scsi_id, SCpnt);
sbp2_check_sbp2_response(scsi_id, SCpnt);
}
/*
......@@ -2822,10 +3001,13 @@ static void sbp2scsi_complete_command(struct sbp2scsi_host_info *hi, struct scsi
* retried... it could have happened because of a 1394 bus reset
* or hot-plug...
*/
if ((scsi_status == SBP2_SCSI_STATUS_CHECK_CONDITION) && (SCpnt->sense_buffer[2] == UNIT_ATTENTION)) {
#if 0
if ((scsi_status == SBP2_SCSI_STATUS_CHECK_CONDITION) &&
(SCpnt->sense_buffer[2] == UNIT_ATTENTION)) {
SBP2_DEBUG("UNIT ATTENTION - return busy");
SCpnt->result = DID_BUS_BUSY << 16;
}
#endif
/*
* Tell scsi stack that we're done with this command
......@@ -2835,9 +3017,9 @@ static void sbp2scsi_complete_command(struct sbp2scsi_host_info *hi, struct scsi
done (SCpnt);
spin_unlock_irq(&io_request_lock);
#else
spin_lock_irq(&hi->scsi_host->host_lock);
spin_lock_irq(hi->scsi_host->host_lock);
done (SCpnt);
spin_unlock_irq(&hi->scsi_host->host_lock);
spin_unlock_irq(hi->scsi_host->host_lock);
#endif
return;
......@@ -2905,7 +3087,7 @@ static int sbp2scsi_reset (Scsi_Cmnd *SCpnt)
SBP2_ERR("reset requested");
if (scsi_id) {
SBP2_ERR("Generating IEEE-1394 bus reset");
SBP2_ERR("Generating sbp2 fetch agent reset");
sbp2_agent_reset(hi, scsi_id, SBP2_SEND_NO_WAIT);
}
......@@ -2969,19 +3151,22 @@ static const char *sbp2scsi_info (struct Scsi_Host *host)
if (!hi) /* shouldn't happen, but... */
return "IEEE-1394 SBP-2 protocol driver";
sprintf(info, "IEEE-1394 SBP-2 protocol driver (host: %s)\n"
sprintf(info, "IEEE-1394 SBP-2 protocol driver (host: %s)\n%s\n"
"SBP-2 module load options:\n"
"- Max speed supported: %s\n"
"- Max sectors per I/O supported: %d\n"
"- Max outstanding commands supported: %d\n"
"- Max outstanding commands per lun supported: %d\n"
"- Serialized I/O (debug): %s",
"- Serialized I/O (debug): %s\n"
"- Exclusive login: %s",
hi->host->driver->name,
version,
hpsb_speedto_str[sbp2_max_speed],
sbp2_max_sectors,
sbp2_max_outstanding_cmds,
sbp2_max_cmds_per_lun,
sbp2_serialize_io ? "yes" : "no");
sbp2_serialize_io ? "yes" : "no",
sbp2_exclusive_login ? "yes" : "no");
return info;
}
......
......@@ -398,9 +398,9 @@ struct scsi_id_instance_data {
u32 sbp2_firmware_revision;
/*
* Wait queue used for logins, reconnects, logouts
* Variable used for logins, reconnects, logouts
*/
wait_queue_head_t sbp2_login_wait;
atomic_t sbp2_login_complete;
/*
Pool of command orbs, so we can have more than one overlapped command per id
......@@ -501,6 +501,13 @@ static int sbp2_start_device(struct sbp2scsi_host_info *hi,
static void sbp2_remove_device(struct sbp2scsi_host_info *hi,
struct scsi_id_instance_data *scsi_id);
#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid, int destid, quadlet_t *data,
u64 addr, unsigned int length);
static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid, quadlet_t *data,
u64 addr, unsigned int length);
#endif
/*
* SBP-2 protocol related prototypes
*/
......@@ -523,9 +530,8 @@ static int sbp2_link_orb_command(struct sbp2scsi_host_info *hi, struct scsi_id_i
static int sbp2_send_command(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id,
Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *));
static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense_data);
static void sbp2_check_sbp2_command(unchar *cmd);
static void sbp2_check_sbp2_response(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id,
Scsi_Cmnd *SCpnt);
static void sbp2_check_sbp2_command(struct scsi_id_instance_data *scsi_id, unchar *cmd);
static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id, Scsi_Cmnd *SCpnt);
static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id);
static int sbp2_set_busy_timeout(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id);
static int sbp2_max_speed_and_size(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id);
......
......@@ -168,86 +168,35 @@ static struct hpsb_highlevel *hl_handle = NULL;
/* Memory management functions */
/*******************************/
#define MDEBUG(x) do { } while(0) /* Debug memory management */
/* [DaveM] I've recoded most of this so that:
* 1) It's easier to tell what is happening
* 2) It's more portable, especially for translating things
* out of vmalloc mapped areas in the kernel.
* 3) Less unnecessary translations happen.
*
* The code used to assume that the kernel vmalloc mappings
* existed in the page tables of every process, this is simply
* not guaranteed. We now use pgd_offset_k which is the
* defined way to get at the kernel page tables.
*/
/* Given PGD from the address space's page table, return the kernel
* virtual mapping of the physical memory mapped at ADR.
*/
static inline unsigned long uvirt_to_kva(pgd_t *pgd, unsigned long adr)
{
unsigned long ret = 0UL;
pmd_t *pmd;
pte_t *ptep, pte;
if (!pgd_none(*pgd)) {
pmd = pmd_offset(pgd, adr);
if (!pmd_none(*pmd)) {
ptep = pte_offset_kernel(pmd, adr);
pte = *ptep;
if(pte_present(pte)) {
ret = (unsigned long)
page_address(pte_page(pte));
ret |= (adr & (PAGE_SIZE - 1));
}
}
}
MDEBUG(printk("uv2kva(%lx-->%lx)", adr, ret));
return ret;
}
static inline unsigned long uvirt_to_bus(unsigned long adr)
{
unsigned long kva, ret;
kva = uvirt_to_kva(pgd_offset(current->mm, adr), adr);
ret = virt_to_bus((void *)kva);
MDEBUG(printk("uv2b(%lx-->%lx)", adr, ret));
return ret;
}
static inline unsigned long kvirt_to_bus(unsigned long adr)
{
unsigned long va, kva, ret;
unsigned long kva, ret;
va = VMALLOC_VMADDR(adr);
kva = uvirt_to_kva(pgd_offset_k(va), va);
kva = (unsigned long) page_address(vmalloc_to_page((void *)adr));
kva |= adr & (PAGE_SIZE-1); /* restore the offset */
ret = virt_to_bus((void *)kva);
MDEBUG(printk("kv2b(%lx-->%lx)", adr, ret));
return ret;
}
/* Here we want the physical address of the memory.
* This is used when initializing the contents of the
* area and marking the pages as reserved.
* This is used when initializing the contents of the area.
*/
static inline unsigned long kvirt_to_pa(unsigned long adr)
{
unsigned long va, kva, ret;
unsigned long kva, ret;
va = VMALLOC_VMADDR(adr);
kva = uvirt_to_kva(pgd_offset_k(va), va);
kva = (unsigned long) page_address(vmalloc_to_page((void *)adr));
kva |= adr & (PAGE_SIZE-1); /* restore the offset */
ret = __pa(kva);
MDEBUG(printk("kv2pa(%lx-->%lx)", adr, ret));
return ret;
}
static void * rvmalloc(unsigned long size)
{
void * mem;
unsigned long adr, page;
unsigned long adr;
size=PAGE_ALIGN(size);
mem=vmalloc_32(size);
if (mem)
{
......@@ -256,8 +205,7 @@ static void * rvmalloc(unsigned long size)
adr=(unsigned long) mem;
while (size > 0)
{
page = kvirt_to_pa(adr);
mem_map_reserve(virt_to_page(__va(page)));
mem_map_reserve(vmalloc_to_page((void *)adr));
adr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
......@@ -267,15 +215,14 @@ static void * rvmalloc(unsigned long size)
static void rvfree(void * mem, unsigned long size)
{
unsigned long adr, page;
unsigned long adr;
if (mem)
{
adr=(unsigned long) mem;
while (size > 0)
while ((long) size > 0)
{
page = kvirt_to_pa(adr);
mem_map_unreserve(virt_to_page(__va(page)));
mem_map_unreserve(vmalloc_to_page((void *)adr));
adr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
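With the hand-rolled page-table walk removed, vmalloc_to_page() is the only translation primitive left: a vmalloc_32() buffer is contiguous only in virtual space, so each page still has to be converted to a bus address individually before it can be handed to a DMA descriptor. A small illustrative fragment of that per-page translation (all names are made up, not part of the patch):

/* Illustrative only: translate each page of a vmalloc_32()/rvmalloc()
 * buffer to a little-endian bus address for a DMA program. */
static void example_fill_addresses(u32 *desc_addr, void *mem, int nr_pages)
{
        int i;

        for (i = 0; i < nr_pages; i++)
                desc_addr[i] = cpu_to_le32(kvirt_to_bus((unsigned long) mem
                                                        + i * PAGE_SIZE));
}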
......@@ -520,11 +467,11 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
static void reset_ir_status(struct dma_iso_ctx *d, int n)
{
int i;
d->ir_prg[n][0].status = 4;
d->ir_prg[n][1].status = PAGE_SIZE-4;
d->ir_prg[n][0].status = cpu_to_le32(4);
d->ir_prg[n][1].status = cpu_to_le32(PAGE_SIZE-4);
for (i=2;i<d->nb_cmd-1;i++)
d->ir_prg[n][i].status = PAGE_SIZE;
d->ir_prg[n][i].status = d->left_size;
d->ir_prg[n][i].status = cpu_to_le32(PAGE_SIZE);
d->ir_prg[n][i].status = cpu_to_le32(d->left_size);
}
static void initialize_dma_ir_prg(struct dma_iso_ctx *d, int n, int flags)
......@@ -534,38 +481,38 @@ static void initialize_dma_ir_prg(struct dma_iso_ctx *d, int n, int flags)
int i;
/* the first descriptor will read only 4 bytes */
ir_prg[0].control = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
DMA_CTL_BRANCH | 4;
ir_prg[0].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
DMA_CTL_BRANCH | 4);
/* set the sync flag */
if (flags & VIDEO1394_SYNC_FRAMES)
ir_prg[0].control |= DMA_CTL_WAIT;
ir_prg[0].control |= cpu_to_le32(DMA_CTL_WAIT);
ir_prg[0].address = kvirt_to_bus(buf);
ir_prg[0].branchAddress = (virt_to_bus(&(ir_prg[1].control))
& 0xfffffff0) | 0x1;
ir_prg[0].address = cpu_to_le32(kvirt_to_bus(buf));
ir_prg[0].branchAddress = cpu_to_le32((virt_to_bus(&(ir_prg[1].control))
& 0xfffffff0) | 0x1);
/* the second descriptor will read PAGE_SIZE-4 bytes */
ir_prg[1].control = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
DMA_CTL_BRANCH | (PAGE_SIZE-4);
ir_prg[1].address = kvirt_to_bus(buf+4);
ir_prg[1].branchAddress = (virt_to_bus(&(ir_prg[2].control))
& 0xfffffff0) | 0x1;
ir_prg[1].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
DMA_CTL_BRANCH | (PAGE_SIZE-4));
ir_prg[1].address = cpu_to_le32(kvirt_to_bus(buf+4));
ir_prg[1].branchAddress = cpu_to_le32((virt_to_bus(&(ir_prg[2].control))
& 0xfffffff0) | 0x1);
for (i=2;i<d->nb_cmd-1;i++) {
ir_prg[i].control = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
DMA_CTL_BRANCH | PAGE_SIZE;
ir_prg[i].address = kvirt_to_bus(buf+(i-1)*PAGE_SIZE);
ir_prg[i].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
DMA_CTL_BRANCH | PAGE_SIZE);
ir_prg[i].address = cpu_to_le32(kvirt_to_bus(buf+(i-1)*PAGE_SIZE));
ir_prg[i].branchAddress =
(virt_to_bus(&(ir_prg[i+1].control))
& 0xfffffff0) | 0x1;
cpu_to_le32((virt_to_bus(&(ir_prg[i+1].control))
& 0xfffffff0) | 0x1);
}
/* the last descriptor will generate an interrupt */
ir_prg[i].control = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
DMA_CTL_IRQ | DMA_CTL_BRANCH | d->left_size;
ir_prg[i].address = kvirt_to_bus(buf+(i-1)*PAGE_SIZE);
ir_prg[i].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
DMA_CTL_IRQ | DMA_CTL_BRANCH | d->left_size);
ir_prg[i].address = cpu_to_le32(kvirt_to_bus(buf+(i-1)*PAGE_SIZE));
}
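All of these descriptor links follow one encoding: the 16-byte-aligned bus address of the next descriptor block in the upper 28 bits and, in the low 4 bits, what appears to be the OHCI Z value (the number of descriptors in that block: 1 for the receive INPUT_MORE blocks, 3 for the transmit begin/end pairs), with the whole word now stored little-endian. A hypothetical helper capturing the pattern, not part of the patch:

static inline u32 dma_prg_branch(void *next_desc, int z)
{
        /* upper 28 bits: 16-byte-aligned bus address of the next
         * descriptor block; low 4 bits: its descriptor count (Z) */
        return cpu_to_le32((virt_to_bus(next_desc) & 0xfffffff0) | (z & 0xf));
}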
static void initialize_dma_ir_ctx(struct dma_iso_ctx *d, int tag, int flags)
......@@ -628,7 +575,7 @@ int wakeup_dma_ir_ctx(struct ti_ohci *ohci, struct dma_iso_ctx *d)
spin_lock(&d->lock);
for (i=0;i<d->num_desc;i++) {
if (d->ir_prg[i][d->nb_cmd-1].status & 0xFFFF0000) {
if (d->ir_prg[i][d->nb_cmd-1].status & cpu_to_le32(0xFFFF0000)) {
reset_ir_status(d, i);
d->buffer_status[i] = VIDEO1394_BUFFER_READY;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)
......@@ -664,7 +611,7 @@ static inline void put_timestamp(struct ti_ohci *ohci, struct dma_iso_ctx * d,
buf[7] = timeStamp & 0xff;
/* if first packet is empty packet, then put timestamp into the next full one too */
if ( (d->it_prg[n][0].data[1] >>16) == 0x008) {
if ( (le32_to_cpu(d->it_prg[n][0].data[1]) >>16) == 0x008) {
buf += d->packet_size;
buf[6] = timeStamp >> 8;
buf[7] = timeStamp & 0xff;
......@@ -683,7 +630,7 @@ static inline void put_timestamp(struct ti_ohci *ohci, struct dma_iso_ctx * d,
buf[7] = timeStamp & 0xff;
/* if first packet is empty packet, then put timestamp into the next full one too */
if ( (d->it_prg[n][0].data[1] >>16) == 0x008) {
if ( (le32_to_cpu(d->it_prg[n][0].data[1]) >>16) == 0x008) {
buf += d->packet_size;
buf[6] = timeStamp >> 8;
buf[7] = timeStamp & 0xff;
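The 0x008 comparison works because data[1] carries the packet's payload length in its upper 16 bits (it is initialised to packet_size << 16 above), so a value of 8 means the packet holds only the two CIP header quadlets and no payload. Since the descriptors are now kept little-endian in memory, the field has to be converted back before the test; a hypothetical wrapper, not in the patch, makes that intent explicit:

/* True if the first packet of buffer 'n' carries only the 8-byte
 * CIP header, i.e. is an empty packet. */
static inline int first_packet_is_empty(struct dma_iso_ctx *d, int n)
{
        return (le32_to_cpu(d->it_prg[n][0].data[1]) >> 16) == 0x008;
}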
......@@ -707,7 +654,8 @@ int wakeup_dma_it_ctx(struct ti_ohci *ohci, struct dma_iso_ctx *d)
spin_lock(&d->lock);
for (i=0;i<d->num_desc;i++) {
if (d->it_prg[i][d->last_used_cmd[i]].end.status& 0xFFFF0000) {
if (d->it_prg[i][d->last_used_cmd[i]].end.status&
cpu_to_le32(0xFFFF0000)) {
int next = d->next_buffer[i];
put_timestamp(ohci, d, next);
d->it_prg[i][d->last_used_cmd[i]].end.status = 0;
......@@ -727,39 +675,40 @@ static void initialize_dma_it_prg(struct dma_iso_ctx *d, int n, int sync_tag)
d->last_used_cmd[n] = d->nb_cmd - 1;
for (i=0;i<d->nb_cmd;i++) {
it_prg[i].begin.control = DMA_CTL_OUTPUT_MORE |
DMA_CTL_IMMEDIATE | 8 ;
it_prg[i].begin.control = cpu_to_le32(DMA_CTL_OUTPUT_MORE |
DMA_CTL_IMMEDIATE | 8) ;
it_prg[i].begin.address = 0;
it_prg[i].begin.status = 0;
it_prg[i].data[0] =
it_prg[i].data[0] = cpu_to_le32(
(SPEED_100 << 16)
| (/* tag */ 1 << 14)
| (d->channel << 8)
| (TCODE_ISO_DATA << 4);
if (i==0) it_prg[i].data[0] |= sync_tag;
it_prg[i].data[1] = d->packet_size << 16;
| (TCODE_ISO_DATA << 4));
if (i==0) it_prg[i].data[0] |= cpu_to_le32(sync_tag);
it_prg[i].data[1] = cpu_to_le32(d->packet_size << 16);
it_prg[i].data[2] = 0;
it_prg[i].data[3] = 0;
it_prg[i].end.control = DMA_CTL_OUTPUT_LAST | DMA_CTL_BRANCH;
it_prg[i].end.control = cpu_to_le32(DMA_CTL_OUTPUT_LAST |
DMA_CTL_BRANCH);
it_prg[i].end.address =
kvirt_to_bus(buf+i*d->packet_size);
cpu_to_le32(kvirt_to_bus(buf+i*d->packet_size));
if (i<d->nb_cmd-1) {
it_prg[i].end.control |= d->packet_size;
it_prg[i].end.control |= cpu_to_le32(d->packet_size);
it_prg[i].begin.branchAddress =
(virt_to_bus(&(it_prg[i+1].begin.control))
& 0xfffffff0) | 0x3;
cpu_to_le32((virt_to_bus(&(it_prg[i+1].begin.control))
& 0xfffffff0) | 0x3);
it_prg[i].end.branchAddress =
(virt_to_bus(&(it_prg[i+1].begin.control))
& 0xfffffff0) | 0x3;
cpu_to_le32((virt_to_bus(&(it_prg[i+1].begin.control))
& 0xfffffff0) | 0x3);
}
else {
/* the last prg generates an interrupt */
it_prg[i].end.control |= DMA_CTL_UPDATE |
DMA_CTL_IRQ | d->left_size;
it_prg[i].end.control |= cpu_to_le32(DMA_CTL_UPDATE |
DMA_CTL_IRQ | d->left_size);
/* the last prg doesn't branch */
it_prg[i].begin.branchAddress = 0;
it_prg[i].end.branchAddress = 0;
......@@ -798,21 +747,21 @@ static void initialize_dma_it_prg_var_packet_queue(
} else {
size = packet_sizes[i];
}
it_prg[i].data[1] = size << 16;
it_prg[i].end.control = DMA_CTL_OUTPUT_LAST | DMA_CTL_BRANCH;
it_prg[i].data[1] = cpu_to_le32(size << 16);
it_prg[i].end.control = cpu_to_le32(DMA_CTL_OUTPUT_LAST | DMA_CTL_BRANCH);
if (i < d->nb_cmd-1 && packet_sizes[i+1] != 0) {
it_prg[i].end.control |= size;
it_prg[i].end.control |= cpu_to_le32(size);
it_prg[i].begin.branchAddress =
(virt_to_bus(&(it_prg[i+1].begin.control))
& 0xfffffff0) | 0x3;
cpu_to_le32((virt_to_bus(&(it_prg[i+1].begin.control))
& 0xfffffff0) | 0x3);
it_prg[i].end.branchAddress =
(virt_to_bus(&(it_prg[i+1].begin.control))
& 0xfffffff0) | 0x3;
cpu_to_le32((virt_to_bus(&(it_prg[i+1].begin.control))
& 0xfffffff0) | 0x3);
} else {
/* the last prg generates an interrupt */
it_prg[i].end.control |= DMA_CTL_UPDATE |
DMA_CTL_IRQ | size;
it_prg[i].end.control |= cpu_to_le32(DMA_CTL_UPDATE |
DMA_CTL_IRQ | size);
/* the last prg doesn't branch */
it_prg[i].begin.branchAddress = 0;
it_prg[i].end.branchAddress = 0;
......@@ -1057,8 +1006,8 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
if (d->last_buffer>=0)
d->ir_prg[d->last_buffer][d->nb_cmd-1].branchAddress =
(virt_to_bus(&(d->ir_prg[v.buffer][0].control))
& 0xfffffff0) | 0x1;
cpu_to_le32((virt_to_bus(&(d->ir_prg[v.buffer][0].control))
& 0xfffffff0) | 0x1);
d->last_buffer = v.buffer;
......@@ -1217,14 +1166,14 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
d->it_prg[d->last_buffer]
[ d->last_used_cmd[d->last_buffer]
].end.branchAddress =
(virt_to_bus(&(d->it_prg[v.buffer][0].begin.control))
& 0xfffffff0) | 0x3;
cpu_to_le32((virt_to_bus(&(d->it_prg[v.buffer][0].begin.control))
& 0xfffffff0) | 0x3);
d->it_prg[d->last_buffer]
[d->last_used_cmd[d->last_buffer]
].begin.branchAddress =
(virt_to_bus(&(d->it_prg[v.buffer][0].begin.control))
& 0xfffffff0) | 0x3;
cpu_to_le32((virt_to_bus(&(d->it_prg[v.buffer][0].begin.control))
& 0xfffffff0) | 0x3);
d->next_buffer[d->last_buffer] = v.buffer;
}
d->last_buffer = v.buffer;
......