Commit 35d9ed07 authored by Linus Torvalds's avatar Linus Torvalds

Merge http://fbdev.bkbits.net/fbdev-2.5

into home.transmeta.com:/home/torvalds/v2.5/linux
parents f35e4d7d 322bdbf7
......@@ -299,6 +299,8 @@ KAO -->
EHCI, OHCI, or UHCI.
</para>
!Edrivers/usb/core/hcd.c
!Edrivers/usb/core/hcd-pci.c
!Edrivers/usb/core/buffer.c
</sect1>
</chapter>
......
......@@ -214,6 +214,7 @@
#include <linux/sched.h>
#include <linux/pm.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <asm/system.h>
......@@ -419,6 +420,7 @@ static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);
static struct apm_user * user_list;
static spinlock_t user_list_lock = SPIN_LOCK_UNLOCKED;
static struct desc_struct bad_bios_desc = { 0, 0x00409200 };
static char driver_version[] = "1.16"; /* no spaces */
......@@ -568,7 +570,12 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, u32 *esi)
{
APM_DECL_SEGS
unsigned long flags;
unsigned long flags;
int cpu = smp_processor_id();
struct desc_struct save_desc_40;
save_desc_40 = cpu_gdt_table[cpu][0x40 / 8];
cpu_gdt_table[cpu][0x40 / 8] = bad_bios_desc;
local_save_flags(flags);
APM_DO_CLI;
......@@ -591,6 +598,7 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
: "memory", "cc");
APM_DO_RESTORE_SEGS;
local_irq_restore(flags);
cpu_gdt_table[cpu][0x40 / 8] = save_desc_40;
return *eax & 0xff;
}
......@@ -613,6 +621,11 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
u8 error;
APM_DECL_SEGS
unsigned long flags;
int cpu = smp_processor_id();
struct desc_struct save_desc_40;
save_desc_40 = cpu_gdt_table[cpu][0x40 / 8];
cpu_gdt_table[cpu][0x40 / 8] = bad_bios_desc;
local_save_flags(flags);
APM_DO_CLI;
......@@ -639,6 +652,7 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
}
APM_DO_RESTORE_SEGS;
local_irq_restore(flags);
cpu_gdt_table[smp_processor_id()][0x40 / 8] = save_desc_40;
return error;
}
......@@ -931,9 +945,9 @@ static void handle_poweroff (int key, struct pt_regs *pt_regs,
}
static struct sysrq_key_op sysrq_poweroff_op = {
handler: handle_poweroff,
help_msg: "Off",
action_msg: "Power Off\n"
.handler = handle_poweroff,
.help_msg = "Off",
.action_msg = "Power Off\n"
};
......@@ -1826,12 +1840,12 @@ __setup("apm=", apm_setup);
#endif
static struct file_operations apm_bios_fops = {
owner: THIS_MODULE,
read: do_read,
poll: do_poll,
ioctl: do_ioctl,
open: do_open,
release: do_release,
.owner = THIS_MODULE,
.read = do_read,
.poll = do_poll,
.ioctl = do_ioctl,
.open = do_open,
.release = do_release,
};
static struct miscdevice apm_device = {
......@@ -1927,13 +1941,13 @@ static int __init apm_init(void)
* NOTE: on SMP we call into the APM BIOS only on CPU#0, so it's
* enough to modify CPU#0's GDT.
*/
for (i = 0; i < NR_CPUS; i++) {
set_base(cpu_gdt_table[i][APM_40 >> 3],
__va((unsigned long)0x40 << 4));
_set_limit((char *)&cpu_gdt_table[i][APM_40 >> 3], 4095 - (0x40 << 4));
set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
_set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
apm_bios_entry.offset = apm_info.bios.offset;
apm_bios_entry.segment = APM_CS;
apm_bios_entry.offset = apm_info.bios.offset;
apm_bios_entry.segment = APM_CS;
for (i = 0; i < NR_CPUS; i++) {
set_base(cpu_gdt_table[i][APM_CS >> 3],
__va((unsigned long)apm_info.bios.cseg << 4));
set_base(cpu_gdt_table[i][APM_CS_16 >> 3],
......
......@@ -307,7 +307,7 @@ static void do_mce_timer(void *data)
}
static struct tq_struct mce_task = {
routine: do_mce_timer
.routine = do_mce_timer
};
static void mce_timerfunc (unsigned long data)
......
......@@ -189,8 +189,8 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
}
static struct cpu_dev amd_cpu_dev __initdata = {
c_vendor: "AMD",
c_ident: { "AuthenticAMD" },
.c_vendor = "AMD",
.c_ident = { "AuthenticAMD" },
c_models: {
{ X86_VENDOR_AMD, 4,
{
......@@ -203,9 +203,9 @@ static struct cpu_dev amd_cpu_dev __initdata = {
}
},
},
c_init: init_amd,
c_identify: amd_identify,
c_size_cache: amd_size_cache,
.c_init = init_amd,
.c_identify = amd_identify,
.c_size_cache = amd_size_cache,
};
int __init amd_init_cpu(void)
......
......@@ -411,10 +411,10 @@ static unsigned int centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size
}
static struct cpu_dev centaur_cpu_dev __initdata = {
c_vendor: "Centaur",
c_ident: { "CentaurHauls" },
c_init: init_centaur,
c_size_cache: centaur_size_cache,
.c_vendor = "Centaur",
.c_ident = { "CentaurHauls" },
.c_init = init_centaur,
.c_size_cache = centaur_size_cache,
};
int __init centaur_init_cpu(void)
......
......@@ -31,7 +31,7 @@ static void default_init(struct cpuinfo_x86 * c)
}
static struct cpu_dev default_cpu = {
c_init: default_init,
.c_init = default_init,
};
static struct cpu_dev * this_cpu = &default_cpu;
......
......@@ -322,10 +322,10 @@ static void cyrix_identify(struct cpuinfo_x86 * c)
}
static struct cpu_dev cyrix_cpu_dev __initdata = {
c_vendor: "Cyrix",
c_ident: { "CyrixInstead" },
c_init: init_cyrix,
c_identify: cyrix_identify,
.c_vendor = "Cyrix",
.c_ident = { "CyrixInstead" },
.c_init = init_cyrix,
.c_identify = cyrix_identify,
};
int __init cyrix_init_cpu(void)
......@@ -337,10 +337,10 @@ int __init cyrix_init_cpu(void)
//early_arch_initcall(cyrix_init_cpu);
static struct cpu_dev nsc_cpu_dev __initdata = {
c_vendor: "NSC",
c_ident: { "Geode by NSC" },
c_init: init_cyrix,
c_identify: generic_identify,
.c_vendor = "NSC",
.c_ident = { "Geode by NSC" },
.c_init = init_cyrix,
.c_identify = generic_identify,
};
int __init nsc_init_cpu(void)
......
......@@ -327,8 +327,8 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
}
static struct cpu_dev intel_cpu_dev __initdata = {
c_vendor: "Intel",
c_ident: { "GenuineIntel" },
.c_vendor = "Intel",
.c_ident = { "GenuineIntel" },
c_models: {
{ X86_VENDOR_INTEL, 4,
{
......@@ -375,9 +375,9 @@ static struct cpu_dev intel_cpu_dev __initdata = {
}
},
},
c_init: init_intel,
c_identify: generic_identify,
c_size_cache: intel_size_cache,
.c_init = init_intel,
.c_identify = generic_identify,
.c_size_cache = intel_size_cache,
};
__init int intel_cpu_init(void)
......
......@@ -42,13 +42,13 @@ static void nexgen_identify(struct cpuinfo_x86 * c)
}
static struct cpu_dev nexgen_cpu_dev __initdata = {
c_vendor: "Nexgen",
c_ident: { "NexGenDriven" },
.c_vendor = "Nexgen",
.c_ident = { "NexGenDriven" },
c_models: {
{ X86_VENDOR_NEXGEN,5, { [1] "Nx586" } },
},
c_init: init_nexgen,
c_identify: nexgen_identify,
.c_init = init_nexgen,
.c_identify = nexgen_identify,
};
int __init nexgen_init_cpu(void)
......
......@@ -119,8 +119,8 @@ static void c_stop(struct seq_file *m, void *v)
{
}
struct seq_operations cpuinfo_op = {
start: c_start,
next: c_next,
stop: c_stop,
show: show_cpuinfo,
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo,
};
......@@ -29,8 +29,8 @@ static void __init init_rise(struct cpuinfo_x86 *c)
}
static struct cpu_dev rise_cpu_dev __initdata = {
c_vendor: "Rise",
c_ident: { "RiseRiseRise" },
.c_vendor = "Rise",
.c_ident = { "RiseRiseRise" },
c_models: {
{ X86_VENDOR_RISE, 5,
{
......@@ -41,7 +41,7 @@ static struct cpu_dev rise_cpu_dev __initdata = {
}
},
},
c_init: init_rise,
.c_init = init_rise,
};
int __init rise_init_cpu(void)
......
......@@ -80,10 +80,10 @@ static void transmeta_identify(struct cpuinfo_x86 * c)
}
static struct cpu_dev transmeta_cpu_dev __initdata = {
c_vendor: "Transmeta",
c_ident: { "GenuineTMx86", "TransmetaCPU" },
c_init: init_transmeta,
c_identify: transmeta_identify,
.c_vendor = "Transmeta",
.c_ident = { "GenuineTMx86", "TransmetaCPU" },
.c_init = init_transmeta,
.c_identify = transmeta_identify,
};
int __init transmeta_init_cpu(void)
......
......@@ -11,8 +11,8 @@ static void __init init_umc(struct cpuinfo_x86 * c)
}
static struct cpu_dev umc_cpu_dev __initdata = {
c_vendor: "UMC",
c_ident: { "UMC UMC UMC" },
.c_vendor = "UMC",
.c_ident = { "UMC UMC UMC" },
c_models: {
{ X86_VENDOR_UMC, 4,
{
......@@ -21,7 +21,7 @@ static struct cpu_dev umc_cpu_dev __initdata = {
}
},
},
c_init: init_umc,
.c_init = init_umc,
};
int __init umc_init_cpu(void)
......
......@@ -146,10 +146,10 @@ static int cpuid_open(struct inode *inode, struct file *file)
* File operations we support
*/
static struct file_operations cpuid_fops = {
owner: THIS_MODULE,
llseek: cpuid_seek,
read: cpuid_read,
open: cpuid_open,
.owner = THIS_MODULE,
.llseek = cpuid_seek,
.read = cpuid_read,
.open = cpuid_open,
};
int __init cpuid_init(void)
......
......@@ -247,13 +247,13 @@ static int i8259A_resume(struct device *dev, u32 level)
}
static struct device_driver driver_i8259A = {
resume: i8259A_resume,
.resume = i8259A_resume,
};
static struct device device_i8259A = {
name: "i8259A",
bus_id: "0020",
driver: &driver_i8259A,
.name = "i8259A",
.bus_id = "0020",
.driver = &driver_i8259A,
};
static int __init init_8259A_devicefs(void)
......
......@@ -109,17 +109,17 @@ static unsigned int mc_fsize; /* file size of /dev/cpu/microcode */
/* we share file_operations between misc and devfs mechanisms */
static struct file_operations microcode_fops = {
owner: THIS_MODULE,
read: microcode_read,
write: microcode_write,
ioctl: microcode_ioctl,
open: microcode_open,
.owner = THIS_MODULE,
.read = microcode_read,
.write = microcode_write,
.ioctl = microcode_ioctl,
.open = microcode_open,
};
static struct miscdevice microcode_dev = {
minor: MICROCODE_MINOR,
name: "microcode",
fops: &microcode_fops,
.minor = MICROCODE_MINOR,
.name = "microcode",
.fops = &microcode_fops,
};
static devfs_handle_t devfs_handle;
......
......@@ -246,11 +246,11 @@ static int msr_open(struct inode *inode, struct file *file)
* File operations we support
*/
static struct file_operations msr_fops = {
owner: THIS_MODULE,
llseek: msr_seek,
read: msr_read,
write: msr_write,
open: msr_open,
.owner = THIS_MODULE,
.llseek = msr_seek,
.read = msr_read,
.write = msr_write,
.open = msr_open,
};
int __init msr_init(void)
......
......@@ -1836,11 +1836,11 @@ static int mtrr_close (struct inode *ino, struct file *file)
static struct file_operations mtrr_fops =
{
owner: THIS_MODULE,
read: mtrr_read,
write: mtrr_write,
ioctl: mtrr_ioctl,
release: mtrr_close,
.owner = THIS_MODULE,
.read = mtrr_read,
.write = mtrr_write,
.ioctl = mtrr_ioctl,
.release = mtrr_close,
};
# ifdef CONFIG_PROC_FS
......
......@@ -639,8 +639,8 @@ static unsigned long __init calibrate_tsc(void)
}
static struct device device_i8253 = {
name: "i8253",
bus_id: "0040",
.name = "i8253",
.bus_id = "0040",
};
static int time_init_driverfs(void)
......
......@@ -159,7 +159,7 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag
area->phys_addr = phys_addr;
addr = area->addr;
if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) {
vfree(addr);
vunmap(addr);
return NULL;
}
return (void *) (offset + (char *)addr);
......@@ -215,13 +215,13 @@ void iounmap(void *addr)
struct vm_struct *p;
if (addr <= high_memory)
return;
p = remove_kernel_area((void *) (PAGE_MASK & (unsigned long) addr));
p = remove_vm_area((void *) (PAGE_MASK & (unsigned long) addr));
if (!p) {
printk("__iounmap: bad address %p\n", addr);
return;
}
vmfree_area_pages(VMALLOC_VMADDR(p->addr), p->size);
unmap_vm_area(p);
if (p->flags && p->phys_addr < virt_to_phys(high_memory)) {
change_page_attr(virt_to_page(__va(p->phys_addr)),
p->size >> PAGE_SHIFT,
......
......@@ -4,6 +4,8 @@
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Based upon code written by Linus Torvalds and others.
*/
#warning "major untested changes to this file --hch (2002/08/05)"
#include <linux/slab.h>
#include <linux/vmalloc.h>
......@@ -16,6 +18,7 @@ static struct vm_struct * modvmlist = NULL;
void module_unmap (void * addr)
{
struct vm_struct **p, *tmp;
int i;
if (!addr)
return;
......@@ -23,21 +26,38 @@ void module_unmap (void * addr)
printk("Trying to unmap module with bad address (%p)\n", addr);
return;
}
for (p = &modvmlist ; (tmp = *p) ; p = &tmp->next) {
if (tmp->addr == addr) {
*p = tmp->next;
vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
kfree(tmp);
return;
}
}
printk("Trying to unmap nonexistent module vm area (%p)\n", addr);
return;
found:
unmap_vm_area(tmp);
for (i = 0; i < tmp->nr_pages; i++) {
if (unlikely(!tmp->pages[i]))
BUG();
__free_page(tmp->pages[i]);
}
kfree(tmp->pages);
kfree(tmp);
}
void * module_map (unsigned long size)
{
void * addr;
struct vm_struct **p, *tmp, *area;
struct vm_struct *area;
struct page **pages;
void * addr;
unsigned int nr_pages, array_size, i;
size = PAGE_ALIGN(size);
if (!size || size > MODULES_LEN) return NULL;
......@@ -55,11 +75,32 @@ void * module_map (unsigned long size)
area->size = size + PAGE_SIZE;
area->addr = addr;
area->next = *p;
area->pages = NULL;
area->nr_pages = 0;
area->phys_addr = 0;
*p = area;
if (vmalloc_area_pages(VMALLOC_VMADDR(addr), size, GFP_KERNEL, PAGE_KERNEL)) {
module_unmap(addr);
nr_pages = (size+PAGE_SIZE) >> PAGE_SHIFT;
array_size = (nr_pages * sizeof(struct page *));
area->nr_pages = nr_pages;
area->pages = pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
if (!area->pages)
return NULL;
memset(area->pages, 0, array_size);
for (i = 0; i < area->nr_pages; i++) {
area->pages[i] = alloc_page(gfp_mask);
if (unlikely(!area->pages[i]))
goto fail;
}
return addr;
if (map_vm_area(area, prot, &pages))
goto fail;
return area->addr;
fail:
vfree(area->addr);
return NULL;
}
}
......@@ -105,10 +105,9 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
unsigned int cmd, unsigned long arg);
static int revalidate_allvol(kdev_t dev);
static int revalidate_logvol(kdev_t dev, int maxusage);
static int cciss_revalidate(kdev_t dev);
static int deregister_disk(int ctlr, int logvol);
static int register_new_disk(kdev_t dev, int cltr);
static int register_new_disk(int cltr);
static void cciss_getgeometry(int cntl_num);
......@@ -333,27 +332,6 @@ static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
}
}
/*
* fills in the disk information.
*/
static void cciss_geninit( int ctlr)
{
drive_info_struct *drv;
int i;
/* Loop through each real device */
hba[ctlr]->gendisk.nr_real = 0;
for(i=0; i< NWD; i++)
{
drv = &(hba[ctlr]->drv[i]);
if( !(drv->nr_blocks))
continue;
hba[ctlr]->hd[i << NWD_SHIFT].nr_sects = drv->nr_blocks;
//hba[ctlr]->gendisk.nr_real++;
(BLK_DEFAULT_QUEUE(MAJOR_NR + ctlr))->hardsect_size = drv->block_size;
}
hba[ctlr]->gendisk.nr_real = hba[ctlr]->highest_lun+1;
}
/*
* Open. Make sure the device is really there.
*/
......@@ -599,7 +577,7 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
case CCISS_REGNEWD:
{
return(register_new_disk(inode->i_rdev, ctlr));
return(register_new_disk(ctlr));
}
case CCISS_PASSTHRU:
{
......@@ -726,49 +704,11 @@ static int cciss_revalidate(kdev_t dev)
{
int ctlr = major(dev) - MAJOR_NR;
int target = minor(dev) >> NWD_SHIFT;
struct gendisk *gdev = &(hba[ctlr]->gendisk);
gdev->part[minor(dev)].nr_sects = hba[ctlr]->drv[target].nr_blocks;
struct gendisk *disk = &hba[ctlr]->gendisk[target];
disk->part[0].nr_sects = hba[ctlr]->drv[target].nr_blocks;
return 0;
}
/* Borrowed and adapted from sd.c */
/*
* FIXME: we are missing the exclusion with ->open() here - it can happen
* just as we are rereading partition tables.
*/
static int revalidate_logvol(kdev_t dev, int maxusage)
{
int ctlr, target;
struct gendisk *gdev;
unsigned long flags;
int res;
target = minor(dev) >> NWD_SHIFT;
ctlr = major(dev) - MAJOR_NR;
gdev = &(hba[ctlr]->gendisk);
spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
if (hba[ctlr]->drv[target].usage_count > maxusage) {
spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
printk(KERN_WARNING "cciss: Device busy for "
"revalidation (usage=%d)\n",
hba[ctlr]->drv[target].usage_count);
return -EBUSY;
}
hba[ctlr]->drv[target].usage_count++;
spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
res = wipe_partitions(dev);
if (res)
goto leave;
/* setup partitions per disk */
grok_partitions(dev, hba[ctlr]->drv[target].nr_blocks);
leave:
hba[ctlr]->drv[target].usage_count--;
return res;
}
/*
* revalidate_allvol is for online array config utilities. After a
* utility reconfigures the drives in the array, it can use this function
......@@ -799,6 +739,15 @@ static int revalidate_allvol(kdev_t dev)
hba[ctlr]->usage_count++;
spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
for(i=0; i< NWD; i++) {
struct gendisk *disk = &hba[ctlr]->gendisk[i];
if (disk->major_name) {
wipe_partitions(mk_kdev(disk->major, disk->first_minor));
del_gendisk(disk);
disk->major_name = NULL;
}
}
/*
* Set the partition and block size structures for all volumes
* on this controller to zero. We will reread all of this data
......@@ -806,8 +755,6 @@ static int revalidate_allvol(kdev_t dev)
memset(hba[ctlr]->hd, 0, sizeof(struct hd_struct) * 256);
memset(hba[ctlr]->drv, 0, sizeof(drive_info_struct)
* CISS_MAX_LUN);
hba[ctlr]->gendisk.nr_real = 0;
/*
* Tell the array controller not to give us any interrupts while
* we check the new geometry. Then turn interrupts back on when
......@@ -817,13 +764,20 @@ static int revalidate_allvol(kdev_t dev)
cciss_getgeometry(ctlr);
hba[ctlr]->access.set_intr_mask(hba[ctlr], CCISS_INTR_ON);
cciss_geninit(ctlr);
for(i=0; i<NWD; i++) {
kdev_t kdev = mk_kdev(major(dev), i << NWD_SHIFT);
if (hba[ctlr]->gendisk.part[ i<<NWD_SHIFT ].nr_sects)
revalidate_logvol(kdev, 2);
/* Loop through each real device */
for (i = 0; i < NWD; i++) {
struct gendisk *disk = &hba[ctlr]->gendisk[i];
drive_info_struct *drv = &(hba[ctlr]->drv[i]);
if (!drv->nr_blocks)
continue;
(BLK_DEFAULT_QUEUE(MAJOR_NR + ctlr))->hardsect_size = drv->block_size;
disk->major_name = hba[ctlr]->names + 12 * i;
add_gendisk(disk);
register_disk(disk,
mk_kdev(disk->major, disk->first_minor),
1<<disk->minor_shift, disk->fops,
drv->nr_blocks);
}
hba[ctlr]->usage_count--;
return 0;
}
......@@ -831,18 +785,15 @@ static int revalidate_allvol(kdev_t dev)
static int deregister_disk(int ctlr, int logvol)
{
unsigned long flags;
struct gendisk *gdev = &(hba[ctlr]->gendisk);
struct gendisk *disk = &hba[ctlr]->gendisk[logvol];
ctlr_info_t *h = hba[ctlr];
int start, max_p;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
/* make sure logical volume is NOT is use */
if( h->drv[logvol].usage_count > 1)
{
if( h->drv[logvol].usage_count > 1) {
spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
return -EBUSY;
}
......@@ -850,25 +801,24 @@ static int deregister_disk(int ctlr, int logvol)
spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
/* invalidate the devices and deregister the disk */
max_p = 1 << gdev->minor_shift;
start = logvol << gdev->minor_shift;
wipe_partitions(mk_kdev(MAJOR_NR+ctlr, start));
if (disk->major_name) {
wipe_partitions(mk_kdev(disk->major, disk->first_minor));
del_gendisk(disk);
disk->major_name = NULL;
}
/* check to see if it was the last disk */
if (logvol == h->highest_lun)
{
if (logvol == h->highest_lun) {
/* if so, find the new hightest lun */
int i, newhighest =-1;
for(i=0; i<h->highest_lun; i++)
{
for(i=0; i<h->highest_lun; i++) {
/* if the disk has size > 0, it is available */
if (h->gendisk.part[i << gdev->minor_shift].nr_sects)
if (h->drv[i].nr_blocks)
newhighest = i;
}
h->highest_lun = newhighest;
}
--h->num_luns;
gdev->nr_real = h->highest_lun+1;
/* zero out the disk size info */
h->drv[logvol].nr_blocks = 0;
h->drv[logvol].block_size = 0;
......@@ -1065,11 +1015,11 @@ case CMD_HARDWARE_ERR:
return(return_status);
}
static int register_new_disk(kdev_t dev, int ctlr)
static int register_new_disk(int ctlr)
{
struct gendisk *gdev = &(hba[ctlr]->gendisk);
struct gendisk *disk;
ctlr_info_t *h = hba[ctlr];
int start, max_p, i;
int i;
int num_luns;
int logvol;
int new_lun_found = 0;
......@@ -1084,7 +1034,6 @@ static int register_new_disk(kdev_t dev, int ctlr)
__u32 lunid = 0;
unsigned int block_size;
unsigned int total_size;
kdev_t kdev;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
......@@ -1292,15 +1241,16 @@ static int register_new_disk(kdev_t dev, int ctlr)
hba[ctlr]->drv[logvol].sectors,
hba[ctlr]->drv[logvol].cylinders);
hba[ctlr]->drv[logvol].usage_count = 0;
max_p = 1 << gdev->minor_shift;
start = logvol<< gdev->minor_shift;
kdev = mk_kdev(MAJOR_NR + ctlr, logvol<< gdev->minor_shift);
wipe_partitions(kdev);
++hba[ctlr]->num_luns;
gdev->nr_real = hba[ctlr]->highest_lun + 1;
/* setup partitions per disk */
grok_partitions(kdev, hba[ctlr]->drv[logvol].nr_blocks);
disk = &hba[ctlr]->gendisk[logvol];
disk->major_name = hba[ctlr]->names + 12 * logvol;
add_gendisk(disk);
register_disk(disk,
mk_kdev(disk->major, disk->first_minor),
1<<disk->minor_shift,
disk->fops,
hba[ctlr]->drv[logvol].nr_blocks);
kfree(ld_buff);
kfree(size_buff);
......@@ -2488,22 +2438,28 @@ static int __init cciss_init_one(struct pci_dev *pdev,
blk_queue_max_sectors(q, 512);
/* Fill in the gendisk data */
hba[i]->gendisk.major = MAJOR_NR + i;
hba[i]->gendisk.major_name = "cciss";
hba[i]->gendisk.minor_shift = NWD_SHIFT;
hba[i]->gendisk.part = hba[i]->hd;
hba[i]->gendisk.nr_real = hba[i]->highest_lun+1;
/* Get on the disk list */
add_gendisk(&(hba[i]->gendisk));
for(j=0; j<NWD; j++) {
drive_info_struct *drv = &(hba[i]->drv[j]);
struct gendisk *disk = hba[i]->gendisk + j;
cciss_geninit(i);
for(j=0; j<NWD; j++)
register_disk(&(hba[i]->gendisk),
mk_kdev(MAJOR_NR+i, j <<4),
MAX_PART, &cciss_fops,
hba[i]->drv[j].nr_blocks);
sprintf(hba[i]->names + 12 * j, "cciss/c%dd%d", i, j);
disk->major = MAJOR_NR + i;
disk->first_minor = j << NWD_SHIFT;
disk->major_name = NULL;
disk->minor_shift = NWD_SHIFT;
disk->part = hba[i]->hd + (j << NWD_SHIFT);
disk->nr_real = 1;
if( !(drv->nr_blocks))
continue;
(BLK_DEFAULT_QUEUE(MAJOR_NR + i))->hardsect_size = drv->block_size;
disk->major_name = hba[i]->names + 12 * j;
add_gendisk(disk);
register_disk(disk,
mk_kdev(disk->major, disk->first_minor),
1<<disk->minor_shift, disk->fops,
drv->nr_blocks);
}
cciss_register_scsi(i, 1); /* hook ourself into SCSI subsystem */
......@@ -2513,7 +2469,7 @@ static int __init cciss_init_one(struct pci_dev *pdev,
static void __devexit cciss_remove_one (struct pci_dev *pdev)
{
ctlr_info_t *tmp_ptr;
int i;
int i, j;
char flush_buf[4];
int return_code;
......@@ -2548,7 +2504,13 @@ static void __devexit cciss_remove_one (struct pci_dev *pdev)
remove_proc_entry(hba[i]->devname, proc_cciss);
/* remove it from the disk list */
del_gendisk(&(hba[i]->gendisk));
for (j = 0; j < NWD; j++) {
struct gendisk *disk = &hba[i]->gendisk[j];
if (disk->major_name) {
del_gendisk(disk);
disk->major_name = NULL;
}
}
pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
......
......@@ -81,7 +81,8 @@ struct ctlr_info
int nr_frees;
// Disk structures we need to pass back
struct gendisk gendisk;
struct gendisk gendisk[NWD];
char names[12 * NWD];
// indexed by minor numbers
struct hd_struct hd[256];
int sizes[256];
......
......@@ -1483,6 +1483,7 @@ static int revalidate_allvol(kdev_t dev)
if (!drv->nr_blks)
continue;
(BLK_DEFAULT_QUEUE(MAJOR_NR + ctlr))->hardsect_size = drv->blk_size;
disk->major_name = ida_names + (ctlr*NWD+i)*10;
add_gendisk(disk);
register_disk(disk,
mk_kdev(disk->major,disk->first_minor),
......
......@@ -969,6 +969,31 @@ CONFIG_RTC
The module is called rtc.o. If you want to compile it as a module,
say M here and read <file:Documentation/modules.txt>.
Generic Real Time Clock Support
CONFIG_GEN_RTC
If you say Y here and create a character special file /dev/rtc with
major number 10 and minor number 135 using mknod ("man mknod"), you
will get access to the real time clock (or hardware clock) built
into your computer.
It reports status information via the file /proc/driver/rtc and its
behaviour is set by various ioctls on /dev/rtc. If you enable the
"extended RTC operation" below it will also provide an emulation
for RTC_UIE which is required by some programs and may improve
precision in some cases.
This driver is also available as a module ( = code which can be
inserted in and removed from the running kernel whenever you want).
The module is called genrtc.o. If you want to compile it as a module,
say M here and read <file:Documentation/modules.txt>. To load the
module automatically add 'alias char-major-10-135 genrtc' to your
/etc/modules.conf
Extended RTC operation
CONFIG_GEN_RTC_X
Provides an emulation for RTC_UIE which is required by some programs
and may improve precision of the generic RTC support in some cases.
CONFIG_H8
The Hitachi H8/337 is a microcontroller used to deal with the power
and thermal environment. If you say Y here, you will be able to
......
......@@ -151,6 +151,12 @@ if [ "$CONFIG_X86" = "y" -o "$CONFIG_IA64" = "y" ]; then
fi
tristate '/dev/nvram support' CONFIG_NVRAM
tristate 'Enhanced Real Time Clock Support' CONFIG_RTC
if [ "$CONFIG_RTC" != "y" ]; then
tristate 'Generic /dev/rtc emulation' CONFIG_GEN_RTC
if [ "$CONFIG_GEN_RTC" != "n" ]; then
bool ' Extended RTC operation' CONFIG_GEN_RTC_X
fi
fi
if [ "$CONFIG_IA64" = "y" ]; then
bool 'EFI Real Time Clock Services' CONFIG_EFI_RTC
fi
......
......@@ -167,6 +167,7 @@ obj-$(CONFIG_APPLICOM) += applicom.o
obj-$(CONFIG_SONYPI) += sonypi.o
obj-$(CONFIG_ATARIMOUSE) += atarimouse.o
obj-$(CONFIG_RTC) += rtc.o
obj-$(CONFIG_GEN_RTC) += genrtc.o
obj-$(CONFIG_EFI_RTC) += efirtc.o
ifeq ($(CONFIG_PPC),)
obj-$(CONFIG_NVRAM) += nvram.o
......
/*
* Real Time Clock interface for q40 and other m68k machines
* emulate some RTC irq capabilities in software
*
* Copyright (C) 1999 Richard Zidlicky
*
* based on Paul Gortmaker's rtc.c device and
* Sam Creasey Generic rtc driver
*
* This driver allows use of the real time clock (built into
* nearly all computers) from user space. It exports the /dev/rtc
* interface supporting various ioctl() and also the /proc/dev/rtc
* pseudo-file for status information.
*
* The ioctls can be used to set the interrupt behaviour where
* supported.
*
* The /dev/rtc interface will block on reads until an interrupt
* has been received. If a RTC interrupt has already happened,
* it will output an unsigned long and then block. The output value
* contains the interrupt status in the low byte and the number of
* interrupts since the last read in the remaining high bytes. The
* /dev/rtc interface can also be used with the select(2) call.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* 1.01 fix for 2.3.X rz@linux-m68k.org
* 1.02 merged with code from genrtc.c rz@linux-m68k.org
* 1.03 make it more portable zippel@linux-m68k.org
* 1.04 removed useless timer code rz@linux-m68k.org
* 1.05 portable RTC_UIE emulation rz@linux-m68k.org
* 1.06 set_rtc_time can return an error trini@kernel.crashing.org
*/
#define RTC_VERSION "1.06"
#include <linux/module.h>
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/fcntl.h>
#include <linux/rtc.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/tqueue.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/rtc.h>
/*
* We sponge a minor off of the misc major. No need slurping
* up another valuable major dev number for this. If you add
* an ioctl, make sure you don't conflict with SPARC's RTC
* ioctls.
*/
/* Sleepers blocked in gen_rtc_read() waiting for the next RTC interrupt. */
static DECLARE_WAIT_QUEUE_HEAD(gen_rtc_wait);

/* Forward declaration; defined below, used by the file_operations table. */
static int gen_rtc_ioctl(struct inode *inode, struct file *file,
			 unsigned int cmd, unsigned long arg);

/*
 * Bits in gen_rtc_status.
 */
#define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */

unsigned char gen_rtc_status; /* bitmapped status byte. */
unsigned long gen_rtc_irq_data; /* our output to the world */

/* months start at 0 now */
unsigned char days_in_mo[] =
{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};

/* non-zero while RTC_UIE emulation is enabled; written under gen_rtc_lock */
static int irq_active;

#ifdef CONFIG_GEN_RTC_X
/*
 * State for the software RTC_UIE emulation: a keventd task polls the RTC
 * seconds field, handing off to a one-shot kernel timer between ticks.
 */
struct tq_struct genrtc_task;		/* runs genrtc_troutine() via schedule_task() */
static struct timer_list timer_task;	/* one-shot timer resuming the polling */
static unsigned int oldsecs;		/* last RTC seconds value observed */
static int lostint;			/* seconds elapsed since the last observed tick */
static int tt_exp;			/* copy of timer_task.expires, for delay diagnostics */
void gen_rtc_timer(unsigned long data);
static volatile int stask_active; /* schedule_task */
static volatile int ttask_active; /* timer_task */
static int stop_rtc_timers; /* don't requeue tasks */
static spinlock_t gen_rtc_lock = SPIN_LOCK_UNLOCKED;
/*
* Routine to poll RTC seconds field for change as often as possible,
* after first RTC_UIE use timer to reduce polling
*/
void genrtc_troutine(void *data)
{
	unsigned int tmp = get_rtc_ss();

	/* RTC_UIE was switched off (gen_clear_rtc_irq_bit): stop requeueing */
	if (stop_rtc_timers) {
		stask_active = 0;
		return;
	}

	if (oldsecs != tmp){
		/* seconds field changed: remember it, signal the emulated
		 * interrupt, and sleep ~0.9 s via the one-shot timer before
		 * polling again (gen_rtc_timer hands control back to us) */
		oldsecs = tmp;
		timer_task.function = gen_rtc_timer;
		timer_task.expires = jiffies + HZ - (HZ/10);
		tt_exp=timer_task.expires;
		ttask_active=1;
		stask_active=0;
		add_timer(&timer_task);
		gen_rtc_interrupt(0);
	} else if (schedule_task(&genrtc_task) == 0)
		/* could not requeue ourselves — mark the task idle so
		 * gen_clear_rtc_irq_bit() does not wait for us forever */
		stask_active = 0;
}
/*
 * One-shot timer callback: fires roughly 0.9 s after the last observed RTC
 * tick.  Computes how many RTC seconds elapsed meanwhile (lostint — reported
 * to userspace by gen_rtc_interrupt() if ticks were missed) and hands the
 * polling back to the keventd task genrtc_troutine().
 */
void gen_rtc_timer(unsigned long data)
{
	lostint = get_rtc_ss() - oldsecs;
	if (lostint < 0)
		/* The RTC seconds field wrapped at 60.  The original code did
		 * "lostint = 60 - lostint", which ADDS the magnitude of the
		 * negative difference (e.g. old=58, new=2 gave 116 instead of
		 * 4); adding 60 yields the true elapsed-seconds count. */
		lostint += 60;

	if (time_after(jiffies, tt_exp))
		printk(KERN_INFO "genrtc: timer task delayed by %ld jiffies\n",
		       jiffies - tt_exp);

	/* timer done; resume fast polling in process context */
	ttask_active = 0;
	stask_active = 1;
	if (schedule_task(&genrtc_task) == 0)
		stask_active = 0;
}
/*
* call gen_rtc_interrupt function to signal an RTC_UIE,
* arg is unused.
* Could be invoked either from a real interrupt handler or
* from some routine that periodically (eg 100HZ) monitors
* whether RTC_SECS changed
*/
void gen_rtc_interrupt(unsigned long arg)
{
	/* We store the status in the low byte and the number of
	 * interrupts received since the last read in the remainder
	 * of rtc_irq_data. */

	/* bump the interrupt counter kept in bits 8 and up, then refresh
	 * the status byte (only RTC_UIE is emulated); note readers may
	 * xchg() this word concurrently, so the order of updates matters */
	gen_rtc_irq_data += 0x100;
	gen_rtc_irq_data &= ~0xff;
	gen_rtc_irq_data |= RTC_UIE;

	if (lostint){
		printk("genrtc: system delaying clock ticks?\n");
		/* increment count so that userspace knows something is wrong */
		gen_rtc_irq_data += ((lostint-1)<<8);
		lostint = 0;
	}

	/* release anyone blocked in gen_rtc_read() */
	wake_up_interruptible(&gen_rtc_wait);
}
/*
* Now all the various file operations that we export.
*/
/*
 * Read the pending interrupt word from /dev/rtc.  Blocks (unless O_NONBLOCK)
 * until gen_rtc_interrupt() has signalled at least one update interrupt.
 * The value delivered carries the interrupt status in the low byte and the
 * number of interrupts since the last read in the upper bytes.  Userspace
 * may read either sizeof(unsigned int) or sizeof(unsigned long) bytes.
 *
 * Returns the number of bytes copied, -EINVAL for a bad size, -EAGAIN for a
 * non-blocking read with nothing pending, -ERESTARTSYS on a signal, or
 * -EFAULT if the user buffer is bad.
 */
static ssize_t gen_rtc_read(struct file *file, char *buf,
			    size_t count, loff_t *ppos)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long data;
	ssize_t retval;

	if (count != sizeof (unsigned int) && count != sizeof (unsigned long))
		return -EINVAL;

	if (file->f_flags & O_NONBLOCK && !gen_rtc_irq_data)
		return -EAGAIN;

	add_wait_queue(&gen_rtc_wait, &wait);
	retval = -ERESTARTSYS;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		/* atomically claim-and-clear the pending event word */
		data = xchg(&gen_rtc_irq_data, 0);
		if (data)
			break;
		if (signal_pending(current))
			goto out;
		schedule();
	}

	/* first test allows optimizer to nuke this case for 32-bit machines */
	if (sizeof (int) != sizeof (long) && count == sizeof (unsigned int)) {
		unsigned int uidata = data;
		/* BUG FIX: cast to 'unsigned int *'.  The original cast to
		 * 'unsigned long *' made put_user() store sizeof(long) bytes
		 * into a buffer userspace sized for an int — a user-buffer
		 * overrun on 64-bit kernels. */
		retval = put_user(uidata, (unsigned int *)buf);
	}
	else {
		retval = put_user(data, (unsigned long *)buf);
	}
	if (!retval)
		/* report the bytes actually copied for this request rather
		 * than unconditionally sizeof(unsigned long) */
		retval = count;
out:
	current->state = TASK_RUNNING;
	remove_wait_queue(&gen_rtc_wait, &wait);

	return retval;
}
/*
 * poll()/select() support: /dev/rtc becomes readable as soon as at least
 * one emulated RTC interrupt is pending in gen_rtc_irq_data.
 */
static unsigned int gen_rtc_poll(struct file *file,
				 struct poll_table_struct *wait)
{
	poll_wait(file, &gen_rtc_wait, wait);
	return gen_rtc_irq_data ? (POLLIN | POLLRDNORM) : 0;
}
#endif
/*
* Used to disable/enable interrupts, only RTC_UIE supported
* We also clear out any old irq data after an ioctl() that
* meddles with the interrupt enable/disable bits.
*/
/*
 * Disable the emulated update interrupt.  Only RTC_UIE is supported, so
 * 'bit' is effectively ignored.  Stops both polling mechanisms and waits
 * for any in-flight keventd task to drain before clearing irq_active.
 */
static inline void gen_clear_rtc_irq_bit(unsigned char bit)
{
#ifdef CONFIG_GEN_RTC_X
	/* tell genrtc_troutine()/gen_rtc_timer() not to re-arm themselves */
	stop_rtc_timers = 1;
	if (ttask_active){
		del_timer_sync(&timer_task);
		ttask_active = 0;
	}
	/* yield until the scheduled task has observed stop_rtc_timers */
	while (stask_active)
		schedule();

	spin_lock(&gen_rtc_lock);
	irq_active = 0;
	spin_unlock(&gen_rtc_lock);
#endif
}
/*
 * Enable the emulated update interrupt (RTC_UIE is the only bit supported;
 * 'bit' is effectively ignored).  Starts the keventd polling task if it is
 * not already running.  Returns 0 on success, or -EINVAL when the emulation
 * (CONFIG_GEN_RTC_X) is not built in.
 */
static inline int gen_set_rtc_irq_bit(unsigned char bit)
{
#ifdef CONFIG_GEN_RTC_X
	spin_lock(&gen_rtc_lock);
	if ( !irq_active ) {
		irq_active = 1;
		stop_rtc_timers = 0;
		lostint = 0;
		/* seed the polling state and kick off the keventd task */
		genrtc_task.routine = genrtc_troutine;
		oldsecs = get_rtc_ss();
		init_timer(&timer_task);
		stask_active = 1;
		if (schedule_task(&genrtc_task) == 0){
			stask_active = 0;
		}
	}
	spin_unlock(&gen_rtc_lock);
	/* discard any stale interrupt data from a previous enable */
	gen_rtc_irq_data = 0;
	return 0;
#else
	return -EINVAL;
#endif
}
/*
 * ioctl handler for /dev/rtc.  Supported commands:
 *   RTC_PLL_GET / RTC_PLL_SET  - read/adjust the clock PLL
 *                                (CAP_SYS_TIME required to set)
 *   RTC_UIE_OFF / RTC_UIE_ON   - disable/enable the emulated update interrupt
 *   RTC_RD_TIME / RTC_SET_TIME - read/set the hardware clock
 *                                (CAP_SYS_TIME required to set)
 * Anything else yields -EINVAL.
 */
static int gen_rtc_ioctl(struct inode *inode, struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	struct rtc_time wtime;
	struct rtc_pll_info pll;

	switch (cmd) {
	case RTC_PLL_GET:
		if (get_rtc_pll(&pll))
			return -EINVAL;
		else
			return copy_to_user((void *)arg, &pll, sizeof pll) ? -EFAULT : 0;

	case RTC_PLL_SET:
		if (!capable(CAP_SYS_TIME))
			return -EACCES;
		if (copy_from_user(&pll, (struct rtc_pll_info*)arg,
				   sizeof(pll)))
			return -EFAULT;
		return set_rtc_pll(&pll);

	case RTC_UIE_OFF: /* disable ints from RTC updates. */
		gen_clear_rtc_irq_bit(RTC_UIE);
		return 0;

	case RTC_UIE_ON: /* enable ints for RTC updates. */
		return gen_set_rtc_irq_bit(RTC_UIE);

	case RTC_RD_TIME: /* Read the time/date from RTC */
		/* this doesn't get week-day, who cares */
		memset(&wtime, 0, sizeof(wtime));
		get_rtc_time(&wtime);

		return copy_to_user((void *)arg, &wtime, sizeof(wtime)) ? -EFAULT : 0;

	case RTC_SET_TIME: /* Set the RTC */
	    {
		int year;
		unsigned char leap_yr;

		if (!capable(CAP_SYS_TIME))
			return -EACCES;

		if (copy_from_user(&wtime, (struct rtc_time *)arg,
				   sizeof(wtime)))
			return -EFAULT;

		/* Gregorian leap-year rule on the full (1900-based) year */
		year = wtime.tm_year + 1900;
		leap_yr = ((!(year % 4) && (year % 100)) ||
			   !(year % 400));

		if ((wtime.tm_mon < 0 || wtime.tm_mon > 11) || (wtime.tm_mday < 1))
			return -EINVAL;

		/* NOTE(review): the tm_mday < 0 test below is redundant —
		 * tm_mday < 1 was already rejected just above. */
		if (wtime.tm_mday < 0 || wtime.tm_mday >
		    (days_in_mo[wtime.tm_mon] + ((wtime.tm_mon == 1) && leap_yr)))
			return -EINVAL;

		if (wtime.tm_hour < 0 || wtime.tm_hour >= 24 ||
		    wtime.tm_min < 0 || wtime.tm_min >= 60 ||
		    wtime.tm_sec < 0 || wtime.tm_sec >= 60)
			return -EINVAL;

		return set_rtc_time(&wtime);
	    }
	}

	return -EINVAL;
}
/*
* We enforce only one user at a time here with the open/close.
* Also clear the previous interrupt data on an open, and clean
* up things on a close.
*/
/*
 * Enforce the single-opener policy for /dev/rtc and reset the
 * per-session interrupt state.
 */
static int gen_rtc_open(struct inode *inode, struct file *file)
{
	/* only one concurrent user of the device */
	if ((gen_rtc_status & RTC_IS_OPEN) != 0)
		return -EBUSY;

	MOD_INC_USE_COUNT;
	gen_rtc_status |= RTC_IS_OPEN;

	/* start the session with no pending or active emulated interrupt */
	gen_rtc_irq_data = 0;
	irq_active = 0;

	return 0;
}
/*
 * Last close of /dev/rtc: shut down all emulated interrupt sources
 * and mark the device available again.
 */
static int gen_rtc_release(struct inode *inode, struct file *file)
{
	/* stop every interrupt source this driver can emulate */
	gen_clear_rtc_irq_bit(RTC_PIE | RTC_AIE | RTC_UIE);

	gen_rtc_status &= ~RTC_IS_OPEN;
	MOD_DEC_USE_COUNT;

	return 0;
}
static int gen_rtc_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data);
/*
* The various file operations we support.
*/
/*
 * File operations for /dev/rtc.  read() and poll() only exist when the
 * update-interrupt emulation is compiled in (CONFIG_GEN_RTC_X).
 */
static struct file_operations gen_rtc_fops = {
.owner = THIS_MODULE,
#ifdef CONFIG_GEN_RTC_X
.read = gen_rtc_read,
.poll = gen_rtc_poll,
#endif
.ioctl = gen_rtc_ioctl,
.open = gen_rtc_open,
.release = gen_rtc_release
};
static struct miscdevice rtc_gen_dev =
{
RTC_MINOR,
"rtc",
&gen_rtc_fops
};
/*
 * Driver entry point: register the /dev/rtc misc device and the
 * /proc/driver/rtc read hook.
 *
 * Returns 0 on success or the error from misc_register().  The
 * original code ignored that return value, leaving a dead device
 * node path and a proc entry pointing at an unregistered driver.
 */
int __init rtc_generic_init(void)
{
	int err;

	printk(KERN_INFO "Generic RTC Driver v%s\n", RTC_VERSION);

	err = misc_register(&rtc_gen_dev);
	if (err)
		return err;

	create_proc_read_entry ("driver/rtc", 0, 0, gen_rtc_read_proc, NULL);

	return 0;
}
/* Module teardown: undo rtc_generic_init() in reverse order. */
static void __exit rtc_generic_exit(void)
{
remove_proc_entry ("driver/rtc", NULL);
misc_deregister(&rtc_gen_dev);
}
module_init(rtc_generic_init);
module_exit(rtc_generic_exit);
EXPORT_NO_SYMBOLS;
/*
* Info exported via "/proc/rtc".
*/
/*
 * Format the /proc/driver/rtc report into @buf and return its length.
 *
 * NOTE(review): uses unbounded sprintf(); this relies on the output
 * always fitting in the single proc page the caller provides.
 */
int gen_rtc_proc_output(char *buf)
{
char *p;
struct rtc_time tm;
unsigned tmp;
struct rtc_pll_info pll;
p = buf;
get_rtc_time(&tm);
/* the epoch is reported as the literal constant 1900 */
p += sprintf(p,
"rtc_time\t: %02d:%02d:%02d\n"
"rtc_date\t: %04d-%02d-%02d\n"
"rtc_epoch\t: %04u\n",
tm.tm_hour, tm.tm_min, tm.tm_sec,
tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, 1900);
/* no alarm support: fields are zeroed, so "alarm" always reads 00:00:00
 * and the "**" fallbacks below are unreachable */
tm.tm_hour=0;tm.tm_min=0;tm.tm_sec=0;
p += sprintf(p, "alarm\t\t: ");
if (tm.tm_hour <= 24)
p += sprintf(p, "%02d:", tm.tm_hour);
else
p += sprintf(p, "**:");
if (tm.tm_min <= 59)
p += sprintf(p, "%02d:", tm.tm_min);
else
p += sprintf(p, "**:");
if (tm.tm_sec <= 59)
p += sprintf(p, "%02d\n", tm.tm_sec);
else
p += sprintf(p, "**\n");
/* tmp is a constant (RTC_24H), not read from hardware: the flag lines
 * below report fixed answers, except update_IRQ which tracks irq_active */
tmp= RTC_24H ;
p += sprintf(p,
"DST_enable\t: %s\n"
"BCD\t\t: %s\n"
"24hr\t\t: %s\n"
"square_wave\t: %s\n"
"alarm_IRQ\t: %s\n"
"update_IRQ\t: %s\n"
"periodic_IRQ\t: %s\n"
"periodic_freq\t: %ld\n"
"batt_status\t: %s\n",
(tmp & RTC_DST_EN) ? "yes" : "no",
(tmp & RTC_DM_BINARY) ? "no" : "yes",
(tmp & RTC_24H) ? "yes" : "no",
(tmp & RTC_SQWE) ? "yes" : "no",
(tmp & RTC_AIE) ? "yes" : "no",
irq_active ? "yes" : "no",
(tmp & RTC_PIE) ? "yes" : "no",
0L /* freq */,
"okay" );
/* PLL details only when the platform implements get_rtc_pll() */
if (!get_rtc_pll(&pll))
p += sprintf(p,
"PLL adjustment\t: %d\n"
"PLL max +ve adjustment\t: %d\n"
"PLL max -ve adjustment\t: %d\n"
"PLL +ve adjustment factor\t: %d\n"
"PLL -ve adjustment factor\t: %d\n"
"PLL frequency\t: %ld\n",
pll.pll_value,
pll.pll_max,
pll.pll_min,
pll.pll_posmult,
pll.pll_negmult,
pll.pll_clock);
return p - buf;
}
/*
 * Standard proc_read glue: generate the whole report into @page, then
 * window it to the requested [off, off+count) slice.
 */
static int gen_rtc_read_proc(char *page, char **start, off_t off,
                             int count, int *eof, void *data)
{
	int remaining = gen_rtc_proc_output (page);

	if (remaining <= off + count)
		*eof = 1;
	*start = page + off;

	/* clamp the slice to what is actually left after the offset */
	remaining -= off;
	if (remaining < 0)
		remaining = 0;
	else if (remaining > count)
		remaining = count;

	return remaining;
}
MODULE_AUTHOR("Richard Zidlicky");
MODULE_LICENSE("GPL");
......@@ -210,6 +210,9 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma)
return 0;
}
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);
/*
* This function reads the *virtual* memory as seen by the kernel.
*/
......@@ -273,8 +276,6 @@ static ssize_t read_kmem(struct file *file, char *buf,
return virtr + read;
}
extern long vwrite(char *buf, char *addr, unsigned long count);
/*
* This function writes to the *virtual* memory as seen by the kernel.
*/
......
......@@ -32,10 +32,10 @@ if [ "$CONFIG_I2C" != "n" ]; then
dep_tristate ' Embedded Planet RPX Lite/Classic suppoort' CONFIG_I2C_RPXLITE $CONFIG_I2C_ALGO8XX
fi
fi
if [ "$CONFIG_405" = "y" ]; then
dep_tristate 'PPC 405 I2C Algorithm' CONFIG_I2C_PPC405_ALGO $CONFIG_I2C
if [ "$CONFIG_I2C_PPC405_ALGO" != "n" ]; then
dep_tristate ' PPC 405 I2C Adapter' CONFIG_I2C_PPC405_ADAP $CONFIG_I2C_PPC405_ALGO
if [ "$CONFIG_IBM_OCP" = "y" ]; then
dep_tristate 'IBM on-chip I2C Algorithm' CONFIG_I2C_IBM_OCP_ALGO $CONFIG_I2C
if [ "$CONFIG_I2C_IBM_OCP_ALGO" != "n" ]; then
dep_tristate ' IBM on-chip I2C Adapter' CONFIG_I2C_IBM_OCP_ADAP $CONFIG_I2C_IBM_OCP_ALGO
fi
fi
......
......@@ -1107,7 +1107,6 @@ static void sym_eh_timeout(u_long p) { __sym_eh_done((Scsi_Cmnd *)p, 1); }
static int sym_eh_handler(int op, char *opname, Scsi_Cmnd *cmd)
{
hcb_p np = SYM_SOFTC_PTR(cmd);
unsigned long flags;
SYM_QUEHEAD *qp;
int to_do = SYM_EH_DO_IGNORE;
int sts = -1;
......@@ -1118,8 +1117,6 @@ static int sym_eh_handler(int op, char *opname, Scsi_Cmnd *cmd)
printf_warning("%s: %s operation started.\n", devname, opname);
SYM_LOCK_HCB(np, flags);
#if 0
/* This one should be the result of some race, thus to ignore */
if (cmd->serial_number != cmd->serial_number_at_timeout)
......@@ -1198,8 +1195,6 @@ static int sym_eh_handler(int op, char *opname, Scsi_Cmnd *cmd)
if (to_do == SYM_EH_DO_COMPLETE)
sym_xpt_done2(np, cmd, CAM_REQ_ABORTED);
SYM_UNLOCK_HCB(np, flags);
/* Wait for completion with locks released, as required by kernel */
if (to_do == SYM_EH_DO_WAIT) {
init_timer(&ep->timer);
......
......@@ -2,10 +2,10 @@
# Makefile for USB Core files and filesystem
#
export-objs := usb.o hcd.o hcd-pci.o urb.o message.o file.o
export-objs := usb.o hcd.o hcd-pci.o urb.o message.o file.o buffer.o
usbcore-objs := usb.o usb-debug.o hub.o hcd.o urb.o message.o \
config.o file.o
config.o file.o buffer.o
ifeq ($(CONFIG_PCI),y)
usbcore-objs += hcd-pci.o
......
/*
* DMA memory management for framework level HCD code (hc_driver)
*
* This implementation plugs in through generic "usb_bus" level methods,
* and works with real PCI, or when "pci device == null" makes sense.
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#ifdef CONFIG_USB_DEBUG
#define DEBUG
#else
#undef DEBUG
#endif
#include <linux/usb.h>
#include "hcd.h"
/*
* DMA-Consistent Buffers
*/
/* FIXME tune these based on pool statistics ... */
/*
 * Per-HCD pci_pool block sizes, smallest first; index matches
 * hcd->pool[].  Allocations larger than the last entry fall through
 * to pci_alloc_consistent() (whole pages); a zero entry is skipped
 * at pool-creation time.
 */
static const size_t pool_max [HCD_BUFFER_POOLS] = {
32,
128,
512,
PAGE_SIZE / 2
/* bigger --> allocate pages */
};
/* SETUP primitives */
/**
* hcd_buffer_create - initialize buffer pools
* @hcd: the bus whose buffer pools are to be initialized
*
* Call this as part of initializing a host controller that uses the pci dma
* memory allocators. It initializes some pools of dma-consistent memory that
* will be shared by all drivers using that controller, or returns a negative
* errno value on error.
*
* Call hcd_buffer_destroy() to clean up after using those pools.
*/
/**
 * hcd_buffer_create - initialize buffer pools
 * @hcd: the bus whose buffer pools are to be initialized
 *
 * Creates one pci_pool per nonzero pool_max[] entry, shared by all
 * drivers using this controller.  Returns 0, or -ENOMEM after
 * unwinding any pools already created.  Clean up with
 * hcd_buffer_destroy().
 */
int hcd_buffer_create (struct usb_hcd *hcd)
{
	char	label [16];
	int	i;

	for (i = 0; i < HCD_BUFFER_POOLS; i++) {
		int	size = pool_max [i];

		if (size == 0)
			continue;
		snprintf (label, sizeof label, "buffer-%d", size);
		hcd->pool [i] = pci_pool_create (label, hcd->pdev,
				size, size, 0, SLAB_KERNEL);
		if (hcd->pool [i] == 0) {
			/* release whatever was set up before the failure */
			hcd_buffer_destroy (hcd);
			return -ENOMEM;
		}
	}
	return 0;
}
EXPORT_SYMBOL (hcd_buffer_create);
/**
* hcd_buffer_destroy - deallocate buffer pools
* @hcd: the bus whose buffer pools are to be destroyed
*
* This frees the buffer pools created by hcd_buffer_create().
*/
/**
 * hcd_buffer_destroy - deallocate buffer pools
 * @hcd: the bus whose buffer pools are to be destroyed
 *
 * Frees every pool created by hcd_buffer_create(), clearing the
 * pointers so a partial create can be unwound safely.
 */
void hcd_buffer_destroy (struct usb_hcd *hcd)
{
	int	i;

	for (i = 0; i < HCD_BUFFER_POOLS; i++) {
		struct pci_pool	*pool = hcd->pool [i];

		if (!pool)
			continue;
		pci_pool_destroy (pool);
		hcd->pool [i] = 0;
	}
}
EXPORT_SYMBOL (hcd_buffer_destroy);
/* sometimes alloc/free could use kmalloc with SLAB_DMA, for
* better sharing and to leverage mm/slab.c intelligence.
*/
/*
 * Allocate a dma-consistent buffer: carve it from the smallest pool
 * that fits, else fall back to whole-page consistent allocation.
 */
void *hcd_buffer_alloc (
	struct usb_bus	*bus,
	size_t		size,
	int		mem_flags,
	dma_addr_t	*dma
)
{
	struct usb_hcd	*hcd = bus->hcpriv;
	int		index;

	for (index = 0; index < HCD_BUFFER_POOLS; index++)
		if (size <= pool_max [index])
			return pci_pool_alloc (hcd->pool [index],
					mem_flags, dma);
	return pci_alloc_consistent (hcd->pdev, size, dma);
}
/*
 * Free a buffer from hcd_buffer_alloc(); @size must match the original
 * request so the block is returned to the pool it was carved from.
 */
void hcd_buffer_free (
	struct usb_bus	*bus,
	size_t		size,
	void		*addr,
	dma_addr_t	dma
)
{
	struct usb_hcd	*hcd = bus->hcpriv;
	int		index;

	for (index = 0; index < HCD_BUFFER_POOLS; index++) {
		if (size > pool_max [index])
			continue;
		pci_pool_free (hcd->pool [index], addr, dma);
		return;
	}
	/* oversized buffers came straight from the consistent allocator */
	pci_free_consistent (hcd->pdev, size, addr, dma);
}
/*
* DMA-Mappings for arbitrary memory buffers
*/
/* DMA-map an arbitrary memory buffer for this controller's use */
int hcd_buffer_map (
	struct usb_bus	*bus,
	void		*addr,
	dma_addr_t	*dma,
	size_t		size,
	int		direction
) {
	struct usb_hcd	*hcd = bus->hcpriv;
	int		pci_dir = (direction == USB_DIR_IN)
				? PCI_DMA_FROMDEVICE
				: PCI_DMA_TODEVICE;

	// FIXME pci_map_single() has no standard failure mode!
	*dma = pci_map_single (hcd->pdev, addr, size, pci_dir);
	return 0;
}
/* Synchronize CPU and device views of a mapping from hcd_buffer_map() */
void hcd_buffer_dmasync (
	struct usb_bus	*bus,
	dma_addr_t	dma,
	size_t		size,
	int		direction
) {
	struct usb_hcd	*hcd = bus->hcpriv;
	int		pci_dir = (direction == USB_DIR_IN)
				? PCI_DMA_FROMDEVICE
				: PCI_DMA_TODEVICE;

	pci_dma_sync_single (hcd->pdev, dma, size, pci_dir);
}
/* Tear down a mapping created by hcd_buffer_map() */
void hcd_buffer_unmap (
	struct usb_bus	*bus,
	dma_addr_t	dma,
	size_t		size,
	int		direction
) {
	struct usb_hcd	*hcd = bus->hcpriv;
	int		pci_dir = (direction == USB_DIR_IN)
				? PCI_DMA_FROMDEVICE
				: PCI_DMA_TODEVICE;

	pci_unmap_single (hcd->pdev, dma, size, pci_dir);
}
// FIXME DMA-Mappings for struct scatterlist
......@@ -130,10 +130,19 @@ int usb_hcd_pci_probe (struct pci_dev *dev, const struct pci_device_id *id)
return retval;
}
}
pci_set_drvdata(dev, hcd);
pci_set_drvdata (dev, hcd);
hcd->driver = driver;
hcd->description = driver->description;
hcd->pdev = dev;
hcd->self.bus_name = dev->slot_name;
hcd->product_desc = dev->name;
if ((retval = hcd_buffer_create (hcd)) != 0) {
clean_3:
driver->hcd_free (hcd);
goto clean_2;
}
info ("%s @ %s, %s", hcd->description, dev->slot_name, dev->name);
pci_read_config_byte (dev, PCI_LATENCY_TIMER, &latency);
......@@ -154,8 +163,7 @@ int usb_hcd_pci_probe (struct pci_dev *dev, const struct pci_device_id *id)
!= 0) {
err ("request interrupt %s failed", bufp);
retval = -EBUSY;
driver->hcd_free (hcd);
goto clean_2;
goto clean_3;
}
hcd->irq = dev->irq;
......@@ -168,8 +176,6 @@ int usb_hcd_pci_probe (struct pci_dev *dev, const struct pci_device_id *id)
usb_bus_init (&hcd->self);
hcd->self.op = &usb_hcd_operations;
hcd->self.hcpriv = (void *) hcd;
hcd->self.bus_name = dev->slot_name;
hcd->product_desc = dev->name;
INIT_LIST_HEAD (&hcd->dev_list);
......@@ -216,6 +222,7 @@ void usb_hcd_pci_remove (struct pci_dev *dev)
usb_disconnect (&hub);
hcd->driver->stop (hcd);
hcd_buffer_destroy (hcd);
hcd->state = USB_STATE_HALT;
free_irq (hcd->irq, hcd);
......
......@@ -454,7 +454,6 @@ static int rh_status_urb (struct usb_hcd *hcd, struct urb *urb)
/* rh_timer protected by hcd_data_lock */
if (timer_pending (&hcd->rh_timer)
|| urb->status != -EINPROGRESS
|| !HCD_IS_RUNNING (hcd->state)
|| urb->transfer_buffer_length < len) {
dbg ("not queuing status urb, stat %d", urb->status);
return -EINVAL;
......@@ -508,8 +507,12 @@ static void rh_report_status (unsigned long ptr)
BUG ();
}
spin_unlock_irqrestore (&hcd_data_lock, flags);
} else
} else {
spin_unlock_irqrestore (&urb->lock, flags);
spin_lock_irqsave (&hcd_data_lock, flags);
rh_status_urb (hcd, urb);
spin_unlock_irqrestore (&hcd_data_lock, flags);
}
} else {
/* this urb's been unlinked */
urb->hcpriv = 0;
......@@ -1245,6 +1248,11 @@ struct usb_operations usb_hcd_operations = {
.submit_urb = hcd_submit_urb,
.unlink_urb = hcd_unlink_urb,
.deallocate = hcd_free_dev,
.buffer_alloc = hcd_buffer_alloc,
.buffer_free = hcd_buffer_free,
.buffer_map = hcd_buffer_map,
.buffer_dmasync = hcd_buffer_dmasync,
.buffer_unmap = hcd_buffer_unmap,
};
EXPORT_SYMBOL (usb_hcd_operations);
......
......@@ -58,6 +58,9 @@ struct usb_hcd { /* usb_bus.hcpriv points to this */
atomic_t resume_count; /* multiple resumes issue */
#endif
#define HCD_BUFFER_POOLS 4
struct pci_pool *pool [HCD_BUFFER_POOLS];
int state;
# define __ACTIVE 0x01
# define __SLEEPY 0x02
......@@ -109,6 +112,25 @@ struct usb_operations {
int (*get_frame_number) (struct usb_device *usb_dev);
int (*submit_urb) (struct urb *urb, int mem_flags);
int (*unlink_urb) (struct urb *urb);
/* allocate dma-consistent buffer for URB_DMA_NOMAPPING */
void *(*buffer_alloc)(struct usb_bus *bus, size_t size,
int mem_flags,
dma_addr_t *dma);
void (*buffer_free)(struct usb_bus *bus, size_t size,
void *addr, dma_addr_t dma);
int (*buffer_map) (struct usb_bus *bus,
void *addr, dma_addr_t *dma,
size_t size, int direction);
void (*buffer_dmasync) (struct usb_bus *bus,
dma_addr_t dma,
size_t size, int direction);
void (*buffer_unmap) (struct usb_bus *bus,
dma_addr_t dma,
size_t size, int direction);
// FIXME also: buffer_sg_map (), buffer_sg_unmap ()
};
/* each driver provides one of these, and hardware init support */
......@@ -181,6 +203,25 @@ extern int usb_hcd_pci_resume (struct pci_dev *dev);
#endif /* CONFIG_PCI */
/* pci-ish (pdev null is ok) buffer alloc/mapping support */
int hcd_buffer_create (struct usb_hcd *hcd);
void hcd_buffer_destroy (struct usb_hcd *hcd);
void *hcd_buffer_alloc (struct usb_bus *bus, size_t size,
int mem_flags, dma_addr_t *dma);
void hcd_buffer_free (struct usb_bus *bus, size_t size,
void *addr, dma_addr_t dma);
int hcd_buffer_map (struct usb_bus *bus,
void *addr, dma_addr_t *dma,
size_t size, int direction);
void hcd_buffer_dmasync (struct usb_bus *bus,
dma_addr_t dma,
size_t size, int direction);
void hcd_buffer_unmap (struct usb_bus *bus,
dma_addr_t dma,
size_t size, int direction);
/* generic bus glue, needed for host controllers that don't use PCI */
extern struct usb_operations usb_hcd_operations;
extern void usb_hcd_irq (int irq, void *__hcd, struct pt_regs *r);
......
......@@ -863,9 +863,11 @@ static ssize_t show_product (struct device *dev, char *buf, size_t count, loff_t
return 0;
udev = to_usb_device (dev);
len = usb_string(udev, udev->descriptor.iProduct, buf, PAGE_SIZE);
len = usb_string(udev, udev->descriptor.iProduct, buf, PAGE_SIZE);
if (len < 0)
return 0;
buf[len] = '\n';
buf[len+1] = 0x00;
buf[len+1] = 0;
return len+1;
}
static DEVICE_ATTR(product,"product",S_IRUGO,show_product,NULL);
......@@ -881,9 +883,11 @@ show_manufacturer (struct device *dev, char *buf, size_t count, loff_t off)
return 0;
udev = to_usb_device (dev);
len = usb_string(udev, udev->descriptor.iManufacturer, buf, PAGE_SIZE);
len = usb_string(udev, udev->descriptor.iManufacturer, buf, PAGE_SIZE);
if (len < 0)
return 0;
buf[len] = '\n';
buf[len+1] = 0x00;
buf[len+1] = 0;
return len+1;
}
static DEVICE_ATTR(manufacturer,"manufacturer",S_IRUGO,show_manufacturer,NULL);
......@@ -899,9 +903,11 @@ show_serial (struct device *dev, char *buf, size_t count, loff_t off)
return 0;
udev = to_usb_device (dev);
len = usb_string(udev, udev->descriptor.iSerialNumber, buf, PAGE_SIZE);
len = usb_string(udev, udev->descriptor.iSerialNumber, buf, PAGE_SIZE);
if (len < 0)
return 0;
buf[len] = '\n';
buf[len+1] = 0x00;
buf[len+1] = 0;
return len+1;
}
static DEVICE_ATTR(serial,"serial",S_IRUGO,show_serial,NULL);
......@@ -918,13 +924,13 @@ static void usb_find_drivers(struct usb_device *dev)
unsigned claimed = 0;
/* FIXME should get called for each new configuration not just the
* first one for a device. switching configs (or altesettings) should
* first one for a device. switching configs (or altsettings) should
* undo driverfs and HCD state for the previous interfaces.
*/
for (ifnum = 0; ifnum < dev->actconfig->bNumInterfaces; ifnum++) {
struct usb_interface *interface = &dev->actconfig->interface[ifnum];
struct usb_interface_descriptor *desc = interface->altsetting;
/* register this interface with driverfs */
interface->dev.parent = &dev->dev;
interface->dev.bus = &usb_bus_type;
......@@ -1455,6 +1461,152 @@ int usb_new_device(struct usb_device *dev)
}
/**
* usb_buffer_alloc - allocate dma-consistent buffer for URB_NO_DMA_MAP
* @dev: device the buffer will be used with
* @size: requested buffer size
* @mem_flags: affect whether allocation may block
* @dma: used to return DMA address of buffer
*
* Return value is either null (indicating no buffer could be allocated), or
* the cpu-space pointer to a buffer that may be used to perform DMA to the
* specified device. Such cpu-space buffers are returned along with the DMA
* address (through the pointer provided).
*
* These buffers are used with URB_NO_DMA_MAP set in urb->transfer_flags to
* avoid behaviors like using "DMA bounce buffers", or tying down I/O mapping
* hardware for long idle periods. The implementation varies between
* platforms, depending on details of how DMA will work to this device.
*
* When the buffer is no longer used, free it with usb_buffer_free().
*/
void *usb_buffer_alloc (
	struct usb_device	*dev,
	size_t			size,
	int			mem_flags,
	dma_addr_t		*dma
)
{
	struct usb_bus	*bus;

	/* hand off to the bus-specific allocator, when one exists */
	if (!dev)
		return 0;
	bus = dev->bus;
	if (!bus || !bus->op || !bus->op->buffer_alloc)
		return 0;
	return bus->op->buffer_alloc (bus, size, mem_flags, dma);
}
/**
* usb_buffer_free - free memory allocated with usb_buffer_alloc()
* @dev: device the buffer was used with
* @size: requested buffer size
* @addr: CPU address of buffer
* @dma: DMA address of buffer
*
* This reclaims an I/O buffer, letting it be reused. The memory must have
* been allocated using usb_buffer_alloc(), and the parameters must match
* those provided in that allocation request.
*/
void usb_buffer_free (
	struct usb_device	*dev,
	size_t			size,
	void			*addr,
	dma_addr_t		dma
)
{
	struct usb_bus	*bus;

	/* silently ignore requests the bus cannot service */
	if (!dev)
		return;
	bus = dev->bus;
	if (!bus || !bus->op || !bus->op->buffer_free)
		return;
	bus->op->buffer_free (bus, size, addr, dma);
}
/**
* usb_buffer_map - create DMA mapping(s) for an urb
* @urb: urb whose transfer_buffer will be mapped
*
* Return value is either null (indicating no buffer could be mapped), or
* the parameter. URB_NO_DMA_MAP is added to urb->transfer_flags if the
* operation succeeds.
*
* This call would normally be used for an urb which is reused, perhaps
* as the target of a large periodic transfer, with usb_buffer_dmasync()
* calls to synchronize memory and dma state. It may not be used for
* control requests.
*
* Reverse the effect of this call with usb_buffer_unmap().
*/
struct urb *usb_buffer_map (struct urb *urb)
{
	struct usb_bus		*bus;
	struct usb_operations	*op;
	int			dir;

	/* control transfers are excluded; need a device, a bus, and a hook */
	if (!urb || usb_pipecontrol (urb->pipe) || !urb->dev)
		return 0;
	bus = urb->dev->bus;
	if (!bus)
		return 0;
	op = bus->op;
	if (!op || !op->buffer_map)
		return 0;

	dir = usb_pipein (urb->pipe) ? USB_DIR_IN : USB_DIR_OUT;
	if (op->buffer_map (bus,
			urb->transfer_buffer,
			&urb->transfer_dma,
			urb->transfer_buffer_length,
			dir))
		return 0;
	/* mark the urb so the HCD skips its own mapping step */
	urb->transfer_flags |= URB_NO_DMA_MAP;
	return urb;
}
/**
* usb_buffer_dmasync - synchronize DMA and CPU view of buffer(s)
* @urb: urb whose transfer_buffer will be synchronized
*/
void usb_buffer_dmasync (struct urb *urb)
{
	struct usb_bus		*bus;
	struct usb_operations	*op;
	int			dir;

	/* only meaningful for urbs that went through usb_buffer_map() */
	if (!urb || !(urb->transfer_flags & URB_NO_DMA_MAP) || !urb->dev)
		return;
	bus = urb->dev->bus;
	if (!bus)
		return;
	op = bus->op;
	if (!op || !op->buffer_dmasync)
		return;

	dir = usb_pipein (urb->pipe) ? USB_DIR_IN : USB_DIR_OUT;
	op->buffer_dmasync (bus,
			urb->transfer_dma,
			urb->transfer_buffer_length,
			dir);
}
/**
* usb_buffer_unmap - free DMA mapping(s) for an urb
* @urb: urb whose transfer_buffer will be unmapped
*
* Reverses the effect of usb_buffer_map().
*/
void usb_buffer_unmap (struct urb *urb)
{
	struct usb_bus		*bus;
	struct usb_operations	*op;
	int			dir;

	/* only meaningful for urbs that went through usb_buffer_map() */
	if (!urb || !(urb->transfer_flags & URB_NO_DMA_MAP) || !urb->dev)
		return;
	bus = urb->dev->bus;
	if (!bus)
		return;
	op = bus->op;
	if (!op || !op->buffer_unmap)
		return;

	dir = usb_pipein (urb->pipe) ? USB_DIR_IN : USB_DIR_OUT;
	op->buffer_unmap (bus,
			urb->transfer_dma,
			urb->transfer_buffer_length,
			dir);
}
#ifdef CONFIG_PROC_FS
struct list_head *usb_driver_get_list(void)
{
......@@ -1534,4 +1686,11 @@ EXPORT_SYMBOL(__usb_get_extra_descriptor);
EXPORT_SYMBOL(usb_get_current_frame_number);
EXPORT_SYMBOL (usb_buffer_alloc);
EXPORT_SYMBOL (usb_buffer_free);
EXPORT_SYMBOL (usb_buffer_map);
EXPORT_SYMBOL (usb_buffer_dmasync);
EXPORT_SYMBOL (usb_buffer_unmap);
MODULE_LICENSE("GPL");
/*
* Copyright (c) 2001 by David Brownell
* Copyright (c) 2001-2002 by David Brownell
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
......@@ -175,3 +175,215 @@ dbg_qh (char *label, struct ehci_hcd *ehci, struct ehci_qh *qh) {}
(status & PORT_CONNECT) ? " CONNECT" : "" \
)
#ifdef DEBUG
/*
 * speed_char(info1): decode the QH hw_info1 EPS field (bits 13:12) into
 * a one-letter device speed tag: 'f'ull, 'l'ow, 'h'igh, '?' reserved.
 * GCC statement expression; the argument is evaluated exactly once.
 */
#define speed_char(info1) ({ char tmp; \
switch (info1 & (3 << 12)) { \
case 0 << 12: tmp = 'f'; break; \
case 1 << 12: tmp = 'l'; break; \
case 2 << 12: tmp = 'h'; break; \
default: tmp = '?'; break; \
}; tmp; })
/*
 * sysfs "sched-async" attribute: dump a snapshot of the EHCI async
 * schedule, one line per QH plus the qTDs known for each, holding
 * ehci->lock.  Returns the number of bytes written (single-shot:
 * nonzero offsets return 0).
 *
 * NOTE(review): snprintf() returns the would-be length when output is
 * truncated, so "size -= temp" can underflow the unsigned counter if a
 * line does not fit in @count — presumably the buffer is a full page
 * and real schedules stay small; verify before relying on this.
 */
static ssize_t
show_async (struct device *dev, char *buf, size_t count, loff_t off)
{
struct pci_dev *pdev;
struct ehci_hcd *ehci;
unsigned long flags;
unsigned temp, size;
char *next;
struct ehci_qh *qh;
if (off != 0)
return 0;
/* recover the ehci_hcd from the generic struct device */
pdev = container_of (dev, struct pci_dev, dev);
ehci = container_of (pci_get_drvdata (pdev), struct ehci_hcd, hcd);
next = buf;
size = count;
/* dumps a snapshot of the async schedule.
* usually empty except for long-term bulk reads, or head.
* one QH per line, and TDs we know about
*/
spin_lock_irqsave (&ehci->lock, flags);
if (ehci->async) {
qh = ehci->async;
/* the async list is circular; stop when we wrap back to the head */
do {
u32 scratch;
struct list_head *entry;
struct ehci_qtd *td;
scratch = cpu_to_le32p (&qh->hw_info1);
temp = snprintf (next, size, "qh %p dev%d %cs ep%d",
qh, scratch & 0x007f,
speed_char (scratch),
(scratch >> 8) & 0x000f);
size -= temp;
next += temp;
list_for_each (entry, &qh->qtd_list) {
td = list_entry (entry, struct ehci_qtd,
qtd_list);
scratch = cpu_to_le32p (&td->hw_token);
/* token bits 9:8 encode the PID: out/in/setup */
temp = snprintf (next, size,
", td %p len=%d %s",
td, scratch >> 16,
({ char *tmp;
switch ((scratch>>8)&0x03) {
case 0: tmp = "out"; break;
case 1: tmp = "in"; break;
case 2: tmp = "setup"; break;
default: tmp = "?"; break;
} tmp;})
);
size -= temp;
next += temp;
}
temp = snprintf (next, size, "\n");
size -= temp;
next += temp;
} while ((qh = qh->qh_next.qh) != ehci->async);
}
spin_unlock_irqrestore (&ehci->lock, flags);
return count - size;
}
static DEVICE_ATTR (async, "sched-async", S_IRUSR, show_async, NULL);
#define DBG_SCHED_LIMIT 64
/*
 * sysfs "sched-periodic" attribute: dump a snapshot of the EHCI
 * periodic schedule.  Walks every frame-list slot and follows the
 * shadow chain (QH / FSTN / ITD / SITD), holding ehci->lock.  A small
 * kmalloc'd "seen" array (capped at DBG_SCHED_LIMIT) avoids repeating
 * the verbose per-QH details for QHs linked into many frames.
 * Single-shot: nonzero offsets return 0, as does allocation failure.
 *
 * NOTE(review): same unsigned "size -= temp" truncation caveat as
 * show_async() — verify the page-sized buffer assumption.
 */
static ssize_t
show_periodic (struct device *dev, char *buf, size_t count, loff_t off)
{
struct pci_dev *pdev;
struct ehci_hcd *ehci;
unsigned long flags;
union ehci_shadow p, *seen;
unsigned temp, size, seen_count;
char *next;
unsigned i, tag;
if (off != 0)
return 0;
if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, SLAB_ATOMIC)))
return 0;
seen_count = 0;
pdev = container_of (dev, struct pci_dev, dev);
ehci = container_of (pci_get_drvdata (pdev), struct ehci_hcd, hcd);
next = buf;
size = count;
temp = snprintf (next, size, "size = %d\n", ehci->periodic_size);
size -= temp;
next += temp;
/* dump a snapshot of the periodic schedule.
* iso changes, interrupt usually doesn't.
*/
spin_lock_irqsave (&ehci->lock, flags);
for (i = 0; i < ehci->periodic_size; i++) {
p = ehci->pshadow [i];
if (!p.ptr)
continue;
/* the hardware word tells us which union member is live */
tag = Q_NEXT_TYPE (ehci->periodic [i]);
temp = snprintf (next, size, "%4d: ", i);
size -= temp;
next += temp;
do {
switch (tag) {
case Q_TYPE_QH:
temp = snprintf (next, size, " intr-%d %p",
p.qh->period, p.qh);
size -= temp;
next += temp;
/* temp doubles as a scan index here, then as a length again */
for (temp = 0; temp < seen_count; temp++) {
if (seen [temp].ptr == p.ptr)
break;
}
/* show more info the first time around */
if (temp == seen_count) {
u32 scratch = cpu_to_le32p (
&p.qh->hw_info1);
temp = snprintf (next, size,
" (%cs dev%d ep%d)",
speed_char (scratch),
scratch & 0x007f,
(scratch >> 8) & 0x000f);
/* FIXME TDs too */
if (seen_count < DBG_SCHED_LIMIT)
seen [seen_count++].qh = p.qh;
} else
temp = 0;
tag = Q_NEXT_TYPE (p.qh->hw_next);
p = p.qh->qh_next;
break;
case Q_TYPE_FSTN:
temp = snprintf (next, size,
" fstn-%8x/%p", p.fstn->hw_prev,
p.fstn);
tag = Q_NEXT_TYPE (p.fstn->hw_next);
p = p.fstn->fstn_next;
break;
case Q_TYPE_ITD:
temp = snprintf (next, size,
" itd/%p", p.itd);
tag = Q_NEXT_TYPE (p.itd->hw_next);
p = p.itd->itd_next;
break;
case Q_TYPE_SITD:
temp = snprintf (next, size,
" sitd/%p", p.sitd);
tag = Q_NEXT_TYPE (p.sitd->hw_next);
p = p.sitd->sitd_next;
break;
}
size -= temp;
next += temp;
} while (p.ptr);
temp = snprintf (next, size, "\n");
size -= temp;
next += temp;
}
spin_unlock_irqrestore (&ehci->lock, flags);
kfree (seen);
return count - size;
}
static DEVICE_ATTR (periodic, "sched-periodic", S_IRUSR, show_periodic, NULL);
#undef DBG_SCHED_LIMIT
/* attach the two schedule-dump attribute files to the HCD's pci device */
static inline void create_debug_files (struct ehci_hcd *bus)
{
device_create_file (&bus->hcd.pdev->dev, &dev_attr_async);
device_create_file (&bus->hcd.pdev->dev, &dev_attr_periodic);
}
/* detach the attribute files again (controller stop path) */
static inline void remove_debug_files (struct ehci_hcd *bus)
{
device_remove_file (&bus->hcd.pdev->dev, &dev_attr_async);
device_remove_file (&bus->hcd.pdev->dev, &dev_attr_periodic);
}
#else /* DEBUG */
/* non-DEBUG builds: keep the call sites, compile to nothing */
static inline void create_debug_files (struct ehci_hcd *bus)
{
}
static inline void remove_debug_files (struct ehci_hcd *bus)
{
}
#endif /* DEBUG */
......@@ -65,6 +65,8 @@
*
* HISTORY:
*
* 2002-08-06 Handling for bulk and interrupt transfers is mostly shared;
* only scheduling is different, no arbitrary limitations.
* 2002-07-25 Sanity check PCI reads, mostly for better cardbus support,
* clean up HC run state handshaking.
* 2002-05-24 Preliminary FS/LS interrupts, using scheduling shortcuts
......@@ -85,7 +87,7 @@
* 2001-June Works with usb-storage and NEC EHCI on 2.4
*/
#define DRIVER_VERSION "2002-Jul-25"
#define DRIVER_VERSION "2002-Aug-06"
#define DRIVER_AUTHOR "David Brownell"
#define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"
......@@ -93,6 +95,8 @@
// #define EHCI_VERBOSE_DEBUG
// #define have_split_iso
#define INTR_AUTOMAGIC /* to be removed later in 2.5 */
/* magic numbers that can affect system performance */
#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
#define EHCI_TUNE_RL_HS 0 /* nak throttle; see 4.9 */
......@@ -376,6 +380,8 @@ static int ehci_start (struct usb_hcd *hcd)
return -ENOMEM;
}
create_debug_files (ehci);
/*
* Start, enabling full USB 2.0 functionality ... usb 1.1 devices
* are explicitly handed to companion controller(s), so no TT is
......@@ -429,6 +435,8 @@ static void ehci_stop (struct usb_hcd *hcd)
ehci_ready (ehci);
ehci_reset (ehci);
remove_debug_files (ehci);
/* root hub is shut down separately (first, when possible) */
tasklet_disable (&ehci->tasklet);
ehci_tasklet ((unsigned long) ehci);
......@@ -614,7 +622,8 @@ static void ehci_irq (struct usb_hcd *hcd)
*
* hcd-specific init for hcpriv hasn't been done yet
*
* NOTE: EHCI queues control and bulk requests transparently, like OHCI.
* NOTE: control, bulk, and interrupt share the same code to append TDs
* to a (possibly active) QH, and the same QH scanning code.
*/
static int ehci_urb_enqueue (
struct usb_hcd *hcd,
......@@ -626,10 +635,11 @@ static int ehci_urb_enqueue (
urb->transfer_flags &= ~EHCI_STATE_UNLINK;
INIT_LIST_HEAD (&qtd_list);
switch (usb_pipetype (urb->pipe)) {
case PIPE_CONTROL:
case PIPE_BULK:
switch (usb_pipetype (urb->pipe)) {
// case PIPE_CONTROL:
// case PIPE_BULK:
default:
if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
return -ENOMEM;
return submit_async (ehci, urb, &qtd_list, mem_flags);
......@@ -649,9 +659,6 @@ static int ehci_urb_enqueue (
dbg ("no split iso support yet");
return -ENOSYS;
#endif /* have_split_iso */
default: /* can't happen */
return -ENOSYS;
}
}
......@@ -665,15 +672,16 @@ static int ehci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
unsigned long flags;
dbg ("%s urb_dequeue %p qh state %d",
hcd->self.bus_name, urb, qh->qh_state);
dbg ("%s urb_dequeue %p qh %p state %d",
hcd->self.bus_name, urb, qh, qh->qh_state);
switch (usb_pipetype (urb->pipe)) {
case PIPE_CONTROL:
case PIPE_BULK:
// case PIPE_CONTROL:
// case PIPE_BULK:
default:
spin_lock_irqsave (&ehci->lock, flags);
if (ehci->reclaim) {
dbg ("dq: reclaim busy, %s", RUN_CONTEXT);
dbg ("dq: reclaim busy, %s", RUN_CONTEXT);
if (in_interrupt ()) {
spin_unlock_irqrestore (&ehci->lock, flags);
return -EAGAIN;
......@@ -683,28 +691,43 @@ dbg ("dq: reclaim busy, %s", RUN_CONTEXT);
&& ehci->hcd.state != USB_STATE_HALT
) {
spin_unlock_irqrestore (&ehci->lock, flags);
// yeech ... this could spin for up to two frames!
dbg ("wait for dequeue: state %d, reclaim %p, hcd state %d",
qh->qh_state, ehci->reclaim, ehci->hcd.state
);
udelay (100);
/* let pending unlinks complete */
wait_ms (1);
spin_lock_irqsave (&ehci->lock, flags);
}
}
if (qh->qh_state == QH_STATE_LINKED)
start_unlink_async (ehci, qh);
spin_unlock_irqrestore (&ehci->lock, flags);
return 0;
break;
case PIPE_INTERRUPT:
intr_deschedule (ehci, urb->start_frame, qh,
(urb->dev->speed == USB_SPEED_HIGH)
? urb->interval
: (urb->interval << 3));
if (ehci->hcd.state == USB_STATE_HALT)
urb->status = -ESHUTDOWN;
qh_completions (ehci, qh, 1);
return 0;
if (qh->qh_state == QH_STATE_LINKED) {
/* messy, can spin or block a microframe ... */
intr_deschedule (ehci, qh, 1);
/* qh_state == IDLE */
}
qh_completions (ehci, qh);
/* reschedule QH iff another request is queued */
if (!list_empty (&qh->qtd_list)
&& HCD_IS_RUNNING (ehci->hcd.state)) {
int status;
spin_lock_irqsave (&ehci->lock, flags);
status = qh_schedule (ehci, qh);
spin_unlock_irqrestore (&ehci->lock, flags);
if (status != 0) {
// shouldn't happen often, but ...
// FIXME kill those tds' urbs
err ("can't reschedule qh %p, err %d",
qh, status);
}
return status;
}
break;
case PIPE_ISOCHRONOUS:
// itd or sitd ...
......@@ -712,9 +735,9 @@ dbg ("wait for dequeue: state %d, reclaim %p, hcd state %d",
// wait till next completion, do it then.
// completion irqs can wait up to 1024 msec,
urb->transfer_flags |= EHCI_STATE_UNLINK;
return 0;
break;
}
return -EINVAL;
return 0;
}
/*-------------------------------------------------------------------------*/
......@@ -728,6 +751,7 @@ static void ehci_free_config (struct usb_hcd *hcd, struct usb_device *udev)
int i;
unsigned long flags;
/* ASSERT: no requests/urbs are still linked (so no TDs) */
/* ASSERT: nobody can be submitting urbs for this any more */
dbg ("%s: free_config devnum %d", hcd->self.bus_name, udev->devnum);
......@@ -736,34 +760,57 @@ static void ehci_free_config (struct usb_hcd *hcd, struct usb_device *udev)
for (i = 0; i < 32; i++) {
if (dev->ep [i]) {
struct ehci_qh *qh;
char *why;
/* dev->ep never has ITDs or SITDs */
qh = (struct ehci_qh *) dev->ep [i];
vdbg ("free_config, ep 0x%02x qh %p", i, qh);
if (!list_empty (&qh->qtd_list)) {
dbg ("ep 0x%02x qh %p not empty!", i, qh);
/* detect/report non-recoverable errors */
if (in_interrupt ())
why = "disconnect() didn't";
else if ((qh->hw_info2 & cpu_to_le32 (0xffff)) != 0
&& qh->qh_state != QH_STATE_IDLE)
why = "(active periodic)";
else
why = 0;
if (why) {
err ("dev %s-%s ep %d-%s error: %s",
hcd->self.bus_name, udev->devpath,
i & 0xf, (i & 0x10) ? "IN" : "OUT",
why);
BUG ();
}
dev->ep [i] = 0;
/* wait_ms() won't spin here -- we're a thread */
dev->ep [i] = 0;
if (qh->qh_state == QH_STATE_IDLE)
goto idle;
dbg ("free_config, async ep 0x%02x qh %p", i, qh);
/* scan_async() empties the ring as it does its work,
* using IAA, but doesn't (yet?) turn it off. if it
* doesn't empty this qh, likely it's the last entry.
*/
while (qh->qh_state == QH_STATE_LINKED
&& ehci->reclaim
&& ehci->hcd.state != USB_STATE_HALT
) {
spin_unlock_irqrestore (&ehci->lock, flags);
/* wait_ms() won't spin, we're a thread;
* and we know IRQ+tasklet can progress
*/
wait_ms (1);
spin_lock_irqsave (&ehci->lock, flags);
}
if (qh->qh_state == QH_STATE_LINKED) {
if (qh->qh_state == QH_STATE_LINKED)
start_unlink_async (ehci, qh);
while (qh->qh_state != QH_STATE_IDLE) {
spin_unlock_irqrestore (&ehci->lock,
flags);
wait_ms (1);
spin_lock_irqsave (&ehci->lock, flags);
}
while (qh->qh_state != QH_STATE_IDLE
&& ehci->hcd.state != USB_STATE_HALT) {
spin_unlock_irqrestore (&ehci->lock,
flags);
wait_ms (1);
spin_lock_irqsave (&ehci->lock, flags);
}
idle:
qh_put (ehci, qh);
}
}
......
......@@ -47,9 +47,11 @@ static int
qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len, int token)
{
int i, count;
u64 addr = buf;
/* one buffer entry per 4K ... first might be short or unaligned */
qtd->hw_buf [0] = cpu_to_le32 (buf);
qtd->hw_buf [0] = cpu_to_le32 ((u32)addr);
qtd->hw_buf_hi [0] = cpu_to_le32 ((u32)(addr >> 32));
count = 0x1000 - (buf & 0x0fff); /* rest of that page */
if (likely (len < count)) /* ... iff needed */
count = len;
......@@ -59,7 +61,7 @@ qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len, int token)
/* per-qtd limit: from 16K to 20K (best alignment) */
for (i = 1; count < len && i < 5; i++) {
u64 addr = buf;
addr = buf;
qtd->hw_buf [i] = cpu_to_le32 ((u32)addr);
qtd->hw_buf_hi [i] = cpu_to_le32 ((u32)(addr >> 32));
buf += 0x1000;
......@@ -157,30 +159,6 @@ static inline void qtd_copy_status (struct urb *urb, size_t length, u32 token)
}
}
static void ehci_urb_complete (
struct ehci_hcd *ehci,
dma_addr_t addr,
struct urb *urb
) {
if (urb->transfer_buffer_length && usb_pipein (urb->pipe))
pci_dma_sync_single (ehci->hcd.pdev, addr,
urb->transfer_buffer_length,
PCI_DMA_FROMDEVICE);
/* cleanse status if we saw no error */
if (likely (urb->status == -EINPROGRESS)) {
if (urb->actual_length != urb->transfer_buffer_length
&& (urb->transfer_flags & URB_SHORT_NOT_OK))
urb->status = -EREMOTEIO;
else
urb->status = 0;
}
/* only report unlinks once */
if (likely (urb->status != -ENOENT && urb->status != -ENOTCONN))
urb->complete (urb);
}
/* urb->lock ignored from here on (hcd is done with urb) */
static void ehci_urb_done (
......@@ -188,6 +166,11 @@ static void ehci_urb_done (
dma_addr_t addr,
struct urb *urb
) {
#ifdef INTR_AUTOMAGIC
struct urb *resubmit = 0;
struct usb_device *dev = 0;
#endif
if (urb->transfer_buffer_length)
pci_unmap_single (ehci->hcd.pdev,
addr,
......@@ -196,7 +179,23 @@ static void ehci_urb_done (
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE);
if (likely (urb->hcpriv != 0)) {
qh_put (ehci, (struct ehci_qh *) urb->hcpriv);
struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
/* S-mask in a QH means it's an interrupt urb */
if ((qh->hw_info2 & cpu_to_le32 (0x00ff)) != 0) {
/* ... update hc-wide periodic stats (for usbfs) */
ehci->hcd.self.bandwidth_int_reqs--;
#ifdef INTR_AUTOMAGIC
if (!((urb->status == -ENOENT)
|| (urb->status == -ECONNRESET))) {
resubmit = usb_get_urb (urb);
dev = urb->dev;
}
#endif
}
qh_put (ehci, qh);
urb->hcpriv = 0;
}
......@@ -208,33 +207,46 @@ static void ehci_urb_done (
urb->status = 0;
}
/* hand off urb ownership */
usb_hcd_giveback_urb (&ehci->hcd, urb);
#ifdef INTR_AUTOMAGIC
if (resubmit && ((urb->status == -ENOENT)
|| (urb->status == -ECONNRESET))) {
usb_put_urb (resubmit);
resubmit = 0;
}
// device drivers will soon be doing something like this
if (resubmit) {
int status;
resubmit->dev = dev;
status = usb_submit_urb (resubmit, SLAB_KERNEL);
if (status != 0)
err ("can't resubmit interrupt urb %p: status %d",
resubmit, status);
usb_put_urb (resubmit);
}
#endif
}
/*
* Process completed qtds for a qh, issuing completions if needed.
* When freeing: frees qtds, unmaps buf, returns URB to driver.
* When not freeing (queued periodic qh): retain qtds, mapping, and urb.
* Frees qtds, unmaps buf, returns URB to driver.
* Races up to qh->hw_current; returns number of urb completions.
*/
static int
qh_completions (
struct ehci_hcd *ehci,
struct ehci_qh *qh,
int freeing
) {
static void
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
struct ehci_qtd *qtd, *last;
struct list_head *next, *qtd_list = &qh->qtd_list;
int unlink = 0, halted = 0;
unsigned long flags;
int retval = 0;
spin_lock_irqsave (&ehci->lock, flags);
if (unlikely (list_empty (qtd_list))) {
spin_unlock_irqrestore (&ehci->lock, flags);
return retval;
return;
}
/* scan QTDs till end of list, or we reach an active one */
......@@ -251,14 +263,8 @@ qh_completions (
if (likely (last->urb != urb)) {
/* complete() can reenter this HCD */
spin_unlock_irqrestore (&ehci->lock, flags);
if (likely (freeing != 0))
ehci_urb_done (ehci, last->buf_dma,
last->urb);
else
ehci_urb_complete (ehci, last->buf_dma,
last->urb);
ehci_urb_done (ehci, last->buf_dma, last->urb);
spin_lock_irqsave (&ehci->lock, flags);
retval++;
}
/* qh overlays can have HC's old cached copies of
......@@ -270,8 +276,7 @@ qh_completions (
qh->hw_qtd_next = last->hw_next;
}
if (likely (freeing != 0))
ehci_qtd_free (ehci, last);
ehci_qtd_free (ehci, last);
last = 0;
}
next = qtd->qtd_list.next;
......@@ -288,7 +293,7 @@ qh_completions (
/* fault: unlink the rest, since this qtd saw an error? */
if (unlikely ((token & QTD_STS_HALT) != 0)) {
freeing = unlink = 1;
unlink = 1;
/* status copied below */
/* QH halts only because of fault (above) or unlink (here). */
......@@ -296,13 +301,14 @@ qh_completions (
/* unlinking everything because of HC shutdown? */
if (ehci->hcd.state == USB_STATE_HALT) {
freeing = unlink = 1;
unlink = 1;
/* explicit unlink, maybe starting here? */
} else if (qh->qh_state == QH_STATE_IDLE
&& (urb->status == -ECONNRESET
|| urb->status == -ESHUTDOWN
|| urb->status == -ENOENT)) {
freeing = unlink = 1;
unlink = 1;
/* QH halted to unlink urbs _after_ this? */
} else if (!unlink && (token & QTD_STS_ACTIVE) != 0) {
......@@ -312,7 +318,7 @@ qh_completions (
/* unlink the rest? once we start unlinking, after
* a fault or explicit unlink, we unlink all later
* urbs. usb spec requires that.
* urbs. usb spec requires that for faults...
*/
if (unlink && urb->status == -EINPROGRESS)
urb->status = -ECONNRESET;
......@@ -330,31 +336,7 @@ qh_completions (
qtd_copy_status (urb, qtd->length, token);
spin_unlock (&urb->lock);
/*
* NOTE: this won't work right with interrupt urbs that
* need multiple qtds ... only the first scan of qh->qtd_list
* starts at the right qtd, yet multiple scans could happen
* for transfers that are scheduled across multiple uframes.
* (Such schedules are not currently allowed!)
*/
if (likely (freeing != 0))
list_del (&qtd->qtd_list);
else {
/* restore everything the HC could change
* from an interrupt QTD
*/
qtd->hw_token = (qtd->hw_token
& __constant_cpu_to_le32 (0x8300))
| cpu_to_le32 (qtd->length << 16)
| __constant_cpu_to_le32 (QTD_STS_ACTIVE
| (EHCI_TUNE_CERR << 10));
qtd->hw_buf [0] &= ~__constant_cpu_to_le32 (0x0fff);
/* this offset, and the length above,
* are likely wrong on QTDs #2..N
*/
qtd->hw_buf [0] |= cpu_to_le32 (0x0fff & qtd->buf_dma);
}
list_del (&qtd->qtd_list);
#if 0
if (urb->status == -EINPROGRESS)
......@@ -382,14 +364,9 @@ qh_completions (
/* last urb's completion might still need calling */
if (likely (last != 0)) {
if (likely (freeing != 0)) {
ehci_urb_done (ehci, last->buf_dma, last->urb);
ehci_qtd_free (ehci, last);
} else
ehci_urb_complete (ehci, last->buf_dma, last->urb);
retval++;
ehci_urb_done (ehci, last->buf_dma, last->urb);
ehci_qtd_free (ehci, last);
}
return retval;
}
/*-------------------------------------------------------------------------*/
......@@ -450,6 +427,7 @@ qh_urb_transaction (
struct ehci_qtd *qtd, *qtd_prev;
dma_addr_t buf, map_buf;
int len, maxpacket;
int is_input;
u32 token;
/*
......@@ -495,10 +473,11 @@ qh_urb_transaction (
* data transfer stage: buffer setup
*/
len = urb->transfer_buffer_length;
is_input = usb_pipein (urb->pipe);
if (likely (len > 0)) {
buf = map_buf = pci_map_single (ehci->hcd.pdev,
urb->transfer_buffer, len,
usb_pipein (urb->pipe)
is_input
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE);
if (unlikely (!buf))
......@@ -506,12 +485,11 @@ qh_urb_transaction (
} else
buf = map_buf = 0;
if (!buf || usb_pipein (urb->pipe))
if (!buf || is_input)
token |= (1 /* "in" */ << 8);
/* else it's already initted to "out" pid (0 << 8) */
maxpacket = usb_maxpacket (urb->dev, urb->pipe,
usb_pipeout (urb->pipe));
maxpacket = usb_maxpacket (urb->dev, urb->pipe, !is_input) & 0x03ff;
/*
* buffer gets wrapped in one or more qtds;
......@@ -607,6 +585,11 @@ clear_toggle (struct usb_device *udev, int ep, int is_out, struct ehci_qh *qh)
// That'd mean updating how usbcore talks to HCDs. (2.5?)
// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
// ... and packet size, for any kind of endpoint descriptor
#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x03ff)
/*
* Each QH holds a qtd list; a QH is used for everything except iso.
*
......@@ -624,6 +607,8 @@ ehci_qh_make (
) {
struct ehci_qh *qh = ehci_qh_alloc (ehci, flags);
u32 info1 = 0, info2 = 0;
int is_input, type;
int maxp = 0;
if (!qh)
return qh;
......@@ -634,6 +619,53 @@ ehci_qh_make (
info1 |= usb_pipeendpoint (urb->pipe) << 8;
info1 |= usb_pipedevice (urb->pipe) << 0;
is_input = usb_pipein (urb->pipe);
type = usb_pipetype (urb->pipe);
maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);
/* Compute interrupt scheduling parameters just once, and save.
* - allowing for high bandwidth, how many nsec/uframe are used?
* - split transactions need a second CSPLIT uframe; same question
* - splits also need a schedule gap (for full/low speed I/O)
* - qh has a polling interval
*
* For control/bulk requests, the HC or TT handles these.
*/
if (type == PIPE_INTERRUPT) {
qh->usecs = usb_calc_bus_time (USB_SPEED_HIGH, is_input, 0,
hb_mult (maxp) * max_packet (maxp));
qh->start = NO_FRAME;
if (urb->dev->speed == USB_SPEED_HIGH) {
qh->c_usecs = 0;
qh->gap_uf = 0;
/* FIXME handle HS periods of less than 1 frame. */
qh->period = urb->interval >> 3;
if (qh->period < 1) {
dbg ("intr period %d uframes, NYET!",
urb->interval);
qh = 0;
goto done;
}
} else {
/* gap is f(FS/LS transfer times) */
qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
is_input, 0, maxp) / (125 * 1000);
/* FIXME this just approximates SPLIT/CSPLIT times */
if (is_input) { // SPLIT, gap, CSPLIT+DATA
qh->c_usecs = qh->usecs + HS_USECS (0);
qh->usecs = HS_USECS (1);
} else { // SPLIT+DATA, gap, CSPLIT
qh->usecs += HS_USECS (1);
qh->c_usecs = HS_USECS (0);
}
qh->period = urb->interval;
}
}
/* using TT? */
switch (urb->dev->speed) {
case USB_SPEED_LOW:
......@@ -643,67 +675,63 @@ ehci_qh_make (
case USB_SPEED_FULL:
/* EPS 0 means "full" */
info1 |= (EHCI_TUNE_RL_TT << 28);
if (usb_pipecontrol (urb->pipe)) {
if (type == PIPE_CONTROL) {
info1 |= (1 << 27); /* for TT */
info1 |= 1 << 14; /* toggle from qtd */
}
info1 |= usb_maxpacket (urb->dev, urb->pipe,
usb_pipeout (urb->pipe)) << 16;
info1 |= maxp << 16;
info2 |= (EHCI_TUNE_MULT_TT << 30);
info2 |= urb->dev->ttport << 23;
info2 |= urb->dev->tt->hub->devnum << 16;
/* NOTE: if (usb_pipeint (urb->pipe)) { scheduler sets c-mask }
* ... and a 0.96 scheduler might use FSTN nodes too
*/
/* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */
break;
case USB_SPEED_HIGH: /* no TT involved */
info1 |= (2 << 12); /* EPS "high" */
info1 |= (EHCI_TUNE_RL_HS << 28);
if (usb_pipecontrol (urb->pipe)) {
if (type == PIPE_CONTROL) {
info1 |= 64 << 16; /* usb2 fixed maxpacket */
info1 |= 1 << 14; /* toggle from qtd */
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else if (usb_pipebulk (urb->pipe)) {
} else if (type == PIPE_BULK) {
info1 |= 512 << 16; /* usb2 fixed maxpacket */
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else {
u32 temp;
temp = usb_maxpacket (urb->dev, urb->pipe,
usb_pipeout (urb->pipe));
info1 |= (temp & 0x3ff) << 16; /* maxpacket */
/* HS intr can be "high bandwidth" */
temp = 1 + ((temp >> 11) & 0x03);
info2 |= temp << 30; /* mult */
} else { /* PIPE_INTERRUPT */
info1 |= max_packet (maxp) << 16;
info2 |= hb_mult (maxp) << 30;
}
break;
default:
#ifdef DEBUG
default:
BUG ();
#endif
}
/* NOTE: if (usb_pipeint (urb->pipe)) { scheduler sets s-mask } */
/* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */
qh->qh_state = QH_STATE_IDLE;
qh->hw_info1 = cpu_to_le32 (info1);
qh->hw_info2 = cpu_to_le32 (info2);
/* initialize sw and hw queues with these qtds */
list_splice (qtd_list, &qh->qtd_list);
qh_update (qh, list_entry (qtd_list->next, struct ehci_qtd, qtd_list));
if (!list_empty (qtd_list)) {
list_splice (qtd_list, &qh->qtd_list);
qh_update (qh, list_entry (qtd_list->next, struct ehci_qtd, qtd_list));
} else {
qh->hw_qtd_next = qh->hw_alt_next = EHCI_LIST_END;
}
/* initialize data toggle state */
if (!usb_pipecontrol (urb->pipe))
clear_toggle (urb->dev,
usb_pipeendpoint (urb->pipe),
usb_pipeout (urb->pipe),
qh);
clear_toggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, qh);
done:
return qh;
}
#undef hb_mult
#undef hb_packet
/*-------------------------------------------------------------------------*/
......@@ -745,50 +773,48 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
/*-------------------------------------------------------------------------*/
static int
submit_async (
/*
* For control/bulk/interrupt, return QH with these TDs appended.
* Allocates and initializes the QH if necessary.
* Returns null if it can't allocate a QH it needs to.
* If the QH has TDs (urbs) already, that's great.
*/
static struct ehci_qh *qh_append_tds (
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *qtd_list,
int mem_flags
) {
struct ehci_qtd *qtd;
struct hcd_dev *dev;
int epnum;
unsigned long flags;
int epnum,
void **ptr
)
{
struct ehci_qh *qh = 0;
qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
dev = (struct hcd_dev *)urb->dev->hcpriv;
epnum = usb_pipeendpoint (urb->pipe);
if (usb_pipein (urb->pipe))
epnum |= 0x10;
vdbg ("%s: submit_async urb %p len %d ep %d-%s qtd %p [qh %p]",
ehci->hcd.self.bus_name, urb, urb->transfer_buffer_length,
epnum & 0x0f, (epnum & 0x10) ? "in" : "out",
qtd, dev ? dev->ep [epnum] : (void *)~0);
spin_lock_irqsave (&ehci->lock, flags);
qh = (struct ehci_qh *) dev->ep [epnum];
qh = (struct ehci_qh *) *ptr;
if (likely (qh != 0)) {
u32 hw_next = QTD_NEXT (qtd->qtd_dma);
struct ehci_qtd *qtd;
if (unlikely (list_empty (qtd_list)))
qtd = 0;
else
qtd = list_entry (qtd_list->next, struct ehci_qtd,
qtd_list);
/* maybe patch the qh used for set_address */
if (unlikely (epnum == 0
&& le32_to_cpu (qh->hw_info1 & 0x7f) == 0))
qh->hw_info1 |= cpu_to_le32 (usb_pipedevice(urb->pipe));
/* is an URB is queued to this qh already? */
if (unlikely (!list_empty (&qh->qtd_list))) {
/* append to tds already queued to this qh? */
if (unlikely (!list_empty (&qh->qtd_list) && qtd)) {
struct ehci_qtd *last_qtd;
int short_rx = 0;
u32 hw_next;
/* update the last qtd's "next" pointer */
// dbg_qh ("non-empty qh", ehci, qh);
last_qtd = list_entry (qh->qtd_list.prev,
struct ehci_qtd, qtd_list);
hw_next = QTD_NEXT (qtd->qtd_dma);
last_qtd->hw_next = hw_next;
/* previous urb allows short rx? maybe optimize. */
......@@ -803,6 +829,7 @@ submit_async (
* Interrupt code must cope with case of HC having it
* cached, and clobbering these updates.
* ... complicates getting rid of extra interrupts!
* (Or: use dummy td, so cache always stays valid.)
*/
if (qh->hw_current == cpu_to_le32 (last_qtd->qtd_dma)) {
wmb ();
......@@ -822,31 +849,61 @@ submit_async (
*/
/* usb_clear_halt() means qh data toggle gets reset */
if (usb_pipebulk (urb->pipe)
&& unlikely (!usb_gettoggle (urb->dev,
if (unlikely (!usb_gettoggle (urb->dev,
(epnum & 0x0f),
!(epnum & 0x10)))) {
clear_toggle (urb->dev,
epnum & 0x0f, !(epnum & 0x10), qh);
}
qh_update (qh, qtd);
if (qtd)
qh_update (qh, qtd);
}
list_splice (qtd_list, qh->qtd_list.prev);
} else {
/* can't sleep here, we have ehci->lock... */
qh = ehci_qh_make (ehci, urb, qtd_list, SLAB_ATOMIC);
if (likely (qh != 0)) {
// dbg_qh ("new qh", ehci, qh);
dev->ep [epnum] = qh;
}
// if (qh) dbg_qh ("new qh", ehci, qh);
*ptr = qh;
}
if (qh)
urb->hcpriv = qh_get (qh);
return qh;
}
/*-------------------------------------------------------------------------*/
static int
submit_async (
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *qtd_list,
int mem_flags
) {
struct ehci_qtd *qtd;
struct hcd_dev *dev;
int epnum;
unsigned long flags;
struct ehci_qh *qh = 0;
qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
dev = (struct hcd_dev *)urb->dev->hcpriv;
epnum = usb_pipeendpoint (urb->pipe);
if (usb_pipein (urb->pipe))
epnum |= 0x10;
vdbg ("%s: submit_async urb %p len %d ep %d-%s qtd %p [qh %p]",
ehci->hcd.self.bus_name, urb, urb->transfer_buffer_length,
epnum & 0x0f, (epnum & 0x10) ? "in" : "out",
qtd, dev ? dev->ep [epnum] : (void *)~0);
spin_lock_irqsave (&ehci->lock, flags);
qh = qh_append_tds (ehci, urb, qtd_list, epnum, &dev->ep [epnum]);
/* Control/bulk operations through TTs don't need scheduling,
* the HC and TT handle it when the TT has a buffer ready.
*/
if (likely (qh != 0)) {
urb->hcpriv = qh_get (qh);
if (likely (qh->qh_state == QH_STATE_IDLE))
qh_link_async (ehci, qh_get (qh));
}
......@@ -873,7 +930,7 @@ static void end_unlink_async (struct ehci_hcd *ehci)
ehci->reclaim = 0;
ehci->reclaim_ready = 0;
qh_completions (ehci, qh, 1);
qh_completions (ehci, qh);
// unlink any urb should now unlink all following urbs, so that
// relinking only happens for urbs before the unlinked ones.
......@@ -973,13 +1030,15 @@ static void scan_async (struct ehci_hcd *ehci)
spin_unlock_irqrestore (&ehci->lock, flags);
/* concurrent unlink could happen here */
qh_completions (ehci, qh, 1);
qh_completions (ehci, qh);
spin_lock_irqsave (&ehci->lock, flags);
qh_put (ehci, qh);
}
/* unlink idle entries (reduces PCI usage) */
/* unlink idle entries, reducing HC PCI usage as
* well as HCD schedule-scanning costs
*/
if (list_empty (&qh->qtd_list) && !ehci->reclaim) {
if (qh->qh_next.qh != qh) {
// dbg ("irq/empty");
......@@ -987,6 +1046,7 @@ static void scan_async (struct ehci_hcd *ehci)
} else {
// FIXME: arrange to stop
// after it's been idle a while.
// stop/restart isn't free...
}
}
qh = qh->qh_next.qh;
......
......@@ -220,31 +220,31 @@ static int disable_periodic (struct ehci_hcd *ehci)
/*-------------------------------------------------------------------------*/
// FIXME microframe periods not yet handled
static void intr_deschedule (
struct ehci_hcd *ehci,
unsigned frame,
struct ehci_qh *qh,
unsigned period
int wait
) {
unsigned long flags;
int status;
period >>= 3; // FIXME microframe periods not handled yet
unsigned frame = qh->start;
spin_lock_irqsave (&ehci->lock, flags);
do {
periodic_unlink (ehci, frame, qh);
qh_put (ehci, qh);
frame += period;
frame += qh->period;
} while (frame < ehci->periodic_size);
qh->qh_state = QH_STATE_UNLINK;
qh->qh_next.ptr = 0;
ehci->periodic_urbs--;
ehci->periodic_sched--;
/* maybe turn off periodic schedule */
if (!ehci->periodic_urbs)
if (!ehci->periodic_sched)
status = disable_periodic (ehci);
else {
status = 0;
......@@ -258,21 +258,35 @@ static void intr_deschedule (
* (yeech!) to be sure it's done.
* No other threads may be mucking with this qh.
*/
if (!status && ((ehci_get_frame (&ehci->hcd) - frame) % period) == 0)
udelay (125);
if (((ehci_get_frame (&ehci->hcd) - frame) % qh->period) == 0) {
if (wait) {
udelay (125);
qh->hw_next = EHCI_LIST_END;
} else {
/* we may not be IDLE yet, but if the qh is empty
* the race is very short. then if qh also isn't
* rescheduled soon, it won't matter. otherwise...
*/
vdbg ("intr_deschedule...");
}
} else
qh->hw_next = EHCI_LIST_END;
qh->qh_state = QH_STATE_IDLE;
qh->hw_next = EHCI_LIST_END;
/* update per-qh bandwidth utilization (for usbfs) */
ehci->hcd.self.bandwidth_allocated -=
(qh->usecs + qh->c_usecs) / qh->period;
vdbg ("descheduled qh %p, per = %d frame = %d count = %d, urbs = %d",
qh, period, frame,
atomic_read (&qh->refcount), ehci->periodic_urbs);
qh, qh->period, frame,
atomic_read (&qh->refcount), ehci->periodic_sched);
}
static int check_period (
struct ehci_hcd *ehci,
unsigned frame,
int uframe,
unsigned uframe,
unsigned period,
unsigned usecs
) {
......@@ -309,19 +323,142 @@ static int check_period (
return 1;
}
static int check_intr_schedule (
struct ehci_hcd *ehci,
unsigned frame,
unsigned uframe,
const struct ehci_qh *qh,
u32 *c_maskp
)
{
int retval = -ENOSPC;
if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
goto done;
if (!qh->c_usecs) {
retval = 0;
*c_maskp = cpu_to_le32 (0);
goto done;
}
/* This is a split transaction; check the bandwidth available for
* the completion too. Check both worst and best case gaps: worst
* case is SPLIT near uframe end, and CSPLIT near start ... best is
* vice versa. Difference can be almost two uframe times, but we
* reserve unnecessary bandwidth (waste it) this way. (Actually
* even better cases exist, like immediate device NAK.)
*
* FIXME don't even bother unless we know this TT is idle in that
* range of uframes ... for now, check_period() allows only one
* interrupt transfer per frame, so needn't check "TT busy" status
* when scheduling a split (QH, SITD, or FSTN).
*
* FIXME ehci 0.96 and above can use FSTNs
*/
if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
qh->period, qh->c_usecs))
goto done;
if (!check_period (ehci, frame, uframe + qh->gap_uf,
qh->period, qh->c_usecs))
goto done;
*c_maskp = cpu_to_le32 (0x03 << (8 + uframe + qh->gap_uf));
retval = 0;
done:
return retval;
}
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
int status;
unsigned uframe;
u32 c_mask;
unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
qh->hw_next = EHCI_LIST_END;
frame = qh->start;
/* reuse the previous schedule slots, if we can */
if (frame < qh->period) {
uframe = ffs (le32_to_cpup (&qh->hw_info2) & 0x00ff);
status = check_intr_schedule (ehci, frame, --uframe,
qh, &c_mask);
} else {
uframe = 0;
c_mask = 0;
status = -ENOSPC;
}
/* else scan the schedule to find a group of slots such that all
* uframes have enough periodic bandwidth available.
*/
if (status) {
frame = qh->period - 1;
do {
for (uframe = 0; uframe < 8; uframe++) {
status = check_intr_schedule (ehci,
frame, uframe, qh,
&c_mask);
if (status == 0)
break;
}
} while (status && --frame);
if (status)
goto done;
qh->start = frame;
/* reset S-frame and (maybe) C-frame masks */
qh->hw_info2 &= ~0xffff;
qh->hw_info2 |= cpu_to_le32 (1 << uframe) | c_mask;
} else
dbg ("reused previous qh %p schedule", qh);
/* stuff into the periodic schedule */
qh->qh_state = QH_STATE_LINKED;
dbg ("qh %p usecs %d/%d period %d.0 starting %d.%d (gap %d)",
qh, qh->usecs, qh->c_usecs,
qh->period, frame, uframe, qh->gap_uf);
do {
if (unlikely (ehci->pshadow [frame].ptr != 0)) {
// FIXME -- just link toward the end, before any qh with a shorter period,
// AND accomodate it already having been linked here (after some other qh)
// AS WELL AS updating the schedule checking logic
BUG ();
} else {
ehci->pshadow [frame].qh = qh_get (qh);
ehci->periodic [frame] =
QH_NEXT (qh->qh_dma);
}
wmb ();
frame += qh->period;
} while (frame < ehci->periodic_size);
/* update per-qh bandwidth for usbfs */
ehci->hcd.self.bandwidth_allocated +=
(qh->usecs + qh->c_usecs) / qh->period;
/* maybe enable periodic schedule processing */
if (!ehci->periodic_sched++)
status = enable_periodic (ehci);
done:
return status;
}
static int intr_submit (
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *qtd_list,
int mem_flags
) {
unsigned epnum, period;
unsigned short usecs, c_usecs, gap_uf;
unsigned epnum;
unsigned long flags;
struct ehci_qh *qh;
struct hcd_dev *dev;
int is_input;
int status = 0;
struct list_head empty;
/* get endpoint and transfer/schedule data */
epnum = usb_pipeendpoint (urb->pipe);
......@@ -329,198 +466,30 @@ static int intr_submit (
if (is_input)
epnum |= 0x10;
/*
* HS interrupt transfers are simple -- only one microframe. FS/LS
* interrupt transfers involve a SPLIT in one microframe and CSPLIT
* sometime later. We need to know how much time each will be
* needed in each microframe and, for FS/LS, how many microframes
* separate the two in the best case.
*/
usecs = usb_calc_bus_time (USB_SPEED_HIGH, is_input, 0,
urb->transfer_buffer_length);
if (urb->dev->speed == USB_SPEED_HIGH) {
gap_uf = 0;
c_usecs = 0;
/* FIXME handle HS periods of less than 1 frame. */
period = urb->interval >> 3;
if (period < 1) {
dbg ("intr period %d uframes, NYET!", urb->interval);
status = -EINVAL;
goto done;
}
} else {
/* gap is a function of full/low speed transfer times */
gap_uf = 1 + usb_calc_bus_time (urb->dev->speed, is_input, 0,
urb->transfer_buffer_length) / (125 * 1000);
/* FIXME this just approximates SPLIT/CSPLIT times */
if (is_input) { // SPLIT, gap, CSPLIT+DATA
c_usecs = usecs + HS_USECS (0);
usecs = HS_USECS (1);
} else { // SPLIT+DATA, gap, CSPLIT
usecs = usecs + HS_USECS (1);
c_usecs = HS_USECS (0);
}
period = urb->interval;
}
spin_lock_irqsave (&ehci->lock, flags);
dev = (struct hcd_dev *)urb->dev->hcpriv;
/*
* NOTE: current completion/restart logic doesn't handle more than
* one qtd in a periodic qh ... 16-20 KB/urb is pretty big for this.
* such big requests need many periods to transfer.
*
* FIXME want to change hcd core submit model to expect queuing
* for all transfer types ... not just ISO and (with flag) BULK.
* that means: getting rid of this check; handling the "interrupt
* urb already queued" case below like bulk queuing is handled (no
* errors possible!); and completly getting rid of that annoying
* qh restart logic. simpler/smaller overall, and more flexible.
*/
if (unlikely (qtd_list->next != qtd_list->prev)) {
dbg ("only one intr qtd per urb allowed");
status = -EINVAL;
/* get qh and force any scheduling errors */
INIT_LIST_HEAD (&empty);
qh = qh_append_tds (ehci, urb, &empty, epnum, &dev->ep [epnum]);
if (qh == 0) {
status = -ENOMEM;
goto done;
}
spin_lock_irqsave (&ehci->lock, flags);
/* get the qh (must be empty and idle) */
dev = (struct hcd_dev *)urb->dev->hcpriv;
qh = (struct ehci_qh *) dev->ep [epnum];
if (qh) {
/* only allow one queued interrupt urb per EP */
if (unlikely (qh->qh_state != QH_STATE_IDLE
|| !list_empty (&qh->qtd_list))) {
dbg ("interrupt urb already queued");
status = -EBUSY;
} else {
/* maybe reset hardware's data toggle in the qh */
if (unlikely (!usb_gettoggle (urb->dev, epnum & 0x0f,
!(epnum & 0x10)))) {
qh->hw_token |=
__constant_cpu_to_le32 (QTD_TOGGLE);
usb_settoggle (urb->dev, epnum & 0x0f,
!(epnum & 0x10), 1);
}
/* trust the QH was set up as interrupt ... */
list_splice (qtd_list, &qh->qtd_list);
qh_update (qh, list_entry (qtd_list->next,
struct ehci_qtd, qtd_list));
qtd_list = &qh->qtd_list;
}
} else {
/* can't sleep here, we have ehci->lock... */
qh = ehci_qh_make (ehci, urb, qtd_list, SLAB_ATOMIC);
if (likely (qh != 0)) {
// dbg ("new INTR qh %p", qh);
dev->ep [epnum] = qh;
qtd_list = &qh->qtd_list;
} else
status = -ENOMEM;
if (qh->qh_state == QH_STATE_IDLE) {
if ((status = qh_schedule (ehci, qh)) != 0)
goto done;
}
/* Schedule this periodic QH. */
if (likely (status == 0)) {
unsigned frame = period;
qh->hw_next = EHCI_LIST_END;
qh->usecs = usecs;
qh->c_usecs = c_usecs;
urb->hcpriv = qh_get (qh);
status = -ENOSPC;
/* pick a set of schedule slots, link the QH into them */
do {
int uframe;
u32 c_mask = 0;
/* pick a set of slots such that all uframes have
* enough periodic bandwidth available.
*/
frame--;
for (uframe = 0; uframe < 8; uframe++) {
if (check_period (ehci, frame, uframe,
period, usecs) == 0)
continue;
/* If this is a split transaction, check the
* bandwidth available for the completion
* too. check both best and worst case gaps:
* worst case is SPLIT near uframe end, and
* CSPLIT near start ... best is vice versa.
* Difference can be almost two uframe times.
*
* FIXME don't even bother unless we know
* this TT is idle in that uframe ... right
* now we know only one interrupt transfer
* will be scheduled per frame, so we don't
* need to update/check TT state when we
* schedule a split (QH, SITD, or FSTN).
*
* FIXME ehci 0.96 and above can use FSTNs
*/
if (!c_usecs)
break;
if (check_period (ehci, frame,
uframe + gap_uf,
period, c_usecs) == 0)
continue;
if (check_period (ehci, frame,
uframe + gap_uf + 1,
period, c_usecs) == 0)
continue;
c_mask = 0x03 << (8 + uframe + gap_uf);
c_mask = cpu_to_le32 (c_mask);
break;
}
if (uframe == 8)
continue;
/* QH will run once each period, starting there */
urb->start_frame = frame;
status = 0;
/* reset S-frame and (maybe) C-frame masks */
qh->hw_info2 &= ~0xffff;
qh->hw_info2 |= cpu_to_le32 (1 << uframe) | c_mask;
// dbg_qh ("Schedule INTR qh", ehci, qh);
/* stuff into the periodic schedule */
qh->qh_state = QH_STATE_LINKED;
vdbg ("qh %p usecs %d period %d.0 starting %d.%d",
qh, qh->usecs, period, frame, uframe);
do {
if (unlikely (ehci->pshadow [frame].ptr != 0)) {
// FIXME -- just link toward the end, before any qh with a shorter period,
// AND handle it already being (implicitly) linked into this frame
// AS WELL AS updating the check_period() logic
BUG ();
} else {
ehci->pshadow [frame].qh = qh_get (qh);
ehci->periodic [frame] =
QH_NEXT (qh->qh_dma);
}
wmb ();
frame += period;
} while (frame < ehci->periodic_size);
/* update bandwidth utilization records (for usbfs) */
usb_claim_bandwidth (urb->dev, urb,
(usecs + c_usecs) / period, 0);
/* then queue the urb's tds to the qh */
qh = qh_append_tds (ehci, urb, qtd_list, epnum, &dev->ep [epnum]);
BUG_ON (qh == 0);
/* maybe enable periodic schedule processing */
if (!ehci->periodic_urbs++)
status = enable_periodic (ehci);
break;
/* ... update usbfs periodic stats */
ehci->hcd.self.bandwidth_int_reqs++;
} while (frame);
}
spin_unlock_irqrestore (&ehci->lock, flags);
done:
spin_unlock_irqrestore (&ehci->lock, flags);
if (status)
qtd_list_free (ehci, urb, qtd_list);
......@@ -534,10 +503,6 @@ intr_complete (
struct ehci_qh *qh,
unsigned long flags /* caller owns ehci->lock ... */
) {
struct ehci_qtd *qtd;
struct urb *urb;
int unlinking;
/* nothing to report? */
if (likely ((qh->hw_token & __constant_cpu_to_le32 (QTD_STS_ACTIVE))
!= 0))
......@@ -547,43 +512,14 @@ intr_complete (
return flags;
}
qtd = list_entry (qh->qtd_list.next, struct ehci_qtd, qtd_list);
urb = qtd->urb;
unlinking = (urb->status == -ENOENT) || (urb->status == -ECONNRESET);
/* call any completions, after patching for reactivation */
/* handle any completions */
spin_unlock_irqrestore (&ehci->lock, flags);
/* NOTE: currently restricted to one qtd per qh! */
if (qh_completions (ehci, qh, 0) == 0)
urb = 0;
qh_completions (ehci, qh);
spin_lock_irqsave (&ehci->lock, flags);
/* never reactivate requests that were unlinked ... */
if (likely (urb != 0)) {
if (unlinking
|| urb->status == -ECONNRESET
|| urb->status == -ENOENT
// || (urb->dev == null)
|| ehci->hcd.state == USB_STATE_HALT)
urb = 0;
// FIXME look at all those unlink cases ... we always
// need exactly one completion that reports unlink.
// the one above might not have been it!
}
if (unlikely (list_empty (&qh->qtd_list)))
intr_deschedule (ehci, qh, 0);
/* normally reactivate */
if (likely (urb != 0)) {
if (usb_pipeout (urb->pipe))
pci_dma_sync_single (ehci->hcd.pdev,
qtd->buf_dma,
urb->transfer_buffer_length,
PCI_DMA_TODEVICE);
urb->status = -EINPROGRESS;
urb->actual_length = 0;
/* patch qh and restart */
qh_update (qh, qtd);
}
return flags;
}
......@@ -806,7 +742,7 @@ static int get_iso_range (
/* calculate the legal range [start,max) */
now = readl (&ehci->regs->frame_index) + 1; /* next uframe */
if (!ehci->periodic_urbs)
if (!ehci->periodic_sched)
now += 8; /* startup delay */
now %= mod;
end = now + mod;
......@@ -926,7 +862,7 @@ itd_schedule (struct ehci_hcd *ehci, struct urb *urb)
usb_claim_bandwidth (urb->dev, urb, usecs, 1);
/* maybe enable periodic schedule processing */
if (!ehci->periodic_urbs++) {
if (!ehci->periodic_sched++) {
if ((status = enable_periodic (ehci)) != 0) {
// FIXME deschedule right away
err ("itd_schedule, enable = %d", status);
......@@ -1009,8 +945,8 @@ itd_complete (
spin_lock_irqsave (&ehci->lock, flags);
/* defer stopping schedule; completion can submit */
ehci->periodic_urbs--;
if (!ehci->periodic_urbs)
ehci->periodic_sched--;
if (!ehci->periodic_sched)
(void) disable_periodic (ehci);
return flags;
......
......@@ -50,7 +50,7 @@ struct ehci_hcd { /* one per controller */
union ehci_shadow *pshadow; /* mirror hw periodic table */
int next_uframe; /* scan periodic, start here */
unsigned periodic_urbs; /* how many urbs scheduled? */
unsigned periodic_sched; /* periodic activity count */
/* deferred work from IRQ, etc */
struct tasklet_struct tasklet;
......@@ -72,7 +72,7 @@ struct ehci_hcd { /* one per controller */
};
/* unwrap an HCD pointer to get an EHCI_HCD pointer */
#define hcd_to_ehci(hcd_ptr) list_entry(hcd_ptr, struct ehci_hcd, hcd)
#define hcd_to_ehci(hcd_ptr) container_of(hcd_ptr, struct ehci_hcd, hcd)
/* NOTE: urb->transfer_flags expected to not use this bit !!! */
#define EHCI_STATE_UNLINK 0x8000 /* urb being unlinked */
......@@ -287,12 +287,20 @@ struct ehci_qh {
struct list_head qtd_list; /* sw qtd list */
atomic_t refcount;
unsigned short usecs; /* intr bandwidth */
unsigned short c_usecs; /* ... split completion bw */
short qh_state;
u8 qh_state;
#define QH_STATE_LINKED 1 /* HC sees this */
#define QH_STATE_UNLINK 2 /* HC may still see this */
#define QH_STATE_IDLE 3 /* HC doesn't see this */
/* periodic schedule info */
u8 usecs; /* intr bandwidth */
u8 gap_uf; /* uframes split/csplit gap */
u8 c_usecs; /* ... split completion bw */
unsigned short period; /* polling interval */
unsigned short start; /* where polling starts */
#define NO_FRAME ((unsigned short)~0) /* pick new start */
} __attribute__ ((aligned (32)));
/*-------------------------------------------------------------------------*/
......
......@@ -938,7 +938,7 @@ static void finish_unlinks (struct ohci_hcd *ohci, u16 tick)
/* ED's now officially unlinked, hc doesn't see */
ed->state = ED_IDLE;
ed->hwINFO &= ~ED_SKIP;
ed->hwHeadP &= ~cpu_to_le32 (ED_H);
ed->hwHeadP &= ~ED_H;
ed->hwNextED = 0;
/* but if there's work queued, reschedule */
......
/*
* $Id$
*
* konicawc.c - konica webcam driver
*
* Author: Simon Evans <spse@secret.org.uk>
......@@ -8,7 +6,7 @@
* Copyright (C) 2002 Simon Evans
*
* Licence: GPL
*
*
* Driver for USB webcams based on Konica chipset. This
* chipset is used in Intel YC76 camera.
*
......@@ -18,6 +16,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include "usbvideo.h"
#define MAX_BRIGHTNESS 108
......@@ -26,9 +25,11 @@
#define MAX_SHARPNESS 108
#define MAX_WHITEBAL 372
#define MAX_SPEED 6
#define MAX_CAMERAS 1
#define DRIVER_VERSION "v1.1"
#define DRIVER_VERSION "v1.3"
#define DRIVER_DESC "Konica Webcam driver"
enum ctrl_req {
......@@ -41,18 +42,32 @@ enum ctrl_req {
enum frame_sizes {
SIZE_160X136 = 0,
SIZE_176X144 = 1,
SIZE_320X240 = 2,
SIZE_160X120 = 0,
SIZE_160X136 = 1,
SIZE_176X144 = 2,
SIZE_320X240 = 3,
};
#define MAX_FRAME_SIZE SIZE_320X240
static usbvideo_t *cams;
#ifdef CONFIG_USB_DEBUG
static int debug;
#define DEBUG(n, format, arg...) \
if (n <= debug) { \
printk(KERN_DEBUG __FILE__ ":%s(): " format "\n", __FUNCTION__ , ## arg); \
}
#else
#define DEBUG(n, arg...)
static const int debug = 0;
#endif
/* Some default values for initial camera settings,
   can be set by modprobe */
static int debug;
static enum frame_sizes size;
static int speed = 6; /* Speed (fps) 0 (slowest) to 6 (fastest) */
static int brightness = MAX_BRIGHTNESS/2;
......@@ -61,31 +76,36 @@ static int saturation = MAX_SATURATION/2;
static int sharpness = MAX_SHARPNESS/2;
static int whitebal = 3*(MAX_WHITEBAL/4);
static int speed_to_interface[] = { 1, 0, 3, 2, 4, 5, 6 };
static int spd_to_iface[] = { 1, 0, 3, 2, 4, 5, 6 };
/* These FPS speeds are from the windows config box. They are
 * indexed on size (0-3) and speed (0-6). Divide by 3 to get the
 * real fps.
 */
static int speed_to_fps[3][7] = { { 24, 40, 48, 60, 72, 80, 100 },
{ 18, 30, 36, 45, 54, 60, 75 },
{ 6, 10, 12, 15, 18, 20, 25 } };
static int spd_to_fps[][7] = { { 24, 40, 48, 60, 72, 80, 100 },
{ 24, 40, 48, 60, 72, 80, 100 },
{ 18, 30, 36, 45, 54, 60, 75 },
{ 6, 10, 12, 15, 18, 21, 25 } };
static int camera_sizes[][2] = { { 160, 136 },
{ 176, 144 },
{ 320, 240 },
{ } /* List terminator */
struct cam_size {
u16 width;
u16 height;
u8 cmd;
};
static struct cam_size camera_sizes[] = { { 160, 120, 0x7 },
{ 160, 136, 0xa },
{ 176, 144, 0x4 },
{ 320, 240, 0x5 } };
struct konicawc {
u8 brightness; /* camera uses 0 - 9, x11 for real value */
u8 contrast; /* as above */
u8 saturation; /* as above */
u8 sharpness; /* as above */
u8 white_bal; /* 0 - 33, x11 for real value */
u8 speed; /* Stored as 0 - 6, used as index in speed_to_* (above) */
u8 speed; /* Stored as 0 - 6, used as index in spd_to_* (above) */
u8 size; /* Frame Size */
int height;
int width;
......@@ -93,6 +113,10 @@ struct konicawc {
u8 sts_buf[USBVIDEO_NUMSBUF][FRAMES_PER_DESC];
struct urb *last_data_urb;
int lastframe;
int cur_frame_size; /* number of bytes in current frame size */
int maxline; /* number of lines per frame */
int yplanesz; /* Number of bytes in the Y plane */
unsigned int buttonsts:1;
};
......@@ -110,42 +134,56 @@ static int konicawc_ctrl_msg(uvd_t *uvd, u8 dir, u8 request, u16 value, u16 inde
}
/* Start image capture: misc request 0x0b with value 1 turns the camera on. */
static inline void konicawc_camera_on(uvd_t *uvd)
{
	DEBUG(0, "camera on");
	konicawc_set_misc(uvd, 0x2, 1, 0x0b);
}
/* Stop image capture: misc request 0x0b with value 0 turns the camera off. */
static inline void konicawc_camera_off(uvd_t *uvd)
{
	DEBUG(0, "camera off");
	konicawc_set_misc(uvd, 0x2, 0, 0x0b);
}
/*
 * Program the camera for the frame size selected in cam->size and derive
 * the per-frame bookkeeping values from it.
 *
 * The size is set on the device via misc request 0x08 using the
 * size-specific command byte from camera_sizes[].  Data arrives in
 * 384-byte blocks of 256 Y + 64 U + 64 V bytes, so:
 *   yplanesz       = width * height          (bytes in the Y plane)
 *   cur_frame_size = yplanesz * 3 / 2        (Y + quarter-size U and V)
 *   maxline        = yplanesz / 256          (number of 384-byte blocks,
 *                                             i.e. 256-byte Y rows, per frame)
 */
static void konicawc_set_camera_size(uvd_t *uvd)
{
	struct konicawc *cam = (struct konicawc *)uvd->user_data;

	konicawc_set_misc(uvd, 0x2, camera_sizes[cam->size].cmd, 0x08);
	cam->width = camera_sizes[cam->size].width;
	cam->height = camera_sizes[cam->size].height;
	cam->yplanesz = cam->height * cam->width;
	cam->cur_frame_size = (cam->yplanesz * 3) / 2;
	cam->maxline = cam->yplanesz / 256;
	uvd->videosize = VIDEOSIZE(cam->width, cam->height);
}
static int konicawc_setup_on_open(uvd_t *uvd)
{
struct konicawc *cam = (struct konicawc *)uvd->user_data;
konicawc_set_misc(uvd, 0x2, 0, 0x0b);
dbg("setting brightness to %d (%d)", cam->brightness,
DEBUG(1, "setting brightness to %d (%d)", cam->brightness,
cam->brightness * 11);
konicawc_set_value(uvd, cam->brightness, SetBrightness);
dbg("setting white balance to %d (%d)", cam->white_bal,
DEBUG(1, "setting white balance to %d (%d)", cam->white_bal,
cam->white_bal * 11);
konicawc_set_value(uvd, cam->white_bal, SetWhitebal);
dbg("setting contrast to %d (%d)", cam->contrast,
DEBUG(1, "setting contrast to %d (%d)", cam->contrast,
cam->contrast * 11);
konicawc_set_value(uvd, cam->contrast, SetContrast);
dbg("setting saturation to %d (%d)", cam->saturation,
DEBUG(1, "setting saturation to %d (%d)", cam->saturation,
cam->saturation * 11);
konicawc_set_value(uvd, cam->saturation, SetSaturation);
dbg("setting sharpness to %d (%d)", cam->sharpness,
DEBUG(1, "setting sharpness to %d (%d)", cam->sharpness,
cam->sharpness * 11);
konicawc_set_value(uvd, cam->sharpness, SetSharpness);
dbg("setting size %d", cam->size);
switch(cam->size) {
case 0:
konicawc_set_misc(uvd, 0x2, 0xa, 0x08);
break;
case 1:
konicawc_set_misc(uvd, 0x2, 4, 0x08);
break;
case 2:
konicawc_set_misc(uvd, 0x2, 5, 0x08);
break;
}
konicawc_set_misc(uvd, 0x2, 1, 0x0b);
cam->lastframe = -1;
konicawc_set_camera_size(uvd);
cam->lastframe = -2;
cam->buttonsts = 0;
return 0;
}
......@@ -154,23 +192,25 @@ static void konicawc_adjust_picture(uvd_t *uvd)
{
struct konicawc *cam = (struct konicawc *)uvd->user_data;
dbg("new brightness: %d", uvd->vpic.brightness);
konicawc_camera_off(uvd);
DEBUG(1, "new brightness: %d", uvd->vpic.brightness);
uvd->vpic.brightness = (uvd->vpic.brightness > MAX_BRIGHTNESS) ? MAX_BRIGHTNESS : uvd->vpic.brightness;
if(cam->brightness != uvd->vpic.brightness / 11) {
cam->brightness = uvd->vpic.brightness / 11;
dbg("setting brightness to %d (%d)", cam->brightness,
DEBUG(1, "setting brightness to %d (%d)", cam->brightness,
cam->brightness * 11);
konicawc_set_value(uvd, cam->brightness, SetBrightness);
}
dbg("new contrast: %d", uvd->vpic.contrast);
DEBUG(1, "new contrast: %d", uvd->vpic.contrast);
uvd->vpic.contrast = (uvd->vpic.contrast > MAX_CONTRAST) ? MAX_CONTRAST : uvd->vpic.contrast;
if(cam->contrast != uvd->vpic.contrast / 11) {
cam->contrast = uvd->vpic.contrast / 11;
dbg("setting contrast to %d (%d)", cam->contrast,
DEBUG(1, "setting contrast to %d (%d)", cam->contrast,
cam->contrast * 11);
konicawc_set_value(uvd, cam->contrast, SetContrast);
}
konicawc_camera_on(uvd);
}
......@@ -180,21 +220,20 @@ static int konicawc_compress_iso(uvd_t *uvd, struct urb *dataurb, struct urb *st
int i, totlen = 0;
unsigned char *status = stsurb->transfer_buffer;
int keep = 0, discard = 0, bad = 0;
static int buttonsts = 0;
struct konicawc *cam = (struct konicawc *)uvd->user_data;
for (i = 0; i < dataurb->number_of_packets; i++) {
int button = buttonsts;
int button = cam->buttonsts;
unsigned char sts;
int n = dataurb->iso_frame_desc[i].actual_length;
int st = dataurb->iso_frame_desc[i].status;
cdata = dataurb->transfer_buffer +
cdata = dataurb->transfer_buffer +
dataurb->iso_frame_desc[i].offset;
/* Detect and ignore errored packets */
if (st < 0) {
if (debug >= 1)
err("Data error: packet=%d. len=%d. status=%d.",
i, n, st);
DEBUG(1, "Data error: packet=%d. len=%d. status=%d.",
i, n, st);
uvd->stats.iso_err_count++;
continue;
}
......@@ -210,8 +249,8 @@ static int konicawc_compress_iso(uvd_t *uvd, struct urb *dataurb, struct urb *st
/* sts: 0x80-0xff: frame start with frame number (ie 0-7f)
* otherwise:
* bit 0 0:drop packet (padding data)
* 1 keep packet
* bit 0 0: keep packet
* 1: drop packet (padding data)
*
* bit 4 0 button not clicked
* 1 button clicked
......@@ -225,10 +264,10 @@ static int konicawc_compress_iso(uvd_t *uvd, struct urb *dataurb, struct urb *st
/* work out the button status, but don't do
   anything with it for now */
if(button != buttonsts) {
dbg("button: %sclicked", button ? "" : "un");
buttonsts = button;
if(button != cam->buttonsts) {
DEBUG(2, "button: %sclicked", button ? "" : "un");
cam->buttonsts = button;
}
if(sts == 0x01) { /* drop frame */
......@@ -241,34 +280,52 @@ static int konicawc_compress_iso(uvd_t *uvd, struct urb *dataurb, struct urb *st
bad++;
continue;
}
if(!sts && cam->lastframe == -2) {
DEBUG(2, "dropping frame looking for image start");
continue;
}
keep++;
if(*(status+i) & 0x80) { /* frame start */
if(sts & 0x80) { /* frame start */
unsigned char marker[] = { 0, 0xff, 0, 0x00 };
if(debug > 1)
dbg("Adding Marker packet = %d, frame = %2.2x",
i, *(status+i));
marker[3] = *(status+i) - 0x80;
RingQueue_Enqueue(&uvd->dp, marker, 4);
if(cam->lastframe == -2) {
DEBUG(2, "found initial image");
cam->lastframe = -1;
}
marker[3] = sts & 0x7F;
RingQueue_Enqueue(&uvd->dp, marker, 4);
totlen += 4;
}
totlen += n; /* Little local accounting */
if(debug > 5)
dbg("Adding packet %d, bytes = %d", i, n);
RingQueue_Enqueue(&uvd->dp, cdata, n);
}
if(debug > 8) {
dbg("finished: keep = %d discard = %d bad = %d added %d bytes",
DEBUG(8, "finished: keep = %d discard = %d bad = %d added %d bytes",
keep, discard, bad, totlen);
}
return totlen;
}
/*
 * Clear an isochronous URB's per-packet status fields and resubmit it.
 * Used to recycle both the data URB and the status URB after a completed
 * pair has been processed.  A submission failure is only logged; the URB
 * is not retried here.
 */
static void resubmit_urb(uvd_t *uvd, struct urb *urb)
{
	int i, ret;

	/* stale per-frame status must be cleared before reuse */
	for (i = 0; i < FRAMES_PER_DESC; i++) {
		urb->iso_frame_desc[i].status = 0;
	}
	urb->dev = uvd->dev;
	urb->status = 0;
	ret = usb_submit_urb(urb, GFP_KERNEL);
	DEBUG(3, "submitting urb of length %d", urb->transfer_buffer_length);
	if(ret)
		err("usb_submit_urb error (%d)", ret);
}
static void konicawc_isoc_irq(struct urb *urb)
{
int i, ret, len = 0;
uvd_t *uvd = urb->context;
struct konicawc *cam = (struct konicawc *)uvd->user_data;
......@@ -277,42 +334,35 @@ static void konicawc_isoc_irq(struct urb *urb)
return;
if (!uvd->streaming) {
if (debug >= 1)
info("Not streaming, but interrupt!");
DEBUG(1, "Not streaming, but interrupt!");
return;
}
if (urb->actual_length > 32) {
cam->last_data_urb = urb;
goto urb_done_with;
}
DEBUG(3, "got frame %d len = %d buflen =%d", urb->start_frame, urb->actual_length, urb->transfer_buffer_length);
uvd->stats.urb_count++;
if (urb->actual_length <= 0)
goto urb_done_with;
if (urb->transfer_buffer_length > 32) {
cam->last_data_urb = urb;
return;
}
/* Copy the data received into ring queue */
if(cam->last_data_urb) {
len = konicawc_compress_iso(uvd, cam->last_data_urb, urb);
for (i = 0; i < FRAMES_PER_DESC; i++) {
cam->last_data_urb->iso_frame_desc[i].status = 0;
}
int len = 0;
if(urb->start_frame != cam->last_data_urb->start_frame)
err("Lost sync on frames");
else if (!urb->status && !cam->last_data_urb->status)
len = konicawc_compress_iso(uvd, cam->last_data_urb, urb);
resubmit_urb(uvd, urb);
resubmit_urb(uvd, cam->last_data_urb);
cam->last_data_urb = NULL;
uvd->stats.urb_length = len;
uvd->stats.data_count += len;
if(len)
RingQueue_WakeUpInterruptible(&uvd->dp);
return;
}
uvd->stats.urb_length = len;
uvd->stats.data_count += len;
if(len)
RingQueue_WakeUpInterruptible(&uvd->dp);
urb_done_with:
for (i = 0; i < FRAMES_PER_DESC; i++) {
urb->iso_frame_desc[i].status = 0;
}
urb->dev = uvd->dev;
urb->status = 0;
ret = usb_submit_urb(urb, GFP_KERNEL);
if(ret)
err("usb_submit_urb error (%d)", ret);
return;
}
......@@ -322,13 +372,18 @@ static int konicawc_start_data(uvd_t *uvd)
struct usb_device *dev = uvd->dev;
int i, errFlag;
struct konicawc *cam = (struct konicawc *)uvd->user_data;
int pktsz;
struct usb_interface_descriptor *interface;
interface = &dev->actconfig->interface[uvd->iface].altsetting[spd_to_iface[cam->speed]];
pktsz = interface->endpoint[1].wMaxPacketSize;
DEBUG(1, "pktsz = %d", pktsz);
if (!CAMERA_IS_OPERATIONAL(uvd)) {
err("Camera is not operational");
return -EFAULT;
}
uvd->curframe = -1;
konicawc_camera_on(uvd);
/* Alternate interface 1 is is the biggest frame size */
i = usb_set_interface(dev, uvd->iface, uvd->ifaceAltActive);
if (i < 0) {
......@@ -349,10 +404,10 @@ static int konicawc_start_data(uvd_t *uvd)
urb->transfer_buffer = uvd->sbuf[i].data;
urb->complete = konicawc_isoc_irq;
urb->number_of_packets = FRAMES_PER_DESC;
urb->transfer_buffer_length = uvd->iso_packet_len * FRAMES_PER_DESC;
for (j=k=0; j < FRAMES_PER_DESC; j++, k += uvd->iso_packet_len) {
urb->transfer_buffer_length = pktsz * FRAMES_PER_DESC;
for (j=k=0; j < FRAMES_PER_DESC; j++, k += pktsz) {
urb->iso_frame_desc[j].offset = k;
urb->iso_frame_desc[j].length = uvd->iso_packet_len;
urb->iso_frame_desc[j].length = pktsz;
}
urb = cam->sts_urb[i];
......@@ -375,18 +430,17 @@ static int konicawc_start_data(uvd_t *uvd)
/* Submit all URBs */
for (i=0; i < USBVIDEO_NUMSBUF; i++) {
errFlag = usb_submit_urb(uvd->sbuf[i].urb, GFP_KERNEL);
if (errFlag)
err ("usb_submit_isoc(%d) ret %d", i, errFlag);
errFlag = usb_submit_urb(cam->sts_urb[i], GFP_KERNEL);
if (errFlag)
err("usb_submit_isoc(%d) ret %d", i, errFlag);
errFlag = usb_submit_urb(uvd->sbuf[i].urb, GFP_KERNEL);
if (errFlag)
err ("usb_submit_isoc(%d) ret %d", i, errFlag);
}
uvd->streaming = 1;
if (debug > 1)
dbg("streaming=1 video_endp=$%02x", uvd->video_endp);
DEBUG(1, "streaming=1 video_endp=$%02x", uvd->video_endp);
return 0;
}
......@@ -399,6 +453,8 @@ static void konicawc_stop_data(uvd_t *uvd)
if ((uvd == NULL) || (!uvd->streaming) || (uvd->dev == NULL))
return;
konicawc_camera_off(uvd);
uvd->streaming = 0;
cam = (struct konicawc *)uvd->user_data;
cam->last_data_urb = NULL;
......@@ -413,8 +469,6 @@ static void konicawc_stop_data(uvd_t *uvd)
err("usb_unlink_urb() error %d.", j);
}
uvd->streaming = 0;
if (!uvd->remove_pending) {
/* Set packet size to 0 */
j = usb_set_interface(uvd->dev, uvd->iface, uvd->ifaceAltInactive);
......@@ -428,42 +482,33 @@ static void konicawc_stop_data(uvd_t *uvd)
static void konicawc_process_isoc(uvd_t *uvd, usbvideo_frame_t *frame)
{
int n;
int maxline, yplanesz;
struct konicawc *cam = (struct konicawc *)uvd->user_data;
assert(uvd != NULL);
int maxline = cam->maxline;
int yplanesz = cam->yplanesz;
assert(frame != NULL);
maxline = (cam->height * cam->width * 3) / (2 * 384);
yplanesz = cam->height * cam->width;
if(debug > 5)
dbg("maxline = %d yplanesz = %d", maxline, yplanesz);
if(debug > 3)
dbg("Frame state = %d", frame->scanstate);
DEBUG(5, "maxline = %d yplanesz = %d", maxline, yplanesz);
DEBUG(3, "Frame state = %d", frame->scanstate);
if(frame->scanstate == ScanState_Scanning) {
int drop = 0;
int curframe;
int fdrops = 0;
if(debug > 3)
dbg("Searching for marker, queue len = %d", RingQueue_GetLength(&uvd->dp));
DEBUG(3, "Searching for marker, queue len = %d", RingQueue_GetLength(&uvd->dp));
while(RingQueue_GetLength(&uvd->dp) >= 4) {
if ((RING_QUEUE_PEEK(&uvd->dp, 0) == 0x00) &&
(RING_QUEUE_PEEK(&uvd->dp, 1) == 0xff) &&
(RING_QUEUE_PEEK(&uvd->dp, 2) == 0x00) &&
(RING_QUEUE_PEEK(&uvd->dp, 3) < 0x80)) {
curframe = RING_QUEUE_PEEK(&uvd->dp, 3);
if(cam->lastframe != -1) {
if(curframe < cam->lastframe) {
fdrops = (curframe + 0x80) - cam->lastframe;
} else {
fdrops = curframe - cam->lastframe;
}
if(cam->lastframe >= 0) {
fdrops = (0x80 + curframe - cam->lastframe) & 0x7F;
fdrops--;
if(fdrops)
if(fdrops) {
info("Dropped %d frames (%d -> %d)", fdrops,
cam->lastframe, curframe);
}
}
cam->lastframe = curframe;
frame->curline = 0;
......@@ -474,18 +519,20 @@ static void konicawc_process_isoc(uvd_t *uvd, usbvideo_frame_t *frame)
RING_QUEUE_DEQUEUE_BYTES(&uvd->dp, 1);
drop++;
}
if(drop)
DEBUG(2, "dropped %d bytes looking for new frame", drop);
}
if(frame->scanstate == ScanState_Scanning)
return;
/* Try to move data from queue into frame buffer
/* Try to move data from queue into frame buffer
* We get data in blocks of 384 bytes made up of:
* 256 Y, 64 U, 64 V.
* This needs to be written out as a Y plane, a U plane and a V plane.
*/
while ( frame->curline < maxline && (n = RingQueue_GetLength(&uvd->dp)) >= 384) {
while ( frame->curline < maxline && (RingQueue_GetLength(&uvd->dp) >= 384)) {
/* Y */
RingQueue_Dequeue(&uvd->dp, frame->data + (frame->curline * 256), 256);
/* U */
......@@ -497,8 +544,7 @@ static void konicawc_process_isoc(uvd_t *uvd, usbvideo_frame_t *frame)
}
/* See if we filled the frame */
if (frame->curline == maxline) {
if(debug > 5)
dbg("got whole frame");
DEBUG(5, "got whole frame");
frame->frameState = FrameState_Done_Hold;
frame->curline = 0;
......@@ -510,10 +556,8 @@ static void konicawc_process_isoc(uvd_t *uvd, usbvideo_frame_t *frame)
static int konicawc_calculate_fps(uvd_t *uvd)
{
struct konicawc *t = uvd->user_data;
dbg("fps = %d", speed_to_fps[t->size][t->speed]/3);
return speed_to_fps[t->size][t->speed]/3;
struct konicawc *cam = uvd->user_data;
return spd_to_fps[cam->size][cam->speed]/3;
}
......@@ -550,10 +594,10 @@ static void konicawc_configure_video(uvd_t *uvd)
uvd->vcap.type = VID_TYPE_CAPTURE;
uvd->vcap.channels = 1;
uvd->vcap.audios = 0;
uvd->vcap.minwidth = camera_sizes[cam->size][0];
uvd->vcap.minheight = camera_sizes[cam->size][1];
uvd->vcap.maxwidth = camera_sizes[cam->size][0];
uvd->vcap.maxheight = camera_sizes[cam->size][1];
uvd->vcap.minwidth = camera_sizes[cam->size].width;
uvd->vcap.minheight = camera_sizes[cam->size].height;
uvd->vcap.maxwidth = camera_sizes[cam->size].width;
uvd->vcap.maxheight = camera_sizes[cam->size].height;
memset(&uvd->vchan, 0, sizeof(uvd->vchan));
uvd->vchan.flags = 0 ;
......@@ -563,15 +607,14 @@ static void konicawc_configure_video(uvd_t *uvd)
strcpy(uvd->vchan.name, "Camera");
/* Talk to device */
dbg("device init");
DEBUG(1, "device init");
if(!konicawc_get_misc(uvd, 0x3, 0, 0x10, buf, 2))
dbg("3,10 -> %2.2x %2.2x", buf[0], buf[1]);
DEBUG(2, "3,10 -> %2.2x %2.2x", buf[0], buf[1]);
if(!konicawc_get_misc(uvd, 0x3, 0, 0x10, buf, 2))
dbg("3,10 -> %2.2x %2.2x", buf[0], buf[1]);
DEBUG(2, "3,10 -> %2.2x %2.2x", buf[0], buf[1]);
if(konicawc_set_misc(uvd, 0x2, 0, 0xd))
dbg("2,0,d failed");
dbg("setting initial values");
DEBUG(2, "2,0,d failed");
DEBUG(1, "setting initial values");
}
......@@ -582,8 +625,7 @@ static void *konicawc_probe(struct usb_device *dev, unsigned int ifnum, const st
int actInterface=-1, inactInterface=-1, maxPS=0;
unsigned char video_ep = 0;
if (debug >= 1)
dbg("konicawc_probe(%p,%u.)", dev, ifnum);
DEBUG(1, "konicawc_probe(%p,%u.)", dev, ifnum);
/* We don't handle multi-config cameras */
if (dev->descriptor.bNumConfigurations != 1)
......@@ -594,10 +636,8 @@ static void *konicawc_probe(struct usb_device *dev, unsigned int ifnum, const st
/* Validate found interface: must have one ISO endpoint */
nas = dev->actconfig->interface[ifnum].num_altsetting;
if (debug > 0)
info("Number of alternate settings=%d.", nas);
if (nas < 8) {
err("Too few alternate settings for this camera!");
if (nas != 8) {
err("Incorrect number of alternate settings (%d) for this camera!", nas);
return NULL;
}
/* Validate all alternate settings */
......@@ -612,7 +652,7 @@ static void *konicawc_probe(struct usb_device *dev, unsigned int ifnum, const st
return NULL;
}
endpoint = &interface->endpoint[1];
dbg("found endpoint: addr: 0x%2.2x maxps = 0x%4.4x",
DEBUG(1, "found endpoint: addr: 0x%2.2x maxps = 0x%4.4x",
endpoint->bEndpointAddress, endpoint->wMaxPacketSize);
if (video_ep == 0)
video_ep = endpoint->bEndpointAddress;
......@@ -636,22 +676,20 @@ static void *konicawc_probe(struct usb_device *dev, unsigned int ifnum, const st
return NULL;
}
} else {
if (i == speed_to_interface[speed]) {
if (i == spd_to_iface[speed]) {
/* This one is the requested one */
actInterface = i;
maxPS = endpoint->wMaxPacketSize;
if (debug > 0) {
info("Selecting requested active setting=%d. maxPS=%d.",
i, maxPS);
}
}
}
if(endpoint->wMaxPacketSize > maxPS)
maxPS = endpoint->wMaxPacketSize;
}
if(actInterface == -1) {
err("Cant find required endpoint");
return NULL;
}
DEBUG(1, "Selecting requested active setting=%d. maxPS=%d.", actInterface, maxPS);
/* Code below may sleep, need to lock module while we are here */
MOD_INC_USE_COUNT;
......@@ -670,26 +708,10 @@ static void *konicawc_probe(struct usb_device *dev, unsigned int ifnum, const st
}
}
cam->speed = speed;
switch(size) {
case SIZE_160X136:
default:
cam->height = 136;
cam->width = 160;
cam->size = SIZE_160X136;
break;
case SIZE_176X144:
cam->height = 144;
cam->width = 176;
cam->size = SIZE_176X144;
break;
case SIZE_320X240:
cam->height = 240;
cam->width = 320;
cam->size = SIZE_320X240;
break;
}
RESTRICT_TO_RANGE(size, SIZE_160X120, SIZE_320X240);
cam->width = camera_sizes[size].width;
cam->height = camera_sizes[size].height;
cam->size = size;
uvd->flags = 0;
uvd->debug = debug;
......@@ -773,9 +795,9 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Simon Evans <spse@secret.org.uk>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_PARM(speed, "i");
MODULE_PARM_DESC(speed, "FPS speed: 0 (slowest) - 6 (fastest)");
MODULE_PARM_DESC(speed, "Initial speed: 0 (slowest) - 6 (fastest)");
MODULE_PARM(size, "i");
MODULE_PARM_DESC(size, "Frame Size 0: 160x136 1: 176x144 2: 320x240");
MODULE_PARM_DESC(size, "Initial Size 0: 160x120 1: 160x136 2: 176x144 3: 320x240");
MODULE_PARM(brightness, "i");
MODULE_PARM_DESC(brightness, "Initial brightness 0 - 108");
MODULE_PARM(contrast, "i");
......@@ -786,7 +808,11 @@ MODULE_PARM(sharpness, "i");
MODULE_PARM_DESC(sharpness, "Initial brightness 0 - 108");
MODULE_PARM(whitebal, "i");
MODULE_PARM_DESC(whitebal, "Initial white balance 0 - 363");
#ifdef CONFIG_USB_DEBUG
MODULE_PARM(debug, "i");
MODULE_PARM_DESC(debug, "Debug level: 0-9 (default=0)");
#endif
module_init(konicawc_init);
module_exit(konicawc_cleanup);
......@@ -41,10 +41,6 @@
#include <linux/ticable.h>
#include "tiglusb.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
# define minor(x) MINOR(x)
#endif
/*
* Version Information
*/
......
// Portions of this file taken from
// Portions of this file taken from
// Petko Manolov - Petkan (petkan@dce.bg)
// from his driver pegasus.c
......@@ -1170,23 +1170,20 @@ static void * CDCEther_probe( struct usb_device *usb, unsigned int ifnum,
if (rc) {
// Nope we couldn't find one we liked.
// This device was not meant for us to control.
kfree( ether_dev );
return NULL;
goto error_all;
}
// Now that we FOUND a configuration. let's try to make the
// Now that we FOUND a configuration. let's try to make the
// device go into it.
if ( usb_set_configuration( usb, ether_dev->bConfigurationValue ) ) {
err("usb_set_configuration() failed");
kfree( ether_dev );
return NULL;
goto error_all;
}
// Now set the communication interface up as required.
if (usb_set_interface(usb, ether_dev->comm_bInterfaceNumber, ether_dev->comm_bAlternateSetting)) {
err("usb_set_interface() failed");
kfree( ether_dev );
return NULL;
goto error_all;
}
// Only turn traffic on right now if we must...
......@@ -1194,23 +1191,21 @@ static void * CDCEther_probe( struct usb_device *usb, unsigned int ifnum,
// We found an alternate setting for the data
// interface that allows us to turn off traffic.
// We should use it.
if (usb_set_interface( usb,
ether_dev->data_bInterfaceNumber,
if (usb_set_interface( usb,
ether_dev->data_bInterfaceNumber,
ether_dev->data_bAlternateSetting_without_traffic)) {
err("usb_set_interface() failed");
kfree( ether_dev );
return NULL;
goto error_all;
}
} else {
// We didn't find an alternate setting for the data
// interface that would let us turn off traffic.
// Oh well, let's go ahead and do what we must...
if (usb_set_interface( usb,
ether_dev->data_bInterfaceNumber,
if (usb_set_interface( usb,
ether_dev->data_bInterfaceNumber,
ether_dev->data_bAlternateSetting_with_traffic)) {
err("usb_set_interface() failed");
kfree( ether_dev );
return NULL;
goto error_all;
}
}
......@@ -1220,8 +1215,7 @@ static void * CDCEther_probe( struct usb_device *usb, unsigned int ifnum,
// Hmm... The kernel is not sharing today...
// Fine, we didn't want it anyway...
err( "Unable to initialize ethernet device" );
kfree( ether_dev );
return NULL;
goto error_all;
}
// Now that we have an ethernet device, let's set it up
......@@ -1241,7 +1235,7 @@ static void * CDCEther_probe( struct usb_device *usb, unsigned int ifnum,
// We'll keep track of this information for later...
ether_dev->usb = usb;
ether_dev->net = net;
// and don't forget the MAC address.
set_ethernet_addr( ether_dev );
......@@ -1249,12 +1243,12 @@ static void * CDCEther_probe( struct usb_device *usb, unsigned int ifnum,
log_device_info( ether_dev );
// I claim this interface to be a CDC Ethernet Networking device
usb_driver_claim_interface( &CDCEther_driver,
&(usb->config[ether_dev->configuration_num].interface[ether_dev->comm_interface]),
usb_driver_claim_interface( &CDCEther_driver,
&(usb->config[ether_dev->configuration_num].interface[ether_dev->comm_interface]),
ether_dev );
// I claim this interface to be a CDC Ethernet Networking device
usb_driver_claim_interface( &CDCEther_driver,
&(usb->config[ether_dev->configuration_num].interface[ether_dev->data_interface]),
usb_driver_claim_interface( &CDCEther_driver,
&(usb->config[ether_dev->configuration_num].interface[ether_dev->data_interface]),
ether_dev );
// Does this REALLY do anything???
......@@ -1265,6 +1259,14 @@ static void * CDCEther_probe( struct usb_device *usb, unsigned int ifnum,
// Okay, we are finally done...
return NULL;
// bailing out with our tail between our knees
error_all:
usb_free_urb(ether_dev->tx_urb);
usb_free_urb(ether_dev->rx_urb);
usb_free_urb(ether_dev->intr_urb);
kfree( ether_dev );
return NULL;
}
......
......@@ -147,7 +147,8 @@ static int queuecommand( Scsi_Cmnd *srb , void (*done)(Scsi_Cmnd *))
srb->host_scribble = (unsigned char *)us;
/* enqueue the command */
BUG_ON(atomic_read(&us->sm_state) != US_STATE_IDLE || us->srb != NULL);
BUG_ON(atomic_read(&us->sm_state) != US_STATE_IDLE);
BUG_ON(us->srb != NULL);
srb->scsi_done = done;
us->srb = srb;
......
......@@ -203,16 +203,9 @@ extern void fill_inquiry_response(struct us_data *us,
/* The scsi_lock() and scsi_unlock() macros protect the sm_state and the
* single queue element srb for write access */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,3)
#define scsi_unlock(host) spin_unlock_irq(host->host_lock)
#define scsi_lock(host) spin_lock_irq(host->host_lock)
#define sg_address(psg) (page_address((psg)->page) + (psg)->offset)
#else
#define scsi_unlock(host) spin_unlock_irq(&io_request_lock)
#define scsi_lock(host) spin_lock_irq(&io_request_lock)
#define sg_address(psg) ((psg)->address)
#endif
#endif
......@@ -874,7 +874,7 @@ struct file_operations def_blk_fops = {
release: blkdev_close,
llseek: block_llseek,
read: generic_file_read,
write: generic_file_write,
write: generic_file_write_nolock,
mmap: generic_file_mmap,
fsync: block_fsync,
ioctl: blkdev_ioctl,
......
......@@ -21,6 +21,7 @@ struct file_operations fat_file_operations = {
write: fat_file_write,
mmap: generic_file_mmap,
fsync: file_fsync,
sendfile: generic_file_sendfile,
};
struct inode_operations fat_file_inode_operations = {
......
......@@ -137,11 +137,6 @@ char *disk_name (struct gendisk *hd, int minor, char *buf)
sprintf(s, "%s%d", "md", unit);
maj = s;
break;
case COMPAQ_CISS_MAJOR ... COMPAQ_CISS_MAJOR+7:
sprintf(s, "cciss/c%dd%d",
hd->major - COMPAQ_CISS_MAJOR, unit);
maj = s;
break;
case ATARAID_MAJOR:
sprintf(s, "ataraid/d%d", unit);
maj = s;
......
#ifndef _ALPHA_RTC_H
#define _ALPHA_RTC_H
/*
* Alpha uses the default access methods for the RTC.
*/
#include <asm-generic/rtc.h>
#endif
/*
* include/asm-generic/rtc.h
*
* Author: Tom Rini <trini@mvista.com>
*
* Based on:
* drivers/char/rtc.c
*
* Please read the COPYING file for all license details.
*/
#ifndef __ASM_RTC_H__
#define __ASM_RTC_H__
#ifdef __KERNEL__
#include <linux/mc146818rtc.h>
#include <linux/rtc.h>
#define RTC_PIE 0x40 /* periodic interrupt enable */
#define RTC_AIE 0x20 /* alarm interrupt enable */
#define RTC_UIE 0x10 /* update-finished interrupt enable */
extern void gen_rtc_interrupt(unsigned long);
/* some dummy definitions */
#define RTC_SQWE 0x08 /* enable square-wave output */
#define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
#define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
#define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */
/*
 * Returns true if a clock update is in progress.
 *
 * Reads the CMOS "update in progress" (UIP) bit from the RTC frequency
 * select register under rtc_lock; while it is set, the time and date
 * registers may be changing and must not be read.
 */
static inline unsigned char rtc_is_updating(void)
{
	unsigned char uip;

	spin_lock_irq(&rtc_lock);
	uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
	spin_unlock_irq(&rtc_lock);
	return uip;
}
/*
 * Read the current date and time from the CMOS RTC into *time.
 *
 * Waits (bounded, 10-20ms) for any in-progress RTC update to finish,
 * reads the time/date registers under rtc_lock, converts from BCD when
 * the chip is not in binary mode, and finally adjusts the fields to
 * struct rtc_time conventions (tm_mon is zero-based, years <= 69 are
 * taken to be 20xx).  tm_wday, tm_yday and tm_isdst are left untouched.
 * On DECstation the real year is reconstructed from RTC_DEC_YEAR.
 */
static inline void get_rtc_time(struct rtc_time *time)
{
	unsigned long uip_watchdog = jiffies;
	unsigned char ctrl;
#ifdef CONFIG_DECSTATION
	unsigned int real_year;
#endif

	/*
	 * read RTC once any update in progress is done. The update
	 * can take just over 2ms. We wait 10 to 20ms. There is no need to
	 * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
	 * If you need to know *exactly* when a second has started, enable
	 * periodic update complete interrupts, (via ioctl) and then
	 * immediately read /dev/rtc which will block until you get the IRQ.
	 * Once the read clears, read the RTC time (again via ioctl). Easy.
	 */

	if (rtc_is_updating() != 0)
		while (jiffies - uip_watchdog < 2*HZ/100) {
			barrier();
			cpu_relax();
		}

	/*
	 * Only the values that we read from the RTC are set. We leave
	 * tm_wday, tm_yday and tm_isdst untouched. Even though the
	 * RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated
	 * by the RTC when initially set to a non-zero value.
	 */
	spin_lock_irq(&rtc_lock);
	time->tm_sec = CMOS_READ(RTC_SECONDS);
	time->tm_min = CMOS_READ(RTC_MINUTES);
	time->tm_hour = CMOS_READ(RTC_HOURS);
	time->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
	time->tm_mon = CMOS_READ(RTC_MONTH);
	time->tm_year = CMOS_READ(RTC_YEAR);
#ifdef CONFIG_DECSTATION
	real_year = CMOS_READ(RTC_DEC_YEAR);
#endif
	/* control register tells us whether the values above are BCD */
	ctrl = CMOS_READ(RTC_CONTROL);
	spin_unlock_irq(&rtc_lock);

	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
	{
		BCD_TO_BIN(time->tm_sec);
		BCD_TO_BIN(time->tm_min);
		BCD_TO_BIN(time->tm_hour);
		BCD_TO_BIN(time->tm_mday);
		BCD_TO_BIN(time->tm_mon);
		BCD_TO_BIN(time->tm_year);
	}

#ifdef CONFIG_DECSTATION
	/* on DECstation the chip's year is pinned near 72; see set_rtc_time */
	time->tm_year += real_year - 72;
#endif

	/*
	 * Account for differences between how the RTC uses the values
	 * and how they are defined in a struct rtc_time;
	 */
	if (time->tm_year <= 69)
		time->tm_year += 100;

	time->tm_mon--;
}
/*
 * set_rtc_time - program the CMOS/MC146818 clock from *time.
 *
 * Returns 0 on success, or -EINVAL when the year cannot be
 * represented by the chip.  Updates are frozen (RTC_SET) and the
 * divider reset while the registers are rewritten; everything runs
 * under rtc_lock.
 */
static inline int set_rtc_time(struct rtc_time *time)
{
	unsigned char mon, day, hrs, min, sec;
	unsigned char save_control, save_freq_select;
	unsigned int yrs;
#ifdef CONFIG_DECSTATION
	unsigned int real_yrs, leap_yr;
#endif

	yrs = time->tm_year;
	mon = time->tm_mon + 1;   /* tm_mon starts at zero */
	day = time->tm_mday;
	hrs = time->tm_hour;
	min = time->tm_min;
	sec = time->tm_sec;

	if (yrs > 255)	/* They are unsigned */
		return -EINVAL;

	spin_lock_irq(&rtc_lock);
#ifdef CONFIG_DECSTATION
	real_yrs = yrs;
	leap_yr = ((!((yrs + 1900) % 4) && ((yrs + 1900) % 100)) ||
			!((yrs + 1900) % 400));
	yrs = 72;

	/*
	 * We want to keep the year set to 73 until March
	 * for non-leap years, so that Feb, 29th is handled
	 * correctly.
	 */
	if (!leap_yr && mon < 3) {
		real_yrs--;
		yrs = 73;
	}
#endif
	/* These limits and adjustments are independent of
	 * whether the chip is in binary mode or not.
	 */
	if (yrs > 169) {
		spin_unlock_irq(&rtc_lock);
		return -EINVAL;
	}

	if (yrs >= 100)
		yrs -= 100;	/* the chip only stores a two-digit year */

	if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
	    || RTC_ALWAYS_BCD) {
		BIN_TO_BCD(sec);
		BIN_TO_BCD(min);
		BIN_TO_BCD(hrs);
		BIN_TO_BCD(day);
		BIN_TO_BCD(mon);
		BIN_TO_BCD(yrs);
	}

	/* stop updates (RTC_SET) and reset the divider while writing */
	save_control = CMOS_READ(RTC_CONTROL);
	CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);

#ifdef CONFIG_DECSTATION
	CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
#endif
	CMOS_WRITE(yrs, RTC_YEAR);
	CMOS_WRITE(mon, RTC_MONTH);
	CMOS_WRITE(day, RTC_DAY_OF_MONTH);
	CMOS_WRITE(hrs, RTC_HOURS);
	CMOS_WRITE(min, RTC_MINUTES);
	CMOS_WRITE(sec, RTC_SECONDS);

	/* restore control/divider so the clock runs again */
	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
	spin_unlock_irq(&rtc_lock);

	return 0;
}
static inline unsigned int get_rtc_ss(void)
{
struct rtc_time h;
get_rtc_time(&h);
return h.tm_sec;
}
/* PLL clock correction is not supported by this RTC; always -EINVAL. */
static inline int get_rtc_pll(struct rtc_pll_info *pll)
{
	(void)pll;		/* no hardware PLL to query */
	return -EINVAL;
}
/* PLL clock correction is not supported by this RTC; always -EINVAL. */
static inline int set_rtc_pll(struct rtc_pll_info *pll)
{
	(void)pll;		/* no hardware PLL to adjust */
	return -EINVAL;
}
#endif /* __KERNEL__ */
#endif /* __ASM_RTC_H__ */
#ifndef _I386_RTC_H
#define _I386_RTC_H
/*
* x86 uses the default access methods for the RTC.
*/
#include <asm-generic/rtc.h>
#endif
/*
 * include/asm-parisc/rtc.h
*
* Copyright 2002 Randolph CHung <tausq@debian.org>
*
* Based on: include/asm-ppc/rtc.h and the genrtc driver in the
* 2.4 parisc linux tree
*/
#ifndef __ASM_RTC_H__
#define __ASM_RTC_H__
#ifdef __KERNEL__
#include <linux/rtc.h>
#include <asm/pdc.h>
#define SECS_PER_HOUR (60 * 60)
#define SECS_PER_DAY (SECS_PER_HOUR * 24)
#define RTC_PIE 0x40 /* periodic interrupt enable */
#define RTC_AIE 0x20 /* alarm interrupt enable */
#define RTC_UIE 0x10 /* update-finished interrupt enable */
extern void gen_rtc_interrupt(unsigned long);
/* some dummy definitions */
#define RTC_SQWE 0x08 /* enable square-wave output */
#define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
#define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
#define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */
/* Nonzero if YEAR is a leap year (divisible by 4, except that
   centuries are leap years only when divisible by 400). */
# define __isleap(year) \
  ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0))

/* How many days come before each month (0-12).
   Row 0 is for normal years, row 1 for leap years; entry [m] is the
   cumulative day count before month m (0-based), entry [12] the
   length of the whole year. */
static const unsigned short int __mon_yday[2][13] =
{
	/* Normal years. */
	{ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
	/* Leap years. */
	{ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
};
/*
 * get_rtc_time - read the PDC time-of-day clock into *wtime.
 *
 * Converts the absolute second count returned by firmware (seconds
 * since the 1970 epoch, per the computation below) into broken-down
 * calendar time.  Returns 0 on success, -1 if the PDC call fails.
 * tm_wday, tm_yday and tm_isdst are not filled in.
 */
static int get_rtc_time(struct rtc_time *wtime)
{
	struct pdc_tod tod_data;
	long int days, rem, y;
	const unsigned short int *ip;

	if(pdc_tod_read(&tod_data) < 0)
		return -1;

	// most of the remainder of this function is:
	// Copyright (C) 1991, 1993, 1997, 1998 Free Software Foundation, Inc.
	// This was originally a part of the GNU C Library.
	// It is distributed under the GPL, and was swiped from offtime.c

	/* split the second count into whole days and an intra-day rest */
	days = tod_data.tod_sec / SECS_PER_DAY;
	rem = tod_data.tod_sec % SECS_PER_DAY;
	wtime->tm_hour = rem / SECS_PER_HOUR;
	rem %= SECS_PER_HOUR;
	wtime->tm_min = rem / 60;
	wtime->tm_sec = rem % 60;

	y = 1970;

#define DIV(a, b) ((a) / (b) - ((a) % (b) < 0))
#define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400))

	/* step from 1970 until DAYS falls inside year Y */
	while (days < 0 || days >= (__isleap (y) ? 366 : 365))
	{
		/* Guess a corrected year, assuming 365 days per year. */
		long int yg = y + days / 365 - (days % 365 < 0);

		/* Adjust DAYS and Y to match the guessed year. */
		days -= ((yg - y) * 365
			+ LEAPS_THRU_END_OF (yg - 1)
			- LEAPS_THRU_END_OF (y - 1));
		y = yg;
	}
	wtime->tm_year = y - 1900;

	/* find the month from the cumulative-days table
	   (y is reused as the 0-based month index here) */
	ip = __mon_yday[__isleap(y)];
	for (y = 11; days < (long int) ip[y]; --y)
		continue;
	days -= ip[y];
	wtime->tm_mon = y;
	wtime->tm_mday = days + 1;

	return 0;
}
/*
 * Program the PDC time-of-day clock from broken-down time.
 * Returns 0 on success, -1 if the PDC firmware call fails.
 */
static int set_rtc_time(struct rtc_time *wtime)
{
	u_int32_t secs = mktime(wtime->tm_year + 1900, wtime->tm_mon + 1,
				wtime->tm_mday, wtime->tm_hour,
				wtime->tm_min, wtime->tm_sec);

	return (pdc_tod_set(secs, 0) < 0) ? -1 : 0;
}
static inline unsigned int get_rtc_ss(void)
{
struct rtc_time h;
get_rtc_time(&h);
return h.tm_sec;
}
/* The PDC clock exposes no PLL correction; report -EINVAL. */
static inline int get_rtc_pll(struct rtc_pll_info *pll)
{
	(void)pll;		/* nothing to read */
	return -EINVAL;
}
/* The PDC clock exposes no PLL correction; report -EINVAL. */
static inline int set_rtc_pll(struct rtc_pll_info *pll)
{
	(void)pll;		/* nothing to write */
	return -EINVAL;
}
#endif /* __KERNEL__ */
#endif /* __ASM_RTC_H__ */
/*
 * include/asm-ppc/rtc.h
*
* Copyright 2002 MontaVista Software Inc.
* Author: Tom Rini <trini@mvista.com>
*
* Based on:
* include/asm-m68k/rtc.h
*
* Copyright Richard Zidlicky
* implementation details for genrtc/q40rtc driver
*
* And the old drivers/macintosh/rtc.c which was heavily based on:
* Linux/SPARC Real Time Clock Driver
* Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
*
* With additional work by Paul Mackerras and Franz Sirl.
*/
/* permission is hereby granted to copy, modify and redistribute this code
* in terms of the GNU Library General Public License, Version 2 or later,
* at your option.
*/
#ifndef __ASM_RTC_H__
#define __ASM_RTC_H__
#ifdef __KERNEL__
#include <linux/rtc.h>
#include <asm/machdep.h>
#include <asm/time.h>
#define RTC_PIE 0x40 /* periodic interrupt enable */
#define RTC_AIE 0x20 /* alarm interrupt enable */
#define RTC_UIE 0x10 /* update-finished interrupt enable */
extern void gen_rtc_interrupt(unsigned long);
/* some dummy definitions */
#define RTC_SQWE 0x08 /* enable square-wave output */
#define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
#define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
#define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */
/*
 * Read the board RTC through the ppc_md.get_rtc_time hook (if the
 * platform registered one) and convert the result to struct rtc_time
 * conventions: years since 1900, 0-based month.  *time is left
 * untouched when no hook is registered.
 */
static inline void get_rtc_time(struct rtc_time *time)
{
	unsigned long now;

	if (!ppc_md.get_rtc_time)
		return;

	now = (ppc_md.get_rtc_time)();
	to_tm(now, time);
	time->tm_year -= 1900;
	time->tm_mon -= 1;	/* Make sure userland has a 0-based month */
}
/*
 * Set the current date and time in the real time clock.
 *
 * Returns 0 on success, or -EINVAL when the platform provides no
 * set_rtc_time hook.  The return type must be int (matching the
 * other asm/rtc.h implementations): the previous version was
 * declared void yet executed "return 0" / "return -EINVAL", which
 * is invalid C.  It also tested the wrong hook - ppc_md.get_rtc_time
 * - before calling ppc_md.set_rtc_time, so a board with only a get
 * hook would have called a NULL function pointer.
 */
static inline int set_rtc_time(struct rtc_time *time)
{
	unsigned long nowtime;

	/* check the hook we are about to call, not the read hook */
	if (!ppc_md.set_rtc_time)
		return -EINVAL;

	nowtime = mktime(time->tm_year+1900, time->tm_mon+1,
			 time->tm_mday, time->tm_hour, time->tm_min,
			 time->tm_sec);
	(ppc_md.set_rtc_time)(nowtime);
	return 0;
}
static inline unsigned int get_rtc_ss(void)
{
struct rtc_time h;
get_rtc_time(&h);
return h.tm_sec;
}
/* No PLL correction support on this platform; always -EINVAL. */
static inline int get_rtc_pll(struct rtc_pll_info *pll)
{
	(void)pll;		/* unsupported */
	return -EINVAL;
}
/* No PLL correction support on this platform; always -EINVAL. */
static inline int set_rtc_pll(struct rtc_pll_info *pll)
{
	(void)pll;		/* unsupported */
	return -EINVAL;
}
#endif /* __KERNEL__ */
#endif /* __ASM_RTC_H__ */
......@@ -21,8 +21,7 @@ typedef unsigned short apm_eventinfo_t;
#ifdef __KERNEL__
#define APM_40 (GDT_ENTRY_APMBIOS_BASE * 8)
#define APM_CS (APM_BASE + 8)
#define APM_CS (GDT_ENTRY_APMBIOS_BASE * 8)
#define APM_CS_16 (APM_CS + 8)
#define APM_DS (APM_CS_16 + 8)
......
......@@ -1241,6 +1241,7 @@ extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
extern ssize_t generic_file_read(struct file *, char *, size_t, loff_t *);
extern ssize_t generic_file_write(struct file *, const char *, size_t, loff_t *);
extern ssize_t generic_file_write_nolock(struct file *, const char *, size_t, loff_t *);
extern ssize_t generic_file_sendfile(struct file *, struct file *, loff_t *, size_t);
extern void do_generic_file_read(struct file *, loff_t *, read_descriptor_t *, read_actor_t);
ssize_t generic_file_direct_IO(int rw, struct inode *inode, char *buf,
......
......@@ -5,34 +5,34 @@
#define INIT_FILES \
{ \
count: ATOMIC_INIT(1), \
file_lock: RW_LOCK_UNLOCKED, \
max_fds: NR_OPEN_DEFAULT, \
max_fdset: __FD_SETSIZE, \
next_fd: 0, \
fd: &init_files.fd_array[0], \
close_on_exec: &init_files.close_on_exec_init, \
open_fds: &init_files.open_fds_init, \
close_on_exec_init: { { 0, } }, \
open_fds_init: { { 0, } }, \
fd_array: { NULL, } \
.count = ATOMIC_INIT(1), \
.file_lock = RW_LOCK_UNLOCKED, \
.max_fds = NR_OPEN_DEFAULT, \
.max_fdset = __FD_SETSIZE, \
.next_fd = 0, \
.fd = &init_files.fd_array[0], \
.close_on_exec = &init_files.close_on_exec_init, \
.open_fds = &init_files.open_fds_init, \
.close_on_exec_init = { { 0, } }, \
.open_fds_init = { { 0, } }, \
.fd_array = { NULL, } \
}
#define INIT_MM(name) \
{ \
mm_rb: RB_ROOT, \
pgd: swapper_pg_dir, \
mm_users: ATOMIC_INIT(2), \
mm_count: ATOMIC_INIT(1), \
mmap_sem: __RWSEM_INITIALIZER(name.mmap_sem), \
page_table_lock: SPIN_LOCK_UNLOCKED, \
mmlist: LIST_HEAD_INIT(name.mmlist), \
.mm_rb = RB_ROOT, \
.pgd = swapper_pg_dir, \
.mm_users = ATOMIC_INIT(2), \
.mm_count = ATOMIC_INIT(1), \
.mmap_sem = __RWSEM_INITIALIZER(name.mmap_sem), \
.page_table_lock = SPIN_LOCK_UNLOCKED, \
.mmlist = LIST_HEAD_INIT(name.mmlist), \
}
#define INIT_SIGNALS { \
count: ATOMIC_INIT(1), \
action: { {{0,}}, }, \
siglock: SPIN_LOCK_UNLOCKED \
.count = ATOMIC_INIT(1), \
.action = { {{0,}}, }, \
.siglock = SPIN_LOCK_UNLOCKED \
}
/*
......@@ -41,45 +41,45 @@
*/
#define INIT_TASK(tsk) \
{ \
state: 0, \
thread_info: &init_thread_info, \
flags: 0, \
lock_depth: -1, \
prio: MAX_PRIO-20, \
static_prio: MAX_PRIO-20, \
policy: SCHED_NORMAL, \
cpus_allowed: -1, \
mm: NULL, \
active_mm: &init_mm, \
run_list: LIST_HEAD_INIT(tsk.run_list), \
time_slice: HZ, \
tasks: LIST_HEAD_INIT(tsk.tasks), \
real_parent: &tsk, \
parent: &tsk, \
children: LIST_HEAD_INIT(tsk.children), \
sibling: LIST_HEAD_INIT(tsk.sibling), \
thread_group: LIST_HEAD_INIT(tsk.thread_group), \
wait_chldexit: __WAIT_QUEUE_HEAD_INITIALIZER(tsk.wait_chldexit),\
real_timer: { \
function: it_real_fn \
}, \
cap_effective: CAP_INIT_EFF_SET, \
cap_inheritable: CAP_INIT_INH_SET, \
cap_permitted: CAP_FULL_SET, \
keep_capabilities: 0, \
rlim: INIT_RLIMITS, \
user: INIT_USER, \
comm: "swapper", \
thread: INIT_THREAD, \
fs: &init_fs, \
files: &init_files, \
sigmask_lock: SPIN_LOCK_UNLOCKED, \
sig: &init_signals, \
pending: { NULL, &tsk.pending.head, {{0}}}, \
blocked: {{0}}, \
alloc_lock: SPIN_LOCK_UNLOCKED, \
switch_lock: SPIN_LOCK_UNLOCKED, \
journal_info: NULL, \
.state = 0, \
.thread_info = &init_thread_info, \
.flags = 0, \
.lock_depth = -1, \
.prio = MAX_PRIO-20, \
.static_prio = MAX_PRIO-20, \
.policy = SCHED_NORMAL, \
.cpus_allowed = -1, \
.mm = NULL, \
.active_mm = &init_mm, \
.run_list = LIST_HEAD_INIT(tsk.run_list), \
.time_slice = HZ, \
.tasks = LIST_HEAD_INIT(tsk.tasks), \
.real_parent = &tsk, \
.parent = &tsk, \
.children = LIST_HEAD_INIT(tsk.children), \
.sibling = LIST_HEAD_INIT(tsk.sibling), \
.thread_group = LIST_HEAD_INIT(tsk.thread_group), \
.wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(tsk.wait_chldexit),\
.real_timer = { \
.function = it_real_fn \
}, \
.cap_effective = CAP_INIT_EFF_SET, \
.cap_inheritable = CAP_INIT_INH_SET, \
.cap_permitted = CAP_FULL_SET, \
.keep_capabilities = 0, \
.rlim = INIT_RLIMITS, \
.user = INIT_USER, \
.comm = "swapper", \
.thread = INIT_THREAD, \
.fs = &init_fs, \
.files = &init_files, \
.sigmask_lock = SPIN_LOCK_UNLOCKED, \
.sig = &init_signals, \
.pending = { NULL, &tsk.pending.head, {{0}}}, \
.blocked = {{0}}, \
.alloc_lock = SPIN_LOCK_UNLOCKED, \
.switch_lock = SPIN_LOCK_UNLOCKED, \
.journal_info = NULL, \
}
......
......@@ -39,10 +39,32 @@ struct rtc_wkalrm {
struct rtc_time time; /* time the alarm is set to */
};
/*
* Data structure to control PLL correction some better RTC feature
* pll_value is used to get or set current value of correction,
* the rest of the struct is used to query HW capabilities.
* This is modeled after the RTC used in Q40/Q60 computers but
* should be sufficiently flexible for other devices
*
* +ve pll_value means clock will run faster by
* pll_value*pll_posmult/pll_clock
* -ve pll_value means clock will run slower by
* pll_value*pll_negmult/pll_clock
*/
struct rtc_pll_info {
int pll_ctrl; /* placeholder for fancier control */
int pll_value; /* get/set correction value */
int pll_max; /* max +ve (faster) adjustment value */
int pll_min; /* max -ve (slower) adjustment value */
	int pll_posmult;	/* factor for +ve correction */
	int pll_negmult;	/* factor for -ve correction */
long pll_clock; /* base PLL frequency */
};
/*
* ioctl calls that are permitted to the /dev/rtc interface, if
* CONFIG_RTC/CONFIG_EFI_RTC was enabled.
* any of the RTC drivers are enabled.
*/
#define RTC_AIE_ON _IO('p', 0x01) /* Alarm int. enable on */
......@@ -65,6 +87,8 @@ struct rtc_wkalrm {
#define RTC_WKALM_SET _IOW('p', 0x0f, struct rtc_wkalrm)/* Set wakeup alarm*/
#define RTC_WKALM_RD _IOR('p', 0x10, struct rtc_wkalrm)/* Get wakeup alarm*/
#define RTC_PLL_GET _IOR('p', 0x11, struct rtc_pll_info) /* Get PLL correction */
#define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */
#ifdef __KERNEL__
......
......@@ -30,6 +30,7 @@ struct svc_sock {
#define SK_TEMP 4 /* temp (TCP) socket */
#define SK_QUED 5 /* on serv->sk_sockets */
#define SK_DEAD 6 /* socket closed */
#define SK_CHNGBUF 7 /* need to change snd/rcv buffer sizes */
int sk_reserved; /* space on outq that is reserved */
......
......@@ -734,6 +734,7 @@ extern void usb_deregister_dev(int num_minors, int start_minor);
*/
#define URB_SHORT_NOT_OK 0x0001 /* report short reads as errors */
#define USB_ISO_ASAP 0x0002 /* iso-only, urb->start_frame ignored */
#define URB_NO_DMA_MAP 0x0004 /* urb->*_dma are valid on submit */
#define USB_ASYNC_UNLINK 0x0008 /* usb_unlink_urb() returns asap */
#define USB_NO_FSBR 0x0020 /* UHCI-specific */
#define USB_ZERO_PACKET 0x0040 /* Finish bulk OUTs with short packet */
......@@ -771,11 +772,15 @@ typedef void (*usb_complete_t)(struct urb *);
* @transfer_flags: A variety of flags may be used to affect how URB
* submission, unlinking, or operation are handled. Different
* kinds of URB can use different flags.
* @transfer_buffer: For non-iso transfers, this identifies the buffer
* to (or from) which the I/O request will be performed. This
* buffer must be suitable for DMA; allocate it with kmalloc()
* @transfer_buffer: This identifies the buffer to (or from) which
* the I/O request will be performed (unless URB_NO_DMA_MAP is set).
* This buffer must be suitable for DMA; allocate it with kmalloc()
* or equivalent. For transfers to "in" endpoints, contents of
* this buffer will be modified.
* this buffer will be modified. This buffer is used for data
* phases of control transfers.
* @transfer_dma: When transfer_flags includes URB_NO_DMA_MAP, the device
* driver is saying that it provided this DMA address, which the host
* controller driver should use instead of the transfer_buffer.
* @transfer_buffer_length: How big is transfer_buffer. The transfer may
* be broken up into chunks according to the current maximum packet
* size for the endpoint, which is a function of the configuration
......@@ -789,6 +794,11 @@ typedef void (*usb_complete_t)(struct urb *);
* @setup_packet: Only used for control transfers, this points to eight bytes
* of setup data. Control transfers always start by sending this data
* to the device. Then transfer_buffer is read or written, if needed.
* (Not used when URB_NO_DMA_MAP is set.)
* @setup_dma: For control transfers with URB_NO_DMA_MAP set, the device
* driver has provided this DMA address for the setup packet. The
 * host controller driver should use it instead of setup_packet.
* If there is a data phase, its buffer is identified by transfer_dma.
* @start_frame: Returns the initial frame for interrupt or isochronous
* transfers.
* @number_of_packets: Lists the number of ISO transfer buffers.
......@@ -811,6 +821,23 @@ typedef void (*usb_complete_t)(struct urb *);
* are submitted using usb_submit_urb(), and pending requests may be canceled
* using usb_unlink_urb().
*
* Data Transfer Buffers:
*
* Normally drivers provide I/O buffers allocated with kmalloc() or otherwise
* taken from the general page pool. That is provided by transfer_buffer
* (control requests also use setup_packet), and host controller drivers
* perform a dma mapping (and unmapping) for each buffer transferred. Those
 * mapping operations can be expensive on some platforms (such as using a dma
* bounce buffer), although they're cheap on commodity x86 and ppc hardware.
*
* Alternatively, drivers may pass the URB_NO_DMA_MAP transfer flag, which
* tells the host controller driver that no such mapping is needed since
* the device driver is DMA-aware. For example, they might allocate a DMA
* buffer with usb_buffer_alloc(), or call usb_buffer_map().
* When this transfer flag is provided, host controller drivers will use the
* dma addresses found in the transfer_dma and/or setup_dma fields rather than
 * determining a dma address themselves.
*
* Initialization:
*
* All URBs submitted must initialize dev, pipe,
......@@ -818,10 +845,10 @@ typedef void (*usb_complete_t)(struct urb *);
* The USB_ASYNC_UNLINK transfer flag affects later invocations of
* the usb_unlink_urb() routine.
*
* All non-isochronous URBs must also initialize
* All URBs must also initialize
* transfer_buffer and transfer_buffer_length. They may provide the
* URB_SHORT_NOT_OK transfer flag, indicating that short reads are
* to be treated as errors.
* to be treated as errors; that flag is invalid for write requests.
*
* Bulk URBs may
* use the USB_ZERO_PACKET transfer flag, indicating that bulk OUT transfers
......@@ -896,10 +923,12 @@ struct urb
int status; /* (return) non-ISO status */
unsigned int transfer_flags; /* (in) URB_SHORT_NOT_OK | ...*/
void *transfer_buffer; /* (in) associated data buffer */
dma_addr_t transfer_dma; /* (in) dma addr for transfer_buffer */
int transfer_buffer_length; /* (in) data buffer length */
int actual_length; /* (return) actual transfer length */
int bandwidth; /* bandwidth for INT/ISO request */
unsigned char *setup_packet; /* (in) setup packet (control only) */
dma_addr_t setup_dma; /* (in) dma addr for setup_packet */
int start_frame; /* (modify) start frame (INT/ISO) */
int number_of_packets; /* (in) number of ISO packets */
int interval; /* (in) transfer interval (INT/ISO) */
......@@ -910,6 +939,8 @@ struct urb
struct usb_iso_packet_descriptor iso_frame_desc[0]; /* (in) ISO ONLY */
};
/* -------------------------------------------------------------------------- */
/**
* usb_fill_control_urb - initializes a control urb
* @urb: pointer to the urb to initialize.
......@@ -1032,6 +1063,16 @@ extern struct urb *usb_get_urb(struct urb *urb);
extern int usb_submit_urb(struct urb *urb, int mem_flags);
extern int usb_unlink_urb(struct urb *urb);
#define HAVE_USB_BUFFERS
void *usb_buffer_alloc (struct usb_device *dev, size_t size,
int mem_flags, dma_addr_t *dma);
void usb_buffer_free (struct usb_device *dev, size_t size,
void *addr, dma_addr_t dma);
struct urb *usb_buffer_map (struct urb *urb);
void usb_buffer_dmasync (struct urb *urb);
void usb_buffer_unmap (struct urb *urb);
/*-------------------------------------------------------------------*
* SYNCHRONOUS CALL SUPPORT *
*-------------------------------------------------------------------*/
......
#ifndef __LINUX_VMALLOC_H
#define __LINUX_VMALLOC_H
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H
#include <linux/spinlock.h>
#include <asm/pgtable.h>
/* bits in vm_struct->flags */
#define VM_IOREMAP 0x00000001 /* ioremap() and friends */
#define VM_ALLOC 0x00000002 /* vmalloc() */
#define VM_MAP 0x00000004 /* vmap()ed pages */
struct vm_struct {
unsigned long flags;
void * addr;
unsigned long size;
unsigned long phys_addr;
struct vm_struct * next;
void *addr;
unsigned long size;
unsigned long flags;
struct page **pages;
unsigned int nr_pages;
unsigned long phys_addr;
struct vm_struct *next;
};
extern struct vm_struct * get_vm_area (unsigned long size, unsigned long flags);
extern void vfree(void * addr);
extern void * __vmalloc (unsigned long size, int gfp_mask, pgprot_t prot);
extern long vread(char *buf, char *addr, unsigned long count);
extern void vmfree_area_pages(unsigned long address, unsigned long size);
extern int vmalloc_area_pages(unsigned long address, unsigned long size,
int gfp_mask, pgprot_t prot);
extern struct vm_struct *remove_kernel_area(void *addr);
/*
* Various ways to allocate pages.
* Highlevel APIs for driver use
*/
extern void * vmalloc(unsigned long size);
extern void * vmalloc_32(unsigned long size);
extern void *vmalloc(unsigned long size);
extern void *vmalloc_32(unsigned long size);
extern void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot);
extern void vfree(void *addr);
extern void *vmap(struct page **pages, unsigned int count);
extern void vunmap(void *addr);
/*
* Lowlevel-APIs (not for driver use!)
*/
extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *remove_vm_area(void *addr);
extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
struct page ***pages);
extern void unmap_vm_area(struct vm_struct *area);
/*
* vmlist_lock is a read-write spinlock that protects vmlist
* Used in mm/vmalloc.c (get_vm_area() and vfree()) and fs/proc/kcore.c.
 * Internals. Don't use.
*/
extern rwlock_t vmlist_lock;
extern struct vm_struct *vmlist;
extern struct vm_struct * vmlist;
#endif
#endif /* _LINUX_VMALLOC_H */
......@@ -43,16 +43,16 @@ typedef struct __wait_queue_head wait_queue_head_t;
*/
#define __WAITQUEUE_INITIALIZER(name, tsk) { \
task: tsk, \
func: default_wake_function, \
task_list: { NULL, NULL } }
.task = tsk, \
.func = default_wake_function, \
.task_list = { NULL, NULL } }
#define DECLARE_WAITQUEUE(name, tsk) \
wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
lock: SPIN_LOCK_UNLOCKED, \
task_list: { &(name).task_list, &(name).task_list } }
.lock = SPIN_LOCK_UNLOCKED, \
.task_list = { &(name).task_list, &(name).task_list } }
#define DECLARE_WAIT_QUEUE_HEAD(name) \
wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
......
......@@ -24,16 +24,16 @@
extern int get_filesystem_list(char * buf);
asmlinkage long sys_mount(char *dev_name, char *dir_name, char *type,
extern asmlinkage long sys_mount(char *dev_name, char *dir_name, char *type,
unsigned long flags, void *data);
asmlinkage long sys_mkdir(char *name, int mode);
asmlinkage long sys_chdir(char *name);
asmlinkage long sys_chroot(char *name);
asmlinkage long sys_unlink(char *name);
asmlinkage long sys_symlink(char *old, char *new);
asmlinkage long sys_mknod(char *name, int mode, dev_t dev);
asmlinkage long sys_umount(char *name, int flags);
asmlinkage long sys_ioctl(int fd, int cmd, unsigned long arg);
extern asmlinkage long sys_mkdir(const char *name, int mode);
extern asmlinkage long sys_chdir(const char *name);
extern asmlinkage long sys_chroot(const char *name);
extern asmlinkage long sys_unlink(const char *name);
extern asmlinkage long sys_symlink(const char *old, const char *new);
extern asmlinkage long sys_mknod(const char *name, int mode, dev_t dev);
extern asmlinkage long sys_umount(char *name, int flags);
extern asmlinkage long sys_ioctl(int fd, int cmd, unsigned long arg);
#ifdef CONFIG_BLK_DEV_INITRD
unsigned int real_root_dev; /* do_proc_dointvec cannot handle kdev_t */
......
......@@ -109,6 +109,8 @@ EXPORT_SYMBOL(vfree);
EXPORT_SYMBOL(__vmalloc);
EXPORT_SYMBOL(vmalloc);
EXPORT_SYMBOL(vmalloc_32);
EXPORT_SYMBOL(vmap);
EXPORT_SYMBOL(vunmap);
EXPORT_SYMBOL(vmalloc_to_page);
EXPORT_SYMBOL(mem_map);
EXPORT_SYMBOL(remap_page_range);
......@@ -220,6 +222,7 @@ EXPORT_SYMBOL(generic_file_read);
EXPORT_SYMBOL(generic_file_sendfile);
EXPORT_SYMBOL(do_generic_file_read);
EXPORT_SYMBOL(generic_file_write);
EXPORT_SYMBOL(generic_file_write_nolock);
EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_ro_fops);
EXPORT_SYMBOL(file_lock_list);
......
......@@ -1109,7 +1109,7 @@ static int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned
return written;
}
ssize_t generic_file_sendfile(struct file *in_file, struct file *out_file,
ssize_t generic_file_sendfile(struct file *out_file, struct file *in_file,
loff_t *ppos, size_t count)
{
read_descriptor_t desc;
......@@ -1812,9 +1812,8 @@ inline void remove_suid(struct dentry *dentry)
* it for writing by marking it dirty.
* okir@monad.swb.de
*/
ssize_t
generic_file_write(struct file *file, const char *buf,
size_t count, loff_t *ppos)
ssize_t generic_file_write_nolock(struct file *file, const char *buf,
size_t count, loff_t *ppos)
{
struct address_space * mapping = file->f_dentry->d_inode->i_mapping;
struct address_space_operations *a_ops = mapping->a_ops;
......@@ -1829,18 +1828,15 @@ generic_file_write(struct file *file, const char *buf,
unsigned bytes;
time_t time_now;
if (unlikely((ssize_t) count < 0))
if (unlikely((ssize_t)count < 0))
return -EINVAL;
if (unlikely(!access_ok(VERIFY_READ, buf, count)))
return -EFAULT;
down(&inode->i_sem);
pos = *ppos;
if (unlikely(pos < 0)) {
err = -EINVAL;
goto out;
}
if (unlikely(pos < 0))
return -EINVAL;
if (unlikely(file->f_error)) {
err = file->f_error;
......@@ -2037,6 +2033,18 @@ generic_file_write(struct file *file, const char *buf,
out_status:
err = written ? written : status;
out:
return err;
}
ssize_t generic_file_write(struct file *file, const char *buf,
size_t count, loff_t *ppos)
{
struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
int err;
down(&inode->i_sem);
err = generic_file_write_nolock(file, buf, count, ppos);
up(&inode->i_sem);
return err;
}
......@@ -4,27 +4,28 @@
* Copyright (C) 1993 Linus Torvalds
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
* SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
* Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
*/
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct * vmlist;
struct vm_struct *vmlist;
static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
static inline void unmap_area_pte(pmd_t *pmd, unsigned long address,
unsigned long size)
{
pte_t * pte;
unsigned long end;
pte_t *pte;
if (pmd_none(*pmd))
return;
......@@ -33,11 +34,13 @@ static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned lo
pmd_clear(pmd);
return;
}
pte = pte_offset_kernel(pmd, address);
address &= ~PMD_MASK;
end = address + size;
if (end > PMD_SIZE)
end = PMD_SIZE;
do {
pte_t page;
page = ptep_get_and_clear(pte);
......@@ -45,24 +48,17 @@ static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned lo
pte++;
if (pte_none(page))
continue;
if (pte_present(page)) {
struct page *ptpage;
unsigned long pfn = pte_pfn(page);
if (!pfn_valid(pfn))
continue;
ptpage = pfn_to_page(pfn);
if (!PageReserved(ptpage))
__free_page(ptpage);
if (pte_present(page))
continue;
}
printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
} while (address < end);
}
static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
static inline void unmap_area_pmd(pgd_t *dir, unsigned long address,
unsigned long size)
{
pmd_t * pmd;
unsigned long end;
pmd_t *pmd;
if (pgd_none(*dir))
return;
......@@ -71,36 +67,23 @@ static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned lo
pgd_clear(dir);
return;
}
pmd = pmd_offset(dir, address);
address &= ~PGDIR_MASK;
end = address + size;
if (end > PGDIR_SIZE)
end = PGDIR_SIZE;
do {
free_area_pte(pmd, address, end - address);
unmap_area_pte(pmd, address, end - address);
address = (address + PMD_SIZE) & PMD_MASK;
pmd++;
} while (address < end);
}
void vmfree_area_pages(unsigned long start, unsigned long size)
{
pgd_t * dir;
unsigned long address = start;
unsigned long end = start + size;
dir = pgd_offset_k(address);
flush_cache_all();
do {
free_area_pmd(dir, address, end - address);
address = (address + PGDIR_SIZE) & PGDIR_MASK;
dir++;
} while (address && (address < end));
flush_tlb_kernel_range(start, end);
}
static inline int alloc_area_pte (pte_t * pte, unsigned long address,
unsigned long size, int gfp_mask, pgprot_t prot)
static inline int map_area_pte(pte_t *pte, unsigned long address,
unsigned long size, pgprot_t prot,
struct page ***pages)
{
unsigned long end;
......@@ -108,23 +91,26 @@ static inline int alloc_area_pte (pte_t * pte, unsigned long address,
end = address + size;
if (end > PMD_SIZE)
end = PMD_SIZE;
do {
struct page * page;
spin_unlock(&init_mm.page_table_lock);
page = alloc_page(gfp_mask);
spin_lock(&init_mm.page_table_lock);
struct page *page = **pages;
if (!pte_none(*pte))
printk(KERN_ERR "alloc_area_pte: page already exists\n");
if (!page)
return -ENOMEM;
set_pte(pte, mk_pte(page, prot));
address += PAGE_SIZE;
pte++;
(*pages)++;
} while (address < end);
return 0;
}
static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size, int gfp_mask, pgprot_t prot)
static inline int map_area_pmd(pmd_t *pmd, unsigned long address,
unsigned long size, pgprot_t prot,
struct page ***pages)
{
unsigned long end;
......@@ -132,76 +118,108 @@ static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned lo
end = address + size;
if (end > PGDIR_SIZE)
end = PGDIR_SIZE;
do {
pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
if (!pte)
return -ENOMEM;
if (alloc_area_pte(pte, address, end - address, gfp_mask, prot))
if (map_area_pte(pte, address, end - address, prot, pages))
return -ENOMEM;
address = (address + PMD_SIZE) & PMD_MASK;
pmd++;
} while (address < end);
return 0;
}
inline int vmalloc_area_pages (unsigned long address, unsigned long size,
int gfp_mask, pgprot_t prot)
void unmap_vm_area(struct vm_struct *area)
{
unsigned long address = VMALLOC_VMADDR(area->addr);
unsigned long end = (address + area->size);
pgd_t *dir;
dir = pgd_offset_k(address);
flush_cache_all();
do {
unmap_area_pmd(dir, address, end - address);
address = (address + PGDIR_SIZE) & PGDIR_MASK;
dir++;
} while (address && (address < end));
flush_tlb_kernel_range(VMALLOC_VMADDR(area->addr), end);
}
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
pgd_t * dir;
unsigned long end = address + size;
int ret;
unsigned long address = VMALLOC_VMADDR(area->addr);
unsigned long end = address + (area->size-PAGE_SIZE);
pgd_t *dir;
dir = pgd_offset_k(address);
spin_lock(&init_mm.page_table_lock);
do {
pmd_t *pmd;
pmd = pmd_alloc(&init_mm, dir, address);
ret = -ENOMEM;
pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
if (!pmd)
break;
ret = -ENOMEM;
if (alloc_area_pmd(pmd, address, end - address, gfp_mask, prot))
break;
return -ENOMEM;
if (map_area_pmd(pmd, address, end - address, prot, pages))
return -ENOMEM;
address = (address + PGDIR_SIZE) & PGDIR_MASK;
dir++;
ret = 0;
} while (address && (address < end));
spin_unlock(&init_mm.page_table_lock);
flush_cache_all();
return ret;
return 0;
}
struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
/**
* get_vm_area - reserve a contingous kernel virtual area
*
* @size: size of the area
* @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
*
* Search an area of @size in the kernel virtual mapping area,
* and reserved it for out purposes. Returns the area descriptor
* on success or %NULL on failure.
*/
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
unsigned long addr;
struct vm_struct **p, *tmp, *area;
unsigned long addr = VMALLOC_START;
area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
if (!area)
area = kmalloc(sizeof(*area), GFP_KERNEL);
if (unlikely(!area))
return NULL;
/*
* We always allocate a guard page.
*/
size += PAGE_SIZE;
addr = VMALLOC_START;
write_lock(&vmlist_lock);
for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
for (p = &vmlist; (tmp = *p) ;p = &tmp->next) {
if ((size + addr) < addr)
goto out;
if (size + addr <= (unsigned long) tmp->addr)
break;
addr = tmp->size + (unsigned long) tmp->addr;
if (size + addr <= (unsigned long)tmp->addr)
goto found;
addr = tmp->size + (unsigned long)tmp->addr;
if (addr > VMALLOC_END-size)
goto out;
}
area->phys_addr = 0;
found:
area->next = *p;
*p = area;
area->flags = flags;
area->addr = (void *)addr;
area->size = size;
area->next = *p;
*p = area;
area->pages = NULL;
area->nr_pages = 0;
area->phys_addr = 0;
write_unlock(&vmlist_lock);
return area;
out:
......@@ -210,87 +228,203 @@ struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
return NULL;
}
struct vm_struct *remove_kernel_area(void *addr)
/**
* remove_vm_area - find and remove a contingous kernel virtual area
*
* @addr: base address
*
* Search for the kernel VM area starting at @addr, and remove it.
* This function returns the found VM area, but using it is NOT safe
* on SMP machines.
*/
struct vm_struct *remove_vm_area(void *addr)
{
struct vm_struct **p, *tmp;
write_lock(&vmlist_lock);
for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
if (tmp->addr == addr) {
*p = tmp->next;
write_unlock(&vmlist_lock);
return tmp;
}
write_lock(&vmlist_lock);
for (p = &vmlist ; (tmp = *p) ;p = &tmp->next) {
if (tmp->addr == addr)
goto found;
}
write_unlock(&vmlist_lock);
return NULL;
}
void vfree(void * addr)
found:
*p = tmp->next;
write_unlock(&vmlist_lock);
return tmp;
}
void __vunmap(void *addr, int deallocate_pages)
{
struct vm_struct *tmp;
struct vm_struct *area;
if (!addr)
return;
if ((PAGE_SIZE-1) & (unsigned long) addr) {
if ((PAGE_SIZE-1) & (unsigned long)addr) {
printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
return;
}
tmp = remove_kernel_area(addr);
if (tmp) {
vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
kfree(tmp);
return;
area = remove_vm_area(addr);
if (unlikely(!area)) {
printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
addr);
return;
}
unmap_vm_area(area);
if (deallocate_pages) {
int i;
for (i = 0; i < area->nr_pages; i++) {
if (unlikely(!area->pages[i]))
BUG();
__free_page(area->pages[i]);
}
printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr);
kfree(area->pages);
}
kfree(area);
return;
}
/*
* Allocate any pages
/**
* vfree - release memory allocated by vmalloc()
*
* @addr: memory base address
*
* Free the virtually continguos memory area starting at @addr, as
* obtained from vmalloc(), vmalloc_32() or __vmalloc().
*
* May not be called in interrupt context.
*/
void * vmalloc (unsigned long size)
void vfree(void *addr)
{
return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
__vunmap(addr, 1);
}
/*
* Allocate ISA addressable pages for broke crap
/**
* vunmap - release virtual mapping obtained by vmap()
*
* @addr: memory base address
*
* Free the virtually continguos memory area starting at @addr,
* which was created from the page array passed to vmap().
*
* May not be called in interrupt context.
*/
void * vmalloc_dma (unsigned long size)
void vunmap(void *addr)
{
return __vmalloc(size, GFP_KERNEL|GFP_DMA, PAGE_KERNEL);
__vunmap(addr, 0);
}
/*
* vmalloc 32bit PA addressable pages - eg for PCI 32bit devices
/**
* vmap - map an array of pages into virtually continguos space
*
* @pages: array of page pointers
* @count: number of pages to map
*
* Maps @count pages from @pages into continguos kernel virtual
* space.
*/
void * vmalloc_32(unsigned long size)
void *vmap(struct page **pages, unsigned int count)
{
return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
struct vm_struct *area;
if (count > num_physpages)
return NULL;
area = get_vm_area((count << PAGE_SHIFT), VM_MAP);
if (!area)
return NULL;
if (map_vm_area(area, PAGE_KERNEL, &pages)) {
vunmap(area->addr);
return NULL;
}
return area->addr;
}
void * __vmalloc (unsigned long size, int gfp_mask, pgprot_t prot)
/**
* __vmalloc - allocate virtually continguos memory
*
* @size: allocation size
* @gfp_mask: flags for the page level allocator
* @prot: protection mask for the allocated pages
*
* Allocate enough pages to cover @size from the page level
* allocator with @gfp_mask flags. Map them into continguos
* kernel virtual space, using a pagetable protection of @prot.
*/
void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot)
{
void * addr;
struct vm_struct *area;
struct page **pages;
unsigned int nr_pages, array_size, i;
size = PAGE_ALIGN(size);
if (!size || (size >> PAGE_SHIFT) > num_physpages) {
BUG();
if (!size || (size >> PAGE_SHIFT) > num_physpages)
return NULL;
}
area = get_vm_area(size, VM_ALLOC);
if (!area)
return NULL;
addr = area->addr;
if (vmalloc_area_pages(VMALLOC_VMADDR(addr), size, gfp_mask, prot)) {
vfree(addr);
nr_pages = (size+PAGE_SIZE) >> PAGE_SHIFT;
array_size = (nr_pages * sizeof(struct page *));
area->nr_pages = nr_pages;
area->pages = pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
if (!area->pages)
return NULL;
memset(area->pages, 0, array_size);
for (i = 0; i < area->nr_pages; i++) {
area->pages[i] = alloc_page(gfp_mask);
if (unlikely(!area->pages[i]))
goto fail;
}
return addr;
if (map_vm_area(area, prot, &pages))
goto fail;
return area->addr;
fail:
vfree(area->addr);
return NULL;
}
/**
* vmalloc - allocate virtually continguos memory
*
* @size: allocation size
*
* Allocate enough pages to cover @size from the page level
* allocator and map them into continguos kernel virtual space.
*
* For tight cotrol over page level allocator and protection flags
* use __vmalloc() instead.
*/
void *vmalloc(unsigned long size)
{
return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
/**
* vmalloc_32 - allocate virtually continguos memory (32bit addressable)
*
* @size: allocation size
*
* Allocate enough 32bit PA addressable pages to cover @size from the
* page level allocator and map them into continguos kernel virtual space.
*/
void *vmalloc_32(unsigned long size)
{
return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
long vread(char *buf, char *addr, unsigned long count)
......
......@@ -481,6 +481,16 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
u32 *data;
int err, len;
if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
/* udp sockets need large rcvbuf as all pending
* requests are still in that buffer. sndbuf must
* also be large enough that there is enough space
* for one reply per thread.
*/
svc_sock_setbufsize(svsk->sk_sock,
(serv->sv_nrthreads+3) * serv->sv_bufsz,
(serv->sv_nrthreads+3) * serv->sv_bufsz);
clear_bit(SK_DATA, &svsk->sk_flags);
while ((skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
svc_sock_received(svsk);
......@@ -564,6 +574,8 @@ svc_udp_init(struct svc_sock *svsk)
svsk->sk_recvfrom = svc_udp_recvfrom;
svsk->sk_sendto = svc_udp_sendto;
set_bit(SK_CHNGBUF, &svsk->sk_flags);
return 0;
}
......@@ -771,6 +783,18 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
return 0;
}
if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
/* sndbuf needs to have room for one request
* per thread, otherwise we can stall even when the
* network isn't a bottleneck.
* rcvbuf just needs to be able to hold a few requests.
* Normally they will be removed from the queue
* as soon a a complete request arrives.
*/
svc_sock_setbufsize(svsk->sk_sock,
(serv->sv_nrthreads+3) * serv->sv_bufsz,
3 * serv->sv_bufsz);
clear_bit(SK_DATA, &svsk->sk_flags);
/* Receive data. If we haven't got the record length yet, get
......@@ -787,6 +811,9 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
goto error;
svsk->sk_tcplen += len;
if (len < want)
return 0;
svsk->sk_reclen = ntohl(svsk->sk_reclen);
if (!(svsk->sk_reclen & 0x80000000)) {
/* FIXME: technically, a record can be fragmented,
......@@ -887,8 +914,9 @@ svc_tcp_sendto(struct svc_rqst *rqstp)
sent = svc_sendto(rqstp, bufp->iov, bufp->nriov);
if (sent != bufp->len<<2) {
printk(KERN_NOTICE "rpc-srv/tcp: %s: sent only %d bytes of %d - should shutdown socket\n",
printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
rqstp->rq_sock->sk_server->sv_name,
(sent<0)?"got error":"sent only",
sent, bufp->len << 2);
svc_delete_socket(rqstp->rq_sock);
sent = -EAGAIN;
......@@ -916,17 +944,7 @@ svc_tcp_init(struct svc_sock *svsk)
svsk->sk_reclen = 0;
svsk->sk_tcplen = 0;
/* sndbuf needs to have room for one request
* per thread, otherwise we can stall even when the
* network isn't a bottleneck.
* rcvbuf just needs to be able to hold a few requests.
* Normally they will be removed from the queue
* as soon a a complete request arrives.
*/
svc_sock_setbufsize(svsk->sk_sock,
(svsk->sk_server->sv_nrthreads+3) *
svsk->sk_server->sv_bufsz,
3 * svsk->sk_server->sv_bufsz);
set_bit(SK_CHNGBUF, &svsk->sk_flags);
}
return 0;
......@@ -945,30 +963,12 @@ svc_sock_update_bufs(struct svc_serv *serv)
list_for_each(le, &serv->sv_permsocks) {
struct svc_sock *svsk =
list_entry(le, struct svc_sock, sk_list);
struct socket *sock = svsk->sk_sock;
if (sock->type == SOCK_DGRAM) {
/* udp sockets need large rcvbuf as all pending
* requests are still in that buffer.
*/
svc_sock_setbufsize(sock,
(serv->sv_nrthreads+3) * serv->sv_bufsz,
(serv->sv_nrthreads+3) * serv->sv_bufsz);
} else if (svsk->sk_sk->state != TCP_LISTEN) {
printk(KERN_ERR "RPC update_bufs: permanent sock neither UDP or TCP_LISTEN\n");
}
set_bit(SK_CHNGBUF, &svsk->sk_flags);
}
list_for_each(le, &serv->sv_tempsocks) {
struct svc_sock *svsk =
list_entry(le, struct svc_sock, sk_list);
struct socket *sock = svsk->sk_sock;
if (sock->type == SOCK_STREAM) {
/* See svc_tcp_init above for rationale on buffer sizes */
svc_sock_setbufsize(sock,
(serv->sv_nrthreads+3) *
serv->sv_bufsz,
3 * serv->sv_bufsz);
} else
printk(KERN_ERR "RPC update_bufs: temp sock not TCP\n");
set_bit(SK_CHNGBUF, &svsk->sk_flags);
}
spin_unlock_bh(&serv->sv_lock);
}
......@@ -1212,6 +1212,7 @@ svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin)
return error;
if (sin != NULL) {
sock->sk->reuse = 1; /* allow address reuse */
error = sock->ops->bind(sock, (struct sockaddr *) sin,
sizeof(*sin));
if (error < 0)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment