Commit 570cc4e0 authored by Alan Cox, committed by Linus Torvalds

[PATCH] Update i2o core functionality to 2.5

parent 5b4831c9
/*
* I2O Configuration Interface Driver
*
* (C) Copyright 1999 Red Hat Software
* (C) Copyright 1999-2002 Red Hat
*
* Written by Alan Cox, Building Number Three Ltd
*
......@@ -16,17 +16,17 @@
* - Fixed ioctl_swdl()
* Modified 10/04/1999 by Taneli Vähäkangas
* - Changed ioctl_swdl(), implemented ioctl_swul() and ioctl_swdel()
* Modified 11/18/199 by Deepak Saxena
* Modified 11/18/1999 by Deepak Saxena
* - Added event managmenet support
*
* 2.4 rewrite ported to 2.5 - Alan Cox <alan@redhat.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#error Please convert me to Documentation/DMA-mapping.txt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
......@@ -47,7 +47,7 @@ static void *page_buf;
static spinlock_t i2o_config_lock = SPIN_LOCK_UNLOCKED;
struct wait_queue *i2o_wait_queue;
#define MODINC(x,y) (x = x++ % y)
#define MODINC(x,y) ((x) = ((x) + 1) % (y))
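The macro pair above deserves a note: the old MODINC expanded to x = x++ % y, which modifies x twice in one expression (undefined behaviour in C) and in practice never wrapped the index the way the caller expected. The replacement evaluates once, adds one, wraps, and assigns exactly once. A minimal standalone sketch of the corrected ring-index increment (illustrative only, not part of the commit):

#include <stdio.h>

/* Corrected modular increment: add one, wrap, assign exactly once. */
#define MODINC(x,y) ((x) = ((x) + 1) % (y))

int main(void)
{
	unsigned int idx = 0;
	int i;

	for (i = 0; i < 5; i++) {
		MODINC(idx, 4);		/* ring of 4 entries */
		printf("%u ", idx);	/* prints: 1 2 3 0 1 */
	}
	printf("\n");
	return 0;
}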
struct i2o_cfg_info
{
......@@ -279,6 +279,13 @@ int ioctl_getiops(unsigned long arg)
if(c)
{
foo[i] = 1;
if(pci_set_dma_mask(c->pdev, 0xffffffff))
{
printk(KERN_WARNING "i2o_config : No suitable DMA available on controller %d\n", i);
i2o_unlock_controller(c);
continue;
}
i2o_unlock_controller(c);
}
else
......@@ -445,11 +452,12 @@ int ioctl_html(unsigned long arg)
struct i2o_controller *c;
u8 *res = NULL;
void *query = NULL;
dma_addr_t query_phys, res_phys;
int ret = 0;
int token;
u32 len;
u32 reslen;
u32 msg[MSG_FRAME_SIZE/4];
u32 msg[MSG_FRAME_SIZE];
if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_html)))
{
......@@ -475,7 +483,7 @@ int ioctl_html(unsigned long arg)
if(kcmd.qlen) /* Check for post data */
{
query = kmalloc(kcmd.qlen, GFP_KERNEL);
query = pci_alloc_consistent(c->pdev, kcmd.qlen, &query_phys);
if(!query)
{
i2o_unlock_controller(c);
......@@ -485,16 +493,16 @@ int ioctl_html(unsigned long arg)
{
i2o_unlock_controller(c);
printk(KERN_INFO "i2o_config: could not get query\n");
kfree(query);
pci_free_consistent(c->pdev, kcmd.qlen, query, query_phys);
return -EFAULT;
}
}
res = kmalloc(65536, GFP_KERNEL);
res = pci_alloc_consistent(c->pdev, 65536, &res_phys);
if(!res)
{
i2o_unlock_controller(c);
kfree(query);
pci_free_consistent(c->pdev, kcmd.qlen, query, query_phys);
return -ENOMEM;
}
......@@ -503,7 +511,7 @@ int ioctl_html(unsigned long arg)
msg[3] = 0;
msg[4] = kcmd.page;
msg[5] = 0xD0000000|65536;
msg[6] = virt_to_bus(res);
msg[6] = res_phys;
if(!kcmd.qlen) /* Check for post data */
msg[0] = SEVEN_WORD_MSG_SIZE|SGL_OFFSET_5;
else
......@@ -511,7 +519,7 @@ int ioctl_html(unsigned long arg)
msg[0] = NINE_WORD_MSG_SIZE|SGL_OFFSET_5;
msg[5] = 0x50000000|65536;
msg[7] = 0xD4000000|(kcmd.qlen);
msg[8] = virt_to_bus(query);
msg[8] = query_phys;
}
/*
Wait for a considerable time till the Controller
......@@ -519,7 +527,7 @@ int ioctl_html(unsigned long arg)
take more time to process this request if there are
many devices connected to it.
*/
token = i2o_post_wait_mem(c, msg, 9*4, 400, query, res);
token = i2o_post_wait_mem(c, msg, 9*4, 400, query, res, query_phys, res_phys, kcmd.qlen, 65536);
if(token < 0)
{
printk(KERN_DEBUG "token = %#10x\n", token);
......@@ -527,10 +535,10 @@ int ioctl_html(unsigned long arg)
if(token != -ETIMEDOUT)
{
kfree(res);
if(kcmd.qlen) kfree(query);
pci_free_consistent(c->pdev, 65536, res, res_phys);
if(kcmd.qlen)
pci_free_consistent(c->pdev, kcmd.qlen, query, query_phys);
}
return token;
}
i2o_unlock_controller(c);
......@@ -542,9 +550,9 @@ int ioctl_html(unsigned long arg)
if(copy_to_user(kcmd.resbuf, res, len))
ret = -EFAULT;
kfree(res);
pci_free_consistent(c->pdev, 65536, res, res_phys);
if(kcmd.qlen)
kfree(query);
pci_free_consistent(c->pdev, kcmd.qlen, query, query_phys);
return ret;
}
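The ioctl_html() hunks above show the conversion pattern this patch applies throughout the configuration driver: buffers the controller writes into are no longer obtained with kmalloc() and passed via virt_to_bus(), but allocated with pci_alloc_consistent(), whose returned bus address goes straight into the message's SG entries; i2o_post_wait_mem() additionally receives the DMA handles and lengths so the core can finish the cleanup when a request times out and the reply arrives late, which is why the caller still skips the free on -ETIMEDOUT. A minimal sketch of the allocation half of that pattern, assuming a 2.4/2.5-era kernel; the do_query() helper is invented for illustration:

#include <linux/pci.h>
#include <linux/errno.h>

/* Invented helper: allocate a DMA-coherent reply buffer, use its bus
 * address in a message, then release it.  Sketch only. */
static int do_query(struct pci_dev *pdev, size_t len)
{
	dma_addr_t buf_phys;
	void *buf;

	/* Fail early if the device cannot address our memory. */
	if (pci_set_dma_mask(pdev, 0xffffffff))
		return -EIO;

	buf = pci_alloc_consistent(pdev, len, &buf_phys);
	if (!buf)
		return -ENOMEM;

	/* buf_phys, not virt_to_bus(buf), is what belongs in msg[] here. */

	pci_free_consistent(pdev, len, buf, buf_phys);
	return 0;
}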
......@@ -558,6 +566,7 @@ int ioctl_swdl(unsigned long arg)
u32 msg[9];
unsigned int status = 0, swlen = 0, fragsize = 8192;
struct i2o_controller *c;
dma_addr_t buffer_phys;
if(copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
return -EFAULT;
......@@ -580,7 +589,7 @@ int ioctl_swdl(unsigned long arg)
if(!c)
return -ENXIO;
buffer=kmalloc(fragsize, GFP_KERNEL);
buffer=pci_alloc_consistent(c->pdev, fragsize, &buffer_phys);
if (buffer==NULL)
{
i2o_unlock_controller(c);
......@@ -597,14 +606,14 @@ int ioctl_swdl(unsigned long arg)
msg[5]= swlen;
msg[6]= kxfer.sw_id;
msg[7]= (0xD0000000 | fragsize);
msg[8]= virt_to_bus(buffer);
msg[8]= buffer_phys;
// printk("i2o_config: swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
status = i2o_post_wait_mem(c, msg, sizeof(msg), 60, buffer, NULL);
status = i2o_post_wait_mem(c, msg, sizeof(msg), 60, buffer, NULL, buffer_phys, 0, fragsize, 0);
i2o_unlock_controller(c);
if(status != -ETIMEDOUT)
kfree(buffer);
pci_free_consistent(c->pdev, fragsize, buffer, buffer_phys);
if (status != I2O_POST_WAIT_OK)
{
......@@ -626,6 +635,7 @@ int ioctl_swul(unsigned long arg)
u32 msg[9];
unsigned int status = 0, swlen = 0, fragsize = 8192;
struct i2o_controller *c;
dma_addr_t buffer_phys;
if(copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
return -EFAULT;
......@@ -648,7 +658,7 @@ int ioctl_swul(unsigned long arg)
if(!c)
return -ENXIO;
buffer=kmalloc(fragsize, GFP_KERNEL);
buffer=pci_alloc_consistent(c->pdev, fragsize, &buffer_phys);
if (buffer==NULL)
{
i2o_unlock_controller(c);
......@@ -663,22 +673,22 @@ int ioctl_swul(unsigned long arg)
msg[5]= swlen;
msg[6]= kxfer.sw_id;
msg[7]= (0xD0000000 | fragsize);
msg[8]= virt_to_bus(buffer);
msg[8]= buffer_phys;
// printk("i2o_config: swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
status = i2o_post_wait_mem(c, msg, sizeof(msg), 60, buffer, NULL);
status = i2o_post_wait_mem(c, msg, sizeof(msg), 60, buffer, NULL, buffer_phys, 0, fragsize, 0);
i2o_unlock_controller(c);
if (status != I2O_POST_WAIT_OK)
{
if(status != -ETIMEDOUT)
kfree(buffer);
pci_free_consistent(c->pdev, fragsize, buffer, buffer_phys);
printk(KERN_INFO "i2o_config: swul failed, DetailedStatus = %d\n", status);
return status;
}
__copy_to_user(kxfer.buf, buffer, fragsize);
kfree(buffer);
pci_free_consistent(c->pdev, fragsize, buffer, buffer_phys);
return 0;
}
......@@ -849,6 +859,7 @@ static int cfg_release(struct inode *inode, struct file *file)
struct i2o_cfg_info *p1, *p2;
unsigned long flags;
lock_kernel();
p1 = p2 = NULL;
spin_lock_irqsave(&i2o_config_lock, flags);
......@@ -871,6 +882,7 @@ static int cfg_release(struct inode *inode, struct file *file)
p1 = p1->next;
}
spin_unlock_irqrestore(&i2o_config_lock, flags);
unlock_kernel();
return 0;
}
......@@ -908,11 +920,7 @@ static struct miscdevice i2o_miscdev = {
&config_fops
};
#ifdef MODULE
int init_module(void)
#else
int __init i2o_config_init(void)
#endif
static int __init i2o_config_init(void)
{
printk(KERN_INFO "I2O configuration manager v 0.04.\n");
printk(KERN_INFO " (C) Copyright 1999 Red Hat Software\n");
......@@ -946,9 +954,7 @@ int __init i2o_config_init(void)
return 0;
}
#ifdef MODULE
void cleanup_module(void)
static void i2o_config_exit(void)
{
misc_deregister(&i2o_miscdev);
......@@ -958,8 +964,10 @@ void cleanup_module(void)
i2o_remove_handler(&cfg_handler);
}
EXPORT_NO_SYMBOLS;
MODULE_AUTHOR("Red Hat Software");
MODULE_DESCRIPTION("I2O Configuration");
MODULE_LICENSE("GPL");
#endif
module_init(i2o_config_init);
module_exit(i2o_config_exit);
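The init/exit hunks replace the old #ifdef MODULE dance, where init_module()/cleanup_module() served the modular build and i2o_config_init() the built-in one, with the module_init()/module_exit() macros, which expand to whichever entry points the build actually needs. A minimal sketch of that idiom for a hypothetical driver (not this one):

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>

static int __init example_init(void)
{
	printk(KERN_INFO "example: loaded\n");
	return 0;		/* non-zero would fail the load */
}

static void __exit example_exit(void)
{
	printk(KERN_INFO "example: unloaded\n");
}

/* Works both built-in and as a module; no #ifdef MODULE needed. */
module_init(example_init);
module_exit(example_exit);

MODULE_LICENSE("GPL");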
......@@ -45,6 +45,7 @@
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/tqueue.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
......
......@@ -81,9 +81,9 @@ struct i2o_device
struct i2o_pci
{
int irq;
int queue_buggy:3; /* Don't send a lot of messages */
int short_req:1; /* Use small block sizes */
int dpt:1; /* Don't quiesce */
int promise:1; /* Promise controller */
#ifdef CONFIG_MTRR
int mtrr_reg0;
int mtrr_reg1;
......@@ -112,9 +112,9 @@ struct i2o_controller
atomic_t users;
struct i2o_device *devices; /* I2O device chain */
struct i2o_controller *next; /* Controller chain */
volatile u32 *post_port; /* Inbout port */
volatile u32 *reply_port; /* Outbound port */
volatile u32 *irq_mask; /* Interrupt register */
unsigned long post_port; /* Inbout port address */
unsigned long reply_port; /* Outbound port address */
unsigned long irq_mask; /* Interrupt register address */
/* Dynamic LCT related data */
struct semaphore lct_sem;
......@@ -122,12 +122,17 @@ struct i2o_controller
int lct_running;
i2o_status_block *status_block; /* IOP status block */
dma_addr_t status_block_phys;
i2o_lct *lct; /* Logical Config Table */
dma_addr_t lct_phys;
i2o_lct *dlct; /* Temp LCT */
dma_addr_t dlct_phys;
i2o_hrt *hrt; /* HW Resource Table */
dma_addr_t hrt_phys;
u32 hrt_len;
u32 mem_offset; /* MFA offset */
u32 mem_phys; /* MFA physical */
unsigned long mem_offset; /* MFA offset */
unsigned long mem_phys; /* MFA physical */
int battery:1; /* Has a battery backup */
int io_alloc:1; /* An I/O resource was allocated */
......@@ -252,34 +257,34 @@ struct i2o_sys_tbl
*/
static inline u32 I2O_POST_READ32(struct i2o_controller *c)
{
return *c->post_port;
return readl(c->post_port);
}
static inline void I2O_POST_WRITE32(struct i2o_controller *c, u32 Val)
static inline void I2O_POST_WRITE32(struct i2o_controller *c, u32 val)
{
*c->post_port = Val;
writel(val, c->post_port);
}
static inline u32 I2O_REPLY_READ32(struct i2o_controller *c)
{
return *c->reply_port;
return readl(c->reply_port);
}
static inline void I2O_REPLY_WRITE32(struct i2o_controller *c, u32 Val)
static inline void I2O_REPLY_WRITE32(struct i2o_controller *c, u32 val)
{
*c->reply_port = Val;
writel(val, c->reply_port);
}
static inline u32 I2O_IRQ_READ32(struct i2o_controller *c)
{
return *c->irq_mask;
return readl(c->irq_mask);
}
static inline void I2O_IRQ_WRITE32(struct i2o_controller *c, u32 Val)
static inline void I2O_IRQ_WRITE32(struct i2o_controller *c, u32 val)
{
*c->irq_mask = Val;
writel(val, c->irq_mask);
}
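The accessor changes above reflect the other big theme of the patch: controller registers are now kept as plain unsigned long addresses (obtained via ioremap()) and touched through readl()/writel(), the portable MMIO accessors, instead of being dereferenced as volatile u32 pointers. A minimal sketch of the idiom for a hypothetical PCI device with a single 32-bit doorbell register (the BAR and offset are invented):

#include <linux/pci.h>
#include <asm/io.h>

/* Hypothetical: map BAR 0 and ring a doorbell register at offset 0x40.
 * Sketch only; the register layout is invented. */
static int ring_doorbell(struct pci_dev *pdev, u32 value)
{
	unsigned long regs;

	regs = (unsigned long) ioremap(pci_resource_start(pdev, 0),
				       pci_resource_len(pdev, 0));
	if (!regs)
		return -ENOMEM;

	writel(value, regs + 0x40);	/* MMIO write, not a plain store */
	(void) readl(regs + 0x40);	/* read back to flush the posted write */

	iounmap((void *) regs);
	return 0;
}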
......@@ -295,6 +300,13 @@ static inline void i2o_flush_reply(struct i2o_controller *c, u32 m)
I2O_REPLY_WRITE32(c, m);
}
/*
* Endian handling wrapped into the macro - keeps the core code
* cleaner.
*/
#define i2o_raw_writel(val, mem) __raw_writel(cpu_to_le32(val), mem)
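i2o_raw_writel() complements those accessors when copying whole message frames: cpu_to_le32() makes the little-endian layout of I2O frames explicit, and the raw accessor performs the store itself. A sketch of how a frame copy using it might look (the helper and its arguments are invented, and it assumes an architecture whose __raw_writel accepts an integer address, as the accessors above do):

/* Invented helper: copy a CPU-endian message frame into an ioremap()ed
 * inbound frame in little-endian order, one word at a time. */
static void copy_frame(unsigned long frame, u32 *msg, int words)
{
	int i;

	for (i = 0; i < words; i++)
		i2o_raw_writel(msg[i], frame + 4 * i);
}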
extern struct i2o_controller *i2o_find_controller(int);
extern void i2o_unlock_controller(struct i2o_controller *);
extern struct i2o_controller *i2o_controller_chain;
......@@ -313,7 +325,7 @@ extern int i2o_device_notify_off(struct i2o_device *,
extern int i2o_post_this(struct i2o_controller *, u32 *, int);
extern int i2o_post_wait(struct i2o_controller *, u32 *, int, int);
extern int i2o_post_wait_mem(struct i2o_controller *, u32 *, int, int,
void *, void *);
void *, void *, dma_addr_t, dma_addr_t, int, int);
extern int i2o_query_scalar(struct i2o_controller *, int, int, int, void *,
int);
......@@ -339,6 +351,59 @@ extern int i2o_activate_controller(struct i2o_controller *);
extern void i2o_run_queue(struct i2o_controller *);
extern int i2o_delete_controller(struct i2o_controller *);
/*
* Cache strategies
*/
/* The NULL strategy leaves everything up to the controller. This tends to be a
* pessimal but functional choice.
*/
#define CACHE_NULL 0
/* Prefetch data when reading. We continually attempt to load the next 32 sectors
* into the controller cache.
*/
#define CACHE_PREFETCH 1
/* Prefetch data when reading. We sometimes attempt to load the next 32 sectors
* into the controller cache. When an I/O is less <= 8K we assume its probably
* not sequential and don't prefetch (default)
*/
#define CACHE_SMARTFETCH 2
/* Data is written to the cache and then out on to the disk. The I/O must be
* physically on the medium before the write is acknowledged (default without
* NVRAM)
*/
#define CACHE_WRITETHROUGH 17
/* Data is written to the cache and then out on to the disk. The controller
* is permitted to write back the cache any way it wants. (default if battery
* backed NVRAM is present). It can be useful to set this for swap regardless of
* battery state.
*/
#define CACHE_WRITEBACK 18
/* Optimise for under powered controllers, especially on RAID1 and RAID0. We
* write large I/O's directly to disk bypassing the cache to avoid the extra
* memory copy hits. Small writes are writeback cached
*/
#define CACHE_SMARTBACK 19
/* Optimise for under powered controllers, especially on RAID1 and RAID0. We
* write large I/O's directly to disk bypassing the cache to avoid the extra
* memory copy hits. Small writes are writethrough cached. Suitable for devices
* lacking battery backup
*/
#define CACHE_SMARTTHROUGH 20
/*
* Ioctl structures
*/
#define BLKI2OGRSTRAT _IOR('2', 1, int)
#define BLKI2OGWSTRAT _IOR('2', 2, int)
#define BLKI2OSRSTRAT _IOW('2', 3, int)
#define BLKI2OSWSTRAT _IOW('2', 4, int)
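The BLKI2O*STRAT ioctls expose the cache-strategy constants above to user space, so the read or write caching policy of an i2o block device can be queried or changed per device. A small user-space sketch (the device path is illustrative, and the pointer-argument convention for the set ioctl is an assumption, not taken from this patch):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#define BLKI2OGWSTRAT _IOR('2', 2, int)		/* get write strategy */
#define BLKI2OSWSTRAT _IOW('2', 4, int)		/* set write strategy */
#define CACHE_WRITEBACK 18

int main(void)
{
	int strategy;
	int fd = open("/dev/i2o/hda", O_RDONLY);	/* illustrative path */

	if (fd < 0)
		return 1;

	if (ioctl(fd, BLKI2OGWSTRAT, &strategy) == 0)
		printf("current write strategy: %d\n", strategy);

	strategy = CACHE_WRITEBACK;			/* e.g. battery-backed cache */
	if (ioctl(fd, BLKI2OSWSTRAT, &strategy) != 0)
		perror("BLKI2OSWSTRAT");

	close(fd);
	return 0;
}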
/*
* I2O Function codes
......@@ -416,6 +481,7 @@ extern int i2o_delete_controller(struct i2o_controller *);
#define I2O_CMD_BLOCK_MUNLOCK 0x4B
#define I2O_CMD_BLOCK_MMOUNT 0x41
#define I2O_CMD_BLOCK_MEJECT 0x43
#define I2O_CMD_BLOCK_POWER 0x70
#define I2O_PRIVATE_MSG 0xFF
......@@ -574,6 +640,7 @@ extern int i2o_delete_controller(struct i2o_controller *);
#define EIGHT_WORD_MSG_SIZE 0x00080000
#define NINE_WORD_MSG_SIZE 0x00090000
#define TEN_WORD_MSG_SIZE 0x000A0000
#define ELEVEN_WORD_MSG_SIZE 0x000B0000
#define I2O_MESSAGE_SIZE(x) ((x)<<16)
......@@ -582,10 +649,10 @@ extern int i2o_delete_controller(struct i2o_controller *);
#define ADAPTER_TID 0
#define HOST_TID 1
#define MSG_FRAME_SIZE 128
#define MSG_FRAME_SIZE 64 /* i2o_scsi assumes >= 32 */
#define NMBR_MSG_FRAMES 128
#define MSG_POOL_SIZE 16384
#define MSG_POOL_SIZE (MSG_FRAME_SIZE*NMBR_MSG_FRAMES*sizeof(u32))
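Two points about the sizing defines above. First, the *_WORD_MSG_SIZE constants are just I2O_MESSAGE_SIZE(n), the frame's word count shifted into bits 16-31 of the header word, which is then OR'd with an SGL offset constant as in the msg[0] assignments earlier in this diff. Second, the frame pool now scales with the frame size instead of being hard-coded: 64 words per frame times 128 frames times 4 bytes per word is 32768 bytes, where the old define was a flat 16384. Restated as a small standalone check (values taken from the defines above, nothing new):

#include <stdio.h>

#define I2O_MESSAGE_SIZE(x)	((x)<<16)
#define MSG_FRAME_SIZE		64
#define NMBR_MSG_FRAMES		128

int main(void)
{
	/* Nine-word frame header field: word count in bits 16-31. */
	printf("nine-word size field: 0x%08x\n", I2O_MESSAGE_SIZE(9));	/* 0x00090000 */

	/* Frame pool: words per frame * frames * bytes per word. */
	printf("pool size: %d bytes\n", MSG_FRAME_SIZE * NMBR_MSG_FRAMES * 4);	/* 32768 */
	return 0;
}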
#define I2O_POST_WAIT_OK 0
#define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT
......