Commit 65c113cf authored by Steve French's avatar Steve French

Merge bk://linux.bkbits.net/linux-2.5

into hostme.bitkeeper.com:/repos/c/cifs/linux-2.5cifs
parents df31c021 9a276d67
......@@ -444,4 +444,83 @@ dma_alloc_noncoherent(), starting at virtual address vaddr and
continuing on for size. Again, you *must* observe the cache line
boundaries when doing this.
int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                            dma_addr_t device_addr, size_t size, int flags)
Declare a region of memory to be handed out by dma_alloc_coherent() when
it is asked for coherent memory for this device.
bus_addr is the physical address to which the memory is currently
assigned in the bus responding region (this will be used by the
platform to perform the mapping)
device_addr is the physical address the device needs to be programmed
with in order to address this memory (this will be handed out as the
dma_addr_t in dma_alloc_coherent()).
size is the size of the area (must be a multiple of PAGE_SIZE).
flags can be or'd together and are
DMA_MEMORY_MAP - request that the memory returned from
dma_alloc_coherent() be directly writeable.
DMA_MEMORY_IO - request that the memory returned from
dma_alloc_coherent() be addressable using read/write/memcpy_toio etc.
One or both of these flags must be present
DMA_MEMORY_INCLUDES_CHILDREN - make the declared memory be allocated by
dma_alloc_coherent of any child devices of this one (for memory residing
on a bridge).
DMA_MEMORY_EXCLUSIVE - only allocate memory from the declared regions.
Do not allow dma_alloc_coherent() to fall back to system memory when
it's out of memory in the declared region.
The return value will be either DMA_MEMORY_MAP or DMA_MEMORY_IO and
must correspond to a passed-in flag (i.e. DMA_MEMORY_IO will not be
returned if only DMA_MEMORY_MAP was passed in) on success, or zero on
failure.
Note, for DMA_MEMORY_IO returns, all subsequent memory returned by
dma_alloc_coherent() may no longer be accessed directly, but instead
must be accessed using the correct bus functions. If your driver
isn't prepared to handle this contingency, it should not specify
DMA_MEMORY_IO in the input flags.
As a simplification for the platforms, only *one* such region of
memory may be declared per device.
For reasons of efficiency, most platforms choose to track the declared
region only at the granularity of a page. For smaller allocations,
you should use the dma_pool() API.
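As an illustration only (the device, addresses and sizes below are made
up, and error handling is trimmed), a driver with device-local memory
might declare the region at probe time and then let dma_alloc_coherent()
carve allocations out of it:

	#include <linux/dma-mapping.h>

	static int foo_probe(struct device *dev)
	{
		void *virt;
		dma_addr_t handle;

		/* hand a 64KB device-local window to the DMA API */
		if (dma_declare_coherent_memory(dev, 0xf8000000, 0xf8000000,
						0x10000,
						DMA_MEMORY_MAP |
						DMA_MEMORY_EXCLUSIVE)
		    != DMA_MEMORY_MAP)
			return -ENOMEM;

		/* this allocation is now satisfied from the declared region */
		virt = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
		if (!virt) {
			dma_release_declared_memory(dev);
			return -ENOMEM;
		}
		/* ... use virt/handle; dma_free_coherent() returns the pages ... */
		return 0;
	}

Checking the return value against DMA_MEMORY_MAP matters here, since the
call reports which access method was granted rather than a plain
success code.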
void
dma_release_declared_memory(struct device *dev)
Remove the memory region previously declared from the system. This
API performs *no* in-use checking for this region and will return
unconditionally having removed all the required structures. It is the
driver's job to ensure that no parts of this memory region are
currently in use.
void *
dma_mark_declared_memory_occupied(struct device *dev,
dma_addr_t device_addr, size_t size)
This is used to occupy specific regions of the declared space
(dma_alloc_coherent() will hand out the first free region it finds).
device_addr is the *device* address of the region requested
size is the size of the region (and should be a multiple of the page size).
The return value will be either a pointer to the processor virtual
address of the memory, or an error (obtained via PTR_ERR()) if any part
of the region is already occupied.
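A minimal sketch of reserving a fixed window inside the declared region
(as the NCR_Q720 driver further down in this patch does for its register
map); the helper name and the 1KB length are assumptions:

	#include <linux/dma-mapping.h>
	#include <linux/err.h>

	static void __iomem *foo_map_register_window(struct device *dev,
						     dma_addr_t device_addr)
	{
		/* claim the first 1KB of the declared region for registers */
		void *regs = dma_mark_declared_memory_occupied(dev,
							       device_addr,
							       1024);

		if (IS_ERR(regs))
			return NULL;	/* part of the window already handed out */
		return (void __iomem *)regs;
	}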
......@@ -13,17 +13,40 @@
#include <linux/pci.h>
#include <asm/io.h>
struct dma_coherent_mem {
void *virt_base;
u32 device_base;
int size;
int flags;
unsigned long *bitmap;
};
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, int gfp)
{
void *ret;
struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
int order = get_order(size);
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
if (mem) {
int page = bitmap_find_free_region(mem->bitmap, mem->size,
order);
if (page >= 0) {
*dma_handle = mem->device_base + (page << PAGE_SHIFT);
ret = mem->virt_base + (page << PAGE_SHIFT);
memset(ret, 0, size);
return ret;
}
if (mem->flags & DMA_MEMORY_EXCLUSIVE)
return NULL;
}
if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
gfp |= GFP_DMA;
ret = (void *)__get_free_pages(gfp, get_order(size));
ret = (void *)__get_free_pages(gfp, order);
if (ret != NULL) {
memset(ret, 0, size);
......@@ -35,5 +58,89 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
free_pages((unsigned long)vaddr, get_order(size));
struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
int order = get_order(size);
if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
bitmap_release_region(mem->bitmap, page, order);
} else
free_pages((unsigned long)vaddr, order);
}
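/*
 * Note on the pool arithmetic above (added for clarity): the per-device
 * region is managed as a page-granular bitmap.  A hit at page index n gives
 *	*dma_handle = device_base + (n << PAGE_SHIFT)
 *	vaddr       = virt_base   + (n << PAGE_SHIFT)
 * and dma_free_coherent() recovers n as (vaddr - virt_base) >> PAGE_SHIFT
 * before releasing the same 2^order pages back to the bitmap.
 */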
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
dma_addr_t device_addr, size_t size, int flags)
{
void *mem_base;
int pages = size >> PAGE_SHIFT;
int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
goto out;
if (!size)
goto out;
if (dev->dma_mem)
goto out;
/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
mem_base = ioremap(bus_addr, size);
if (!mem_base)
goto out;
dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
if (!dev->dma_mem)
goto out;
memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
if (!dev->dma_mem->bitmap)
goto free1_out;
memset(dev->dma_mem->bitmap, 0, bitmap_size);
dev->dma_mem->virt_base = mem_base;
dev->dma_mem->device_base = device_addr;
dev->dma_mem->size = pages;
dev->dma_mem->flags = flags;
if (flags & DMA_MEMORY_MAP)
return DMA_MEMORY_MAP;
return DMA_MEMORY_IO;
free1_out:
kfree(dev->dma_mem);
dev->dma_mem = NULL;
out:
return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
void dma_release_declared_memory(struct device *dev)
{
struct dma_coherent_mem *mem = dev->dma_mem;
if(!mem)
return;
dev->dma_mem = NULL;
kfree(mem->bitmap);
kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);
void *dma_mark_declared_memory_occupied(struct device *dev,
dma_addr_t device_addr, size_t size)
{
struct dma_coherent_mem *mem = dev->dma_mem;
int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
int pos, err;
if (!mem)
return ERR_PTR(-EINVAL);
pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages << PAGE_SHIFT));
if (err != 0)
return ERR_PTR(err);
return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
/* $Id: capifunc.h,v 1.11 2004/03/20 17:19:58 armin Exp $
/* $Id: capifunc.h,v 1.11.4.1 2004/08/28 20:03:53 armin Exp $
*
* ISDN interface module for Eicon active cards DIVA.
* CAPI Interface common functions
......@@ -13,8 +13,6 @@
#ifndef __CAPIFUNC_H__
#define __CAPIFUNC_H__
#define MAX_DESCRIPTORS 32
#define DRRELMAJOR 2
#define DRRELMINOR 0
#define DRRELEXTRA ""
......
......@@ -57,12 +57,18 @@ void diva_mnt_remove_xdi_adapter (const DESCRIPTOR* d);
void diva_mnt_add_xdi_adapter (const DESCRIPTOR* d);
int diva_mnt_shutdown_xdi_adapters (void);
#define DIVA_MAX_SELECTIVE_FILTER_LENGTH 127
int diva_set_trace_filter (int filter_length, const char* filter);
int diva_get_trace_filter (int max_length, char* filter);
#define DITRACE_CMD_GET_DRIVER_INFO 1
#define DITRACE_READ_DRIVER_DBG_MASK 2
#define DITRACE_WRITE_DRIVER_DBG_MASK 3
#define DITRACE_READ_TRACE_ENTRY 4
#define DITRACE_READ_TRACE_ENTRYS 5
#define DITRACE_WRITE_SELECTIVE_TRACE_FILTER 6
#define DITRACE_READ_SELECTIVE_TRACE_FILTER 7
/*
Trace levels for debug via management interface
......
......@@ -232,7 +232,7 @@ typedef struct _DbgHandle_ *pDbgHandle ;
typedef void ( * DbgEnd) (pDbgHandle) ;
typedef void ( * DbgLog) (unsigned short, int, char *, va_list) ;
typedef void ( * DbgOld) (unsigned short, char *, va_list) ;
typedef void ( * DbgEv) (unsigned short, unsigned int, va_list) ;
typedef void ( * DbgEv) (unsigned short, unsigned long, va_list) ;
typedef void ( * DbgIrq) (unsigned short, int, char *, va_list) ;
typedef struct _DbgHandle_
{ char Registered ; /* driver successfully registered */
......@@ -259,7 +259,7 @@ typedef struct _DbgHandle_
void *pReserved3 ;
} _DbgHandle_ ;
extern _DbgHandle_ myDriverDebugHandle ;
typedef struct
typedef struct _OldDbgHandle_
{ struct _OldDbgHandle_ *next ;
void *pIrp ;
long regTime[2] ;
......@@ -310,7 +310,7 @@ typedef struct
unsigned long B_ChannelMask;
unsigned long LogBufferSize;
} CardTrace;
} u1;
}Data;
} _DbgExtendedInfo_;
#ifndef DIVA_NO_DEBUGLIB
/* -------------------------------------------------------------
......
/* $Id: diddfunc.c,v 1.14 2003/08/25 10:06:37 schindler Exp $
/* $Id: diddfunc.c,v 1.14.6.2 2004/08/28 20:03:53 armin Exp $
*
* DIDD Interface module for Eicon active cards.
*
......@@ -16,8 +16,6 @@
#include "dadapter.h"
#include "divasync.h"
#define MAX_DESCRIPTORS 32
#define DBG_MINIMUM (DL_LOG + DL_FTL + DL_ERR)
#define DBG_DEFAULT (DBG_MINIMUM + DL_XLOG + DL_REG)
......
/* $Id: diva_didd.c,v 1.13 2003/08/27 10:11:21 schindler Exp $
/* $Id: diva_didd.c,v 1.13.6.1 2004/08/28 20:03:53 armin Exp $
*
* DIDD Interface module for Eicon active cards.
*
......@@ -23,7 +23,7 @@
#include "divasync.h"
#include "did_vers.h"
static char *main_revision = "$Revision: 1.13 $";
static char *main_revision = "$Revision: 1.13.6.1 $";
static char *DRIVERNAME =
"Eicon DIVA - DIDD table (http://www.melware.net)";
......@@ -37,8 +37,6 @@ MODULE_AUTHOR("Cytronics & Melware, Eicon Networks");
MODULE_SUPPORTED_DEVICE("Eicon diva drivers");
MODULE_LICENSE("GPL");
#define MAX_DESCRIPTORS 32
#define DBG_MINIMUM (DL_LOG + DL_FTL + DL_ERR)
#define DBG_DEFAULT (DBG_MINIMUM + DL_XLOG + DL_REG)
......
/* $Id: divamnt.c,v 1.32 2004/01/15 09:48:13 armin Exp $
/* $Id: divamnt.c,v 1.32.6.5 2004/08/28 20:03:53 armin Exp $
*
* Driver for Eicon DIVA Server ISDN cards.
* Maint module
......@@ -17,8 +17,6 @@
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/devfs_fs_kernel.h>
#include "platform.h"
......@@ -26,7 +24,7 @@
#include "divasync.h"
#include "debug_if.h"
static char *main_revision = "$Revision: 1.32 $";
static char *main_revision = "$Revision: 1.32.6.5 $";
static int major;
......@@ -47,8 +45,7 @@ static char *DEVNAME = "DivasMAINT";
char *DRIVERRELEASE_MNT = "2.0";
static wait_queue_head_t msgwaitq;
static DECLARE_MUTEX(opened_sem);
static int opened;
static unsigned long opened;
static struct timeval start_time;
extern int mntfunc_init(int *, void **, unsigned long);
......@@ -73,20 +70,6 @@ static char *getrev(const char *revision)
return rev;
}
/*
* buffer alloc
*/
void *diva_os_malloc_tbuffer(unsigned long flags, unsigned long size)
{
return (kmalloc(size, GFP_KERNEL));
}
void diva_os_free_tbuffer(unsigned long flags, void *ptr)
{
if (ptr) {
kfree(ptr);
}
}
/*
* kernel/user space copy functions
*/
......@@ -131,154 +114,8 @@ void diva_os_get_time(dword * sec, dword * usec)
}
/*
* /proc entries
* device node operations
*/
extern struct proc_dir_entry *proc_net_eicon;
static struct proc_dir_entry *maint_proc_entry = NULL;
/*
Read function is provided for compatibility reasons - this allows
reading unstructured traces, formatted as an ASCII string only
*/
static ssize_t
maint_read(struct file *file, char __user *buf, size_t count, loff_t * off)
{
diva_dbg_entry_head_t *pmsg = NULL;
diva_os_spin_lock_magic_t old_irql;
word size;
char *pstr, *dli_label = "UNK";
int str_length;
int *str_msg;
if (!file->private_data) {
for (;;) {
while (
(pmsg =
diva_maint_get_message(&size,
&old_irql))) {
if (!(pmsg->facility == MSG_TYPE_STRING)) {
diva_maint_ack_message(1,
&old_irql);
} else {
break;
}
}
if (!pmsg) {
if (file->f_flags & O_NONBLOCK) {
return (-EAGAIN);
}
interruptible_sleep_on(&msgwaitq);
if (signal_pending(current)) {
return (-ERESTARTSYS);
}
} else {
break;
}
}
/*
The length of the message that should be read is:
pmsg->data_length + label(25) + DrvID(2) + byte CR + trailing zero
*/
if (!
(str_msg =
(int *) diva_os_malloc_tbuffer(0,
pmsg->data_length +
29 + 2 * sizeof(int)))) {
diva_maint_ack_message(0, &old_irql);
return (-ENOMEM);
}
pstr = (char *) &str_msg[2];
switch (pmsg->dli) {
case DLI_LOG:
dli_label = "LOG";
break;
case DLI_FTL:
dli_label = "FTL";
break;
case DLI_ERR:
dli_label = "ERR";
break;
case DLI_TRC:
dli_label = "TRC";
break;
case DLI_REG:
dli_label = "REG";
break;
case DLI_MEM:
dli_label = "MEM";
break;
case DLI_SPL:
dli_label = "SPL";
break;
case DLI_IRP:
dli_label = "IRP";
break;
case DLI_TIM:
dli_label = "TIM";
break;
case DLI_TAPI:
dli_label = "TAPI";
break;
case DLI_NDIS:
dli_label = "NDIS";
break;
case DLI_CONN:
dli_label = "CONN";
break;
case DLI_STAT:
dli_label = "STAT";
break;
case DLI_PRV0:
dli_label = "PRV0";
break;
case DLI_PRV1:
dli_label = "PRV1";
break;
case DLI_PRV2:
dli_label = "PRV2";
break;
case DLI_PRV3:
dli_label = "PRV3";
break;
}
str_length = sprintf(pstr, "%s %02x %s\n",
dli_label, (byte) pmsg->drv_id,
(char *) &pmsg[1]);
str_msg[0] = str_length;
str_msg[1] = 0;
file->private_data = str_msg;
diva_maint_ack_message(1, &old_irql);
} else {
str_msg = (int *) file->private_data;
pstr = (char *) &str_msg[2];
pstr += str_msg[1]; /* head + offset */
str_length = str_msg[0] - str_msg[1]; /* length - offset */
}
str_length = MIN(str_length, count);
if (diva_os_copy_to_user(NULL, buf, pstr, str_length)) {
diva_os_free_tbuffer(0, str_msg);
file->private_data = NULL;
return (-EFAULT);
}
str_msg[1] += str_length;
if ((str_msg[0] - str_msg[1]) <= 0) {
diva_os_free_tbuffer(0, str_msg);
file->private_data = NULL;
}
return (str_length);
}
static ssize_t
maint_write(struct file *file, const char __user *buf, size_t count, loff_t * off)
{
return (-ENODEV);
}
static unsigned int maint_poll(struct file *file, poll_table * wait)
{
unsigned int mask = 0;
......@@ -293,13 +130,10 @@ static unsigned int maint_poll(struct file *file, poll_table * wait)
static int maint_open(struct inode *ino, struct file *filep)
{
down(&opened_sem);
if (opened) {
up(&opened_sem);
/* only one open is allowed, so we test
it atomically */
if (test_and_set_bit(0, &opened))
return (-EBUSY);
}
opened++;
up(&opened_sem);
filep->private_data = NULL;
......@@ -309,54 +143,16 @@ static int maint_open(struct inode *ino, struct file *filep)
static int maint_close(struct inode *ino, struct file *filep)
{
if (filep->private_data) {
diva_os_free_tbuffer(0, filep->private_data);
filep->private_data = NULL;
diva_os_free(0, filep->private_data);
filep->private_data = 0;
}
down(&opened_sem);
opened--;
up(&opened_sem);
/* clear 'used' flag */
clear_bit(0, &opened);
return (0);
}
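/*
 * The open/close pair above replaces a semaphore-protected counter with a
 * single bit tested atomically.  A generic sketch of this single-open
 * pattern (hypothetical names, not part of this driver) looks like:
 *
 *	static unsigned long foo_in_use;	-- bit 0 == device busy
 *
 *	static int foo_open(struct inode *ino, struct file *filp)
 *	{
 *		if (test_and_set_bit(0, &foo_in_use))
 *			return -EBUSY;		-- already open elsewhere
 *		return 0;
 *	}
 *
 *	static int foo_release(struct inode *ino, struct file *filp)
 *	{
 *		clear_bit(0, &foo_in_use);	-- allow the next open
 *		return 0;
 *	}
 */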
/*
* fops
*/
static struct file_operations maint_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.read = maint_read,
.write = maint_write,
.poll = maint_poll,
.open = maint_open,
.release = maint_close
};
static int DIVA_INIT_FUNCTION create_maint_proc(void)
{
maint_proc_entry =
create_proc_entry("maint", S_IFREG | S_IRUGO | S_IWUSR,
proc_net_eicon);
if (!maint_proc_entry)
return (0);
maint_proc_entry->proc_fops = &maint_fops;
maint_proc_entry->owner = THIS_MODULE;
return (1);
}
static void remove_maint_proc(void)
{
if (maint_proc_entry) {
remove_proc_entry("maint", proc_net_eicon);
maint_proc_entry = NULL;
}
}
/*
* device node operations
*/
static ssize_t divas_maint_write(struct file *file, const char __user *buf,
size_t count, loff_t * ppos)
{
......@@ -427,18 +223,10 @@ static int DIVA_INIT_FUNCTION maint_init(void)
ret = -EIO;
goto out;
}
if (!create_maint_proc()) {
printk(KERN_ERR "%s: failed to create proc entry.\n",
DRIVERLNAME);
divas_maint_unregister_chrdev();
ret = -EIO;
goto out;
}
if (!(mntfunc_init(&buffer_length, &buffer, diva_dbg_mem))) {
printk(KERN_ERR "%s: failed to connect to DIDD.\n",
DRIVERLNAME);
remove_maint_proc();
divas_maint_unregister_chrdev();
ret = -EIO;
goto out;
......@@ -457,7 +245,6 @@ static int DIVA_INIT_FUNCTION maint_init(void)
*/
static void DIVA_EXIT_FUNCTION maint_exit(void)
{
remove_maint_proc();
divas_maint_unregister_chrdev();
mntfunc_finit();
......@@ -466,3 +253,4 @@ static void DIVA_EXIT_FUNCTION maint_exit(void)
module_init(maint_init);
module_exit(maint_exit);
/* $Id: divasfunc.c,v 1.23 2004/04/08 01:17:57 armin Exp $
/* $Id: divasfunc.c,v 1.23.4.2 2004/08/28 20:03:53 armin Exp $
*
* Low level driver for Eicon DIVA Server ISDN cards.
*
......@@ -27,8 +27,6 @@ extern void DIVA_DIDD_Read(void *, int);
extern PISDN_ADAPTER IoAdapters[MAX_ADAPTER];
#define MAX_DESCRIPTORS 32
extern char *DRIVERRELEASE_DIVAS;
static dword notify_handle;
......@@ -76,10 +74,10 @@ void diva_xdi_didd_register_adapter(int card)
d.features = IoAdapters[card - 1]->Properties.Features;
DBG_TRC(("DIDD register A(%d) channels=%d", card,
d.channels))
/* workaround for different Name in structure */
strlcpy(IoAdapters[card - 1]->Name,
IoAdapters[card - 1]->Properties.Name,
sizeof(IoAdapters[card - 1]->Name));
/* workaround for different Name in structure */
strlcpy(IoAdapters[card - 1]->Name,
IoAdapters[card - 1]->Properties.Name,
sizeof(IoAdapters[card - 1]->Name));
req.didd_remove_adapter.e.Req = 0;
req.didd_add_adapter.e.Rc = IDI_SYNC_REQ_DIDD_ADD_ADAPTER;
req.didd_add_adapter.info.descriptor = (void *) &d;
......
......@@ -31,33 +31,31 @@
#define IDI_SYNC_REQ_SET_POSTCALL 0x03
#define IDI_SYNC_REQ_GET_XLOG 0x04
#define IDI_SYNC_REQ_GET_FEATURES 0x05
/* Added for DIVA USB support */
#define IDI_SYNC_REQ_USB_REGISTER 0x06
#define IDI_SYNC_REQ_USB_RELEASE 0x07
#define IDI_SYNC_REQ_USB_ADD_DEVICE 0x08
#define IDI_SYNC_REQ_USB_START_DEVICE 0x09
#define IDI_SYNC_REQ_USB_STOP_DEVICE 0x0A
#define IDI_SYNC_REQ_USB_REMOVE_DEVICE 0x0B
/* Added for Diva Server Monitor */
#define IDI_SYNC_REQ_GET_CARDTYPE 0x0C
#define IDI_SYNC_REQ_GET_DBG_XLOG 0x0D
#define IDI_SYNC_REQ_GET_LINE_IDX 0x0E
#define DIVA_USB
#define DIVA_USB_REQ 0xAC
#define DIVA_USB_TEST 0xAB
#define DIVA_USB_ADD_ADAPTER 0xAC
#define DIVA_USB_REMOVE_ADAPTER 0xAD
/******************************************************************************/
#define IDI_SYNC_REQ_SERIAL_HOOK 0x80
#define IDI_SYNC_REQ_XCHANGE_STATUS 0x81
#define IDI_SYNC_REQ_USB_HOOK 0x82
#define IDI_SYNC_REQ_PORTDRV_HOOK 0x83
#define IDI_SYNC_REQ_SLI (0x84) /* SLI request from 3signal modem drivers */
#define IDI_SYNC_REQ_SLI 0x84 /* SLI request from 3signal modem drivers */
#define IDI_SYNC_REQ_RECONFIGURE 0x85
#define IDI_SYNC_REQ_RESET 0x86
#define IDI_SYNC_REQ_GET_85X_DEVICE_DATA 0x87
#define IDI_SYNC_REQ_LOCK_85X 0x88
#define IDI_SYNC_REQ_DIVA_85X_USB_DATA_EXCHANGE 0x99
#define IDI_SYNC_REQ_DIPORT_EXCHANGE_REQ 0x98
#define IDI_SYNC_REQ_GET_85X_EXT_PORT_TYPE 0xA0
#define IDI_SYNC_REQ_DIPORT_GET_85X_TX_CTRL_FN 0x98
/******************************************************************************/
#define IDI_SYNC_REQ_XDI_GET_EXTENDED_FEATURES 0x92
/*
......@@ -87,6 +85,8 @@ typedef struct _diva_xdi_get_extended_xdi_features {
#define DIVA_XDI_EXTENDED_FEATURE_CAPI_PRMS 0x08
#define DIVA_XDI_EXTENDED_FEATURE_NO_CANCEL_RC 0x10
#define DIVA_XDI_EXTENDED_FEATURE_RX_DMA 0x20
#define DIVA_XDI_EXTENDED_FEATURE_MANAGEMENT_DMA 0x40
#define DIVA_XDI_EXTENDED_FEATURE_WIDE_ID 0x80
#define DIVA_XDI_EXTENDED_FEATURES_MAX_SZ 1
/******************************************************************************/
#define IDI_SYNC_REQ_XDI_GET_ADAPTER_SDRAM_BAR 0x93
......@@ -115,6 +115,7 @@ typedef struct _diva_xdi_get_capi_parameters {
typedef struct _diva_xdi_get_logical_adapter_number {
dword logical_adapter_number;
dword controller;
dword total_controllers;
} diva_xdi_get_logical_adapter_number_s_t;
/******************************************************************************/
#define IDI_SYNC_REQ_UP1DM_OPERATION 0x96
......@@ -134,6 +135,7 @@ typedef struct _diva_xdi_dma_descriptor_operation {
#define IDI_SYNC_REQ_DIDD_ADD_ADAPTER 0x03
#define IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER 0x04
#define IDI_SYNC_REQ_DIDD_READ_ADAPTER_ARRAY 0x05
#define IDI_SYNC_REQ_DIDD_GET_CFG_LIB_IFC 0x10
typedef struct _diva_didd_adapter_notify {
dword handle; /* Notification handle */
void * callback;
......@@ -149,6 +151,9 @@ typedef struct _diva_didd_read_adapter_array {
void * buffer;
dword length;
} diva_didd_read_adapter_array_t;
typedef struct _diva_didd_get_cfg_lib_ifc {
void* ifc;
} diva_didd_get_cfg_lib_ifc_t;
/******************************************************************************/
#define IDI_SYNC_REQ_XDI_GET_STREAM 0x91
#define DIVA_XDI_SYNCHRONOUS_SERVICE 0x01
......@@ -466,6 +471,10 @@ typedef union
ENTITY e;
diva_didd_read_adapter_array_t info;
} didd_read_adapter_array;
struct {
ENTITY e;
diva_didd_get_cfg_lib_ifc_t info;
} didd_get_cfg_lib_ifc;
struct {
unsigned char Req;
unsigned char Rc;
......
/* $Id: idifunc.c,v 1.14.4.2 2004/05/09 16:42:20 armin Exp $
/* $Id: idifunc.c,v 1.14.4.4 2004/08/28 20:03:53 armin Exp $
*
* Driver for Eicon DIVA Server ISDN cards.
* User Mode IDI Interface
......@@ -25,8 +25,6 @@ extern void DIVA_DIDD_Read(void *, int);
extern int diva_user_mode_idi_create_adapter(const DESCRIPTOR *, int);
extern void diva_user_mode_idi_remove_adapter(int);
#define MAX_DESCRIPTORS 32
static dword notify_handle;
static DESCRIPTOR DAdapter;
static DESCRIPTOR MAdapter;
......
......@@ -77,6 +77,7 @@ static byte extended_xdi_features[DIVA_XDI_EXTENDED_FEATURES_MAX_SZ+1] = {
#if defined(DIVA_IDI_RX_DMA)
DIVA_XDI_EXTENDED_FEATURE_CMA |
DIVA_XDI_EXTENDED_FEATURE_RX_DMA |
DIVA_XDI_EXTENDED_FEATURE_MANAGEMENT_DMA |
#endif
DIVA_XDI_EXTENDED_FEATURE_NO_CANCEL_RC),
0
......@@ -226,8 +227,10 @@ void request(PISDN_ADAPTER IoAdapter, ENTITY * e)
if (pI->descriptor_number >= 0) {
dword dma_magic;
void* local_addr;
#if 0
DBG_TRC(("A(%d) dma_alloc(%d)",
IoAdapter->ANum, pI->descriptor_number))
#endif
diva_get_dma_map_entry (\
(struct _diva_dma_map_entry*)IoAdapter->dma_map,
pI->descriptor_number,
......@@ -240,7 +243,9 @@ void request(PISDN_ADAPTER IoAdapter, ENTITY * e)
}
} else if ((pI->operation == IDI_SYNC_REQ_DMA_DESCRIPTOR_FREE) &&
(pI->descriptor_number >= 0)) {
#if 0
DBG_TRC(("A(%d) dma_free(%d)", IoAdapter->ANum, pI->descriptor_number))
#endif
diva_free_dma_map_entry((struct _diva_dma_map_entry*)IoAdapter->dma_map,
pI->descriptor_number);
pI->descriptor_number = -1;
......@@ -257,6 +262,7 @@ void request(PISDN_ADAPTER IoAdapter, ENTITY * e)
&syncReq->xdi_logical_adapter_number.info;
pI->logical_adapter_number = IoAdapter->ANum;
pI->controller = IoAdapter->ControllerNumber;
pI->total_controllers = IoAdapter->Properties.Adapters;
} return;
case IDI_SYNC_REQ_XDI_GET_CAPI_PARAMS: {
diva_xdi_get_capi_parameters_t prms, *pI = &syncReq->xdi_capi_prms.info;
......@@ -318,6 +324,16 @@ void request(PISDN_ADAPTER IoAdapter, ENTITY * e)
}
syncReq->GetSerial.serial = 0 ;
break ;
case IDI_SYNC_REQ_GET_CARDTYPE:
if ( IoAdapter )
{
syncReq->GetCardType.cardtype = IoAdapter->cardType ;
DBG_TRC(("xdi: Adapter %d / CardType %ld",
IoAdapter->ANum, IoAdapter->cardType))
return ;
}
syncReq->GetCardType.cardtype = 0 ;
break ;
case IDI_SYNC_REQ_GET_XLOG:
if ( IoAdapter )
{
......@@ -326,6 +342,14 @@ void request(PISDN_ADAPTER IoAdapter, ENTITY * e)
}
e->Ind = 0 ;
break ;
case IDI_SYNC_REQ_GET_DBG_XLOG:
if ( IoAdapter )
{
pcm_req (IoAdapter, e) ;
return ;
}
e->Ind = 0 ;
break ;
case IDI_SYNC_REQ_GET_FEATURES:
if ( IoAdapter )
{
......@@ -345,7 +369,9 @@ void request(PISDN_ADAPTER IoAdapter, ENTITY * e)
}
if ( IoAdapter )
{
#if 0
DBG_FTL(("xdi: unknown Req 0 / Rc %d !", e->Rc))
#endif
return ;
}
}
......@@ -496,7 +522,7 @@ pcm_req (PISDN_ADAPTER IoAdapter, ENTITY *e)
diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
&OldIrql,
"data_pcm_1");
IoAdapter->pcm_data = (unsigned long)pcm;
IoAdapter->pcm_data = (void *)pcm;
IoAdapter->pcm_pending = 1;
diva_os_schedule_soft_isr (&IoAdapter->req_soft_isr);
diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
......@@ -510,7 +536,7 @@ pcm_req (PISDN_ADAPTER IoAdapter, ENTITY *e)
&OldIrql,
"data_pcm_3");
IoAdapter->pcm_pending = 0;
IoAdapter->pcm_data = 0;
IoAdapter->pcm_data = NULL ;
diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
&OldIrql,
"data_pcm_3");
......@@ -528,7 +554,7 @@ pcm_req (PISDN_ADAPTER IoAdapter, ENTITY *e)
&OldIrql,
"data_pcm_4");
IoAdapter->pcm_pending = 0;
IoAdapter->pcm_data = 0;
IoAdapter->pcm_data = NULL ;
diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
&OldIrql,
"data_pcm_4");
......@@ -668,7 +694,7 @@ word io_inw(ADAPTER * a, void * adr)
void io_in_buffer(ADAPTER * a, void * adr, void * buffer, word len)
{
byte *Port = (byte*)DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
byte* P = (byte*)buffer;
byte* P = (byte*)buffer;
if ((long)adr & 1) {
outppw(Port+4, (word)(unsigned long)adr);
*P = inpp(Port);
......@@ -678,7 +704,7 @@ void io_in_buffer(ADAPTER * a, void * adr, void * buffer, word len)
if (!len) {
DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
return;
}
}
}
outppw(Port+4, (word)(unsigned long)adr);
inppw_buffer (Port, P, len+1);
......@@ -710,7 +736,7 @@ void io_outw(ADAPTER * a, void * adr, word data)
void io_out_buffer(ADAPTER * a, void * adr, void * buffer, word len)
{
byte *Port = (byte*)DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
byte* P = (byte*)buffer;
byte* P = (byte*)buffer;
if ((long)adr & 1) {
outppw(Port+4, (word)(unsigned long)adr);
outpp(Port, *P);
......@@ -839,21 +865,21 @@ void CALLBACK(ADAPTER * a, ENTITY * e)
/* --------------------------------------------------------------------------
routines for aligned reading and writing on RISC
-------------------------------------------------------------------------- */
void outp_words_from_buffer (word* adr, byte* P, word len)
void outp_words_from_buffer (word* adr, byte* P, dword len)
{
word i = 0;
dword i = 0;
word w;
while (i < (len & 0xfffe)) {
while (i < (len & 0xfffffffe)) {
w = P[i++];
w += (P[i++])<<8;
outppw (adr, w);
}
}
void inp_words_to_buffer (word* adr, byte* P, word len)
void inp_words_to_buffer (word* adr, byte* P, dword len)
{
word i = 0;
dword i = 0;
word w;
while (i < (len & 0xfffe)) {
while (i < (len & 0xfffffffe)) {
w = inppw (adr);
P[i++] = (byte)(w);
P[i++] = (byte)(w>>8);
......
......@@ -39,6 +39,14 @@ typedef struct {
DEVICE_NAME DeviceName[4] ;
PISDN_ADAPTER QuadroAdapter[4] ;
} ADAPTER_LIST_ENTRY, *PADAPTER_LIST_ENTRY ;
/* --------------------------------------------------------------------------
Special OS memory support structures
-------------------------------------------------------------------------- */
#define MAX_MAPPED_ENTRIES 8
typedef struct {
void * Address;
dword Length;
} ADAPTER_MEMORY ;
/* --------------------------------------------------------------------------
Configuration of XDI clients carried by XDI
-------------------------------------------------------------------------- */
......@@ -52,6 +60,7 @@ typedef struct _diva_xdi_capi_cfg {
-------------------------------------------------------------------------- */
struct _ISDN_ADAPTER {
void (* DIRequest)(PISDN_ADAPTER, ENTITY *) ;
int State ; /* from NT4 1.srv, a good idea, but a poor achievement */
int Initialized ;
int RegisteredWithDidd ;
int Unavailable ; /* callback function possible? */
......@@ -63,6 +72,7 @@ struct _ISDN_ADAPTER {
/*
remember mapped memory areas
*/
ADAPTER_MEMORY MappedMemory[MAX_MAPPED_ENTRIES] ;
CARD_PROPERTIES Properties ;
dword cardType ;
dword protocol_id ; /* configured protocol identifier */
......@@ -87,15 +97,15 @@ struct _ISDN_ADAPTER {
dword downloadAddrTable[4] ; /* add. for MultiMaster */
dword MemoryBase ;
dword MemorySize ;
byte *Address ;
byte *Address ;
byte *Config ;
byte *Control ;
byte *reset ;
byte *port ;
byte *ram ;
byte *cfg ;
byte *prom ;
byte *ctlReg ;
byte *reset ;
byte *port ;
byte *ram ;
byte *cfg ;
byte *prom ;
byte *ctlReg ;
struct pc_maint *pcm ;
diva_os_dependent_devica_name_t os_name;
byte Name[32] ;
......@@ -105,6 +115,7 @@ struct _ISDN_ADAPTER {
char *ProtocolSuffix ; /* internal protocolfile table */
char Archive[32] ;
char Protocol[32] ;
char AddDownload[32] ; /* Dsp- or other additional download files */
char Oad1[ISDN_MAX_NUM_LEN] ;
char Osa1[ISDN_MAX_NUM_LEN] ;
char Oad2[ISDN_MAX_NUM_LEN] ;
......@@ -153,8 +164,26 @@ struct _ISDN_ADAPTER {
byte ModemCarrierWaitTimeSec;
byte ModemCarrierLossWaitTimeTenthSec;
byte PiafsLinkTurnaroundInFrames;
byte DiscAfterProgress;
byte AniDniLimiter[3];
byte TxAttenuation; /* PRI/E1 only: attenuate TX signal */
word QsigFeatures;
dword GenerateRingtone ;
dword SupplementaryServicesFeatures;
dword R2Dialect;
dword R2CasOptions;
dword FaxV34Options;
dword DisabledDspMask;
dword AdapterTestMask;
dword DspImageLength;
word AlertToIn20mSecTicks;
word ModemEyeSetup;
byte R2CtryLength;
byte CCBSRelTimer;
byte *PcCfgBufferFile;/* flexible parameter via file */
byte *PcCfgBuffer ; /* flexible parameter via multistring */
diva_os_dump_file_t dump_file; /* dump memory to file at lowest irq level */
diva_os_board_trace_t board_trace ; /* traces from the board */
diva_os_spin_lock_t isr_spin_lock;
diva_os_spin_lock_t data_spin_lock;
diva_os_soft_isr_t req_soft_isr;
......@@ -180,15 +209,21 @@ struct _ISDN_ADAPTER {
void (* stop)(PISDN_ADAPTER) ;
void (* rstFnc)(PISDN_ADAPTER) ;
void (* trapFnc)(PISDN_ADAPTER) ;
dword (* DetectDsps)(PISDN_ADAPTER) ;
void (* os_trap_nfy_Fnc)(PISDN_ADAPTER, dword) ;
diva_os_isr_callback_t diva_isr_handler;
dword sdram_bar;
dword sdram_bar; /* must be 32 bit */
dword fpga_features;
volatile int pcm_pending;
volatile unsigned long pcm_data;
volatile void * pcm_data;
diva_xdi_capi_cfg_t capi_cfg;
dword tasks;
void* dma_map;
void *dma_map;
int (*DivaAdapterTestProc)(PISDN_ADAPTER);
void *AdapterTestMemoryStart;
dword AdapterTestMemoryLength;
const byte* cfg_lib_memory_init;
dword cfg_lib_memory_init_length;
};
/* ---------------------------------------------------------------------
Entity table
......@@ -219,8 +254,8 @@ struct s_load {
/* ---------------------------------------------------------------------
Functions for port io
--------------------------------------------------------------------- */
void outp_words_from_buffer (word* adr, byte* P, word len);
void inp_words_to_buffer (word* adr, byte* P, word len);
void outp_words_from_buffer (word* adr, byte* P, dword len);
void inp_words_to_buffer (word* adr, byte* P, dword len);
/* ---------------------------------------------------------------------
platform specific conversions
--------------------------------------------------------------------- */
......@@ -240,6 +275,10 @@ void io_out(ADAPTER * a, void * adr, byte data);
void io_outw(ADAPTER * a, void * adr, word data);
void io_out_buffer(ADAPTER * a, void * adr, void * P, word length);
void io_inc(ADAPTER * a, void * adr);
void bri_in_buffer (PISDN_ADAPTER IoAdapter, dword Pos,
void *Buf, dword Len);
int bri_out_buffer (PISDN_ADAPTER IoAdapter, dword Pos,
void *Buf, dword Len, int Verify);
/* ---------------------------------------------------------------------
ram access functions for memory mapped cards
--------------------------------------------------------------------- */
......
......@@ -115,7 +115,7 @@ diva_strace_library_interface_t* DivaSTraceLibraryCreateInstance (int Adapter,
return NULL;
}
pmem += sizeof(*pLib);
pmem += sizeof(*pLib);
memset(pLib, 0x00, sizeof(*pLib));
pLib->Adapter = Adapter;
......@@ -337,13 +337,60 @@ static int SuperTraceMessageInput (void* hLib) {
pLib->e.RNum = 1;
pLib->e.R->P = (byte*)&pLib->buffer[0];
pLib->e.R->PLength = (word)(sizeof(pLib->buffer) - 1);
} else {
/*
Indication reception complete, process it now
*/
byte* p = (byte*)&pLib->buffer[0];
pLib->buffer[pLib->e.R->PLength] = 0; /* terminate I.E. with zero */
switch (Ind) {
case MAN_COMBI_IND: {
int total_length = pLib->e.R->PLength;
word this_ind_length;
while (total_length > 3 && *p) {
Ind = *p++;
this_ind_length = (word)p[0] | ((word)p[1] << 8);
p += 2;
switch (Ind) {
case MAN_INFO_IND:
if (process_idi_info (pLib, (diva_man_var_header_t*)p)) {
return (-1);
}
break;
case MAN_EVENT_IND:
if (process_idi_event (pLib, (diva_man_var_header_t*)p)) {
return (-1);
}
break;
case MAN_TRACE_IND:
if (pLib->trace_on == 1) {
/*
Ignore the first trace event, which is the result of
the EVENT_ON operation
*/
pLib->trace_on++;
} else {
/*
Deliver the XLOG buffer to the application
*/
if (pLib->user_proc_table.trace_proc) {
(*(pLib->user_proc_table.trace_proc))(pLib->user_proc_table.user_context,
&pLib->instance, pLib->Adapter,
p, this_ind_length);
}
}
break;
default:
diva_mnt_internal_dprintf (0, DLI_ERR, "Unknown IDI Ind (DMA mode): %02x", Ind);
}
p += (this_ind_length+1);
total_length -= (4 + this_ind_length);
}
} break;
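/*
 * MAN_COMBI_IND framing (parsed above): the buffer holds a sequence of
 * [1 byte Ind][2 byte little-endian length][payload][1 byte terminator]
 * records; parsing stops once fewer than 4 bytes remain or a zero Ind
 * byte is seen.
 */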
case MAN_INFO_IND:
if (process_idi_info (pLib, (diva_man_var_header_t*)p)) {
return (-1);
......@@ -806,7 +853,7 @@ static int ScheduleNextTraceRequest (diva_strace_context_t* pLib) {
}
static int process_idi_event (diva_strace_context_t* pLib,
diva_man_var_header_t* pVar) {
diva_man_var_header_t* pVar) {
const char* path = (char*)&pVar->path_length+1;
char name[64];
int i;
......
......@@ -49,6 +49,8 @@ typedef struct _diva_strace_path2action {
void* variable; /* Variable that will receive value */
} diva_strace_path2action_t;
#define DIVA_MAX_MANAGEMENT_TRANSFER_SIZE 4096
typedef struct _diva_strace_context {
diva_strace_library_interface_t instance;
......@@ -62,7 +64,7 @@ typedef struct _diva_strace_context {
IDI_CALL request;
BUFFERS XData;
BUFFERS RData;
byte buffer[2048+512+1];
byte buffer[DIVA_MAX_MANAGEMENT_TRANSFER_SIZE + 1];
int removal_state;
int general_b_ch_event;
int general_fax_event;
......
/* $Id: mntfunc.c,v 1.19 2004/01/09 21:22:03 armin Exp $
/* $Id: mntfunc.c,v 1.19.6.2 2004/08/28 20:03:53 armin Exp $
*
* Driver for Eicon DIVA Server ISDN cards.
* Maint module
......@@ -23,17 +23,12 @@ extern char *DRIVERRELEASE_MNT;
extern void DIVA_DIDD_Read(void *, int);
#define MAX_DESCRIPTORS 32
static dword notify_handle;
static DESCRIPTOR DAdapter;
static DESCRIPTOR MAdapter;
static DESCRIPTOR MaintDescriptor =
{ IDI_DIMAINT, 0, 0, (IDI_CALL) diva_maint_prtComp };
extern void *diva_os_malloc_tbuffer(unsigned long flags,
unsigned long size);
extern void diva_os_free_tbuffer(unsigned long flags, void *ptr);
extern int diva_os_copy_to_user(void *os_handle, void __user *dst,
const void *src, int length);
extern int diva_os_copy_from_user(void *os_handle, void *dst,
......@@ -46,16 +41,6 @@ static void no_printf(unsigned char *x, ...)
#include "debuglib.c"
/*
* stop debug
*/
static void stop_dbg(void)
{
DbgDeregister();
memset(&MAdapter, 0, sizeof(MAdapter));
dprintf = no_printf;
}
/*
* DIDD callback function
*/
......@@ -66,7 +51,9 @@ static void *didd_callback(void *context, DESCRIPTOR * adapter,
DBG_ERR(("cb: Change in DAdapter ? Oops ?."));
} else if (adapter->type == IDI_DIMAINT) {
if (removal) {
stop_dbg();
DbgDeregister();
memset(&MAdapter, 0, sizeof(MAdapter));
dprintf = no_printf;
} else {
memcpy(&MAdapter, adapter, sizeof(MAdapter));
dprintf = (DIVA_DI_PRINTF) MAdapter.request;
......@@ -131,8 +118,6 @@ static void DIVA_EXIT_FUNCTION disconnect_didd(void)
{
IDI_SYNC_REQ req;
stop_dbg();
req.didd_notify.e.Req = 0;
req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY;
req.didd_notify.info.handle = notify_handle;
......@@ -193,34 +178,63 @@ int maint_read_write(void __user *buf, int count)
}
break;
/*
Filter commands will ignore the ID due to the fact that filtering affects
the B-channel and Audio Tap trace levels only. Also, the MAINT driver will
select the right trace ID by itself.
*/
case DITRACE_WRITE_SELECTIVE_TRACE_FILTER:
if (!mask) {
ret = diva_set_trace_filter (1, "*");
} else if (mask < sizeof(data)) {
if (copy_from_user((void *)&data[0], (void *)(((byte*)buf)+12), mask)) {
ret = -EFAULT;
} else {
ret = diva_set_trace_filter ((int)mask, data);
}
} else {
ret = -EINVAL;
}
break;
case DITRACE_READ_SELECTIVE_TRACE_FILTER:
if ((ret = diva_get_trace_filter (sizeof(data), data)) > 0) {
if (copy_to_user ((void*)buf, (void*)&data[0], ret))
ret = -EFAULT;
} else {
ret = -ENODEV;
}
break;
case DITRACE_READ_TRACE_ENTRY:{
diva_os_spin_lock_magic_t old_irql;
word size;
diva_dbg_entry_head_t *pmsg;
byte *pbuf;
if ((pmsg = diva_maint_get_message(&size, &old_irql))) {
if (!(pbuf = diva_os_malloc(0, mask))) {
return (-ENOMEM);
}
for(;;) {
if (!(pmsg =
diva_maint_get_message(&size, &old_irql))) {
break;
}
if (size > mask) {
diva_maint_ack_message(0, &old_irql);
ret = -EINVAL;
} else {
if (!(pbuf = diva_os_malloc_tbuffer(0, size)))
{
diva_maint_ack_message(0, &old_irql);
ret = -ENOMEM;
} else {
ret = size;
memcpy(pbuf, pmsg, size);
diva_maint_ack_message(1, &old_irql);
if ((count < size) || diva_os_copy_to_user (NULL, buf,
(void *) pbuf, size))
ret = -EFAULT;
diva_os_free_tbuffer(0, pbuf);
}
break;
}
} else {
ret = 0;
ret = size;
memcpy(pbuf, pmsg, size);
diva_maint_ack_message(1, &old_irql);
if ((count < size) ||
diva_os_copy_to_user (NULL, buf, (void *) pbuf, size))
ret = -EFAULT;
break;
}
diva_os_free(0, pbuf);
}
break;
......@@ -235,7 +249,7 @@ int maint_read_write(void __user *buf, int count)
ret = -EINVAL;
break;
}
if (!(pbuf = diva_os_malloc_tbuffer(0, mask))) {
if (!(pbuf = diva_os_malloc(0, mask))) {
return (-ENOMEM);
}
......@@ -273,7 +287,7 @@ int maint_read_write(void __user *buf, int count)
} else {
ret = written;
}
diva_os_free_tbuffer(0, pbuf);
diva_os_free(0, pbuf);
}
break;
......@@ -302,7 +316,7 @@ int DIVA_INIT_FUNCTION mntfunc_init(int *buffer_length, void **buffer,
} else {
while ((*buffer_length >= (64 * 1024))
&&
(!(*buffer = diva_os_malloc_tbuffer(0, *buffer_length)))) {
(!(*buffer = diva_os_malloc (0, *buffer_length)))) {
*buffer_length -= 1024;
}
......@@ -314,7 +328,7 @@ int DIVA_INIT_FUNCTION mntfunc_init(int *buffer_length, void **buffer,
if (diva_maint_init(*buffer, *buffer_length, (diva_dbg_mem == 0))) {
if (!diva_dbg_mem) {
diva_os_free_tbuffer(0, *buffer);
diva_os_free (0, *buffer);
}
DBG_ERR(("init: maint init failed"));
return (0);
......@@ -324,7 +338,7 @@ int DIVA_INIT_FUNCTION mntfunc_init(int *buffer_length, void **buffer,
DBG_ERR(("init: failed to connect to DIDD."));
diva_maint_finit();
if (!diva_dbg_mem) {
diva_os_free_tbuffer(0, *buffer);
diva_os_free (0, *buffer);
}
return (0);
}
......@@ -339,6 +353,8 @@ void DIVA_EXIT_FUNCTION mntfunc_finit(void)
void *buffer;
int i = 100;
DbgDeregister();
while (diva_mnt_shutdown_xdi_adapters() && i--) {
diva_os_sleep(10);
}
......@@ -346,6 +362,9 @@ void DIVA_EXIT_FUNCTION mntfunc_finit(void)
disconnect_didd();
if ((buffer = diva_maint_finit())) {
diva_os_free_tbuffer(0, buffer);
diva_os_free (0, buffer);
}
memset(&MAdapter, 0, sizeof(MAdapter));
dprintf = no_printf;
}
......@@ -143,6 +143,7 @@ struct dual
#define N_DATA_ACK 12 /* data ack ind for D-bit procedure */
#define N_EDATA_ACK 13 /* data ack ind for INTERRUPT */
#define N_XON 15 /* clear RNR state */
#define N_COMBI_IND N_XON /* combined indication */
#define N_Q_BIT 0x10 /* Q-bit for req/ind */
#define N_M_BIT 0x20 /* M-bit for req/ind */
#define N_D_BIT 0x40 /* D-bit for req/ind */
......@@ -228,6 +229,10 @@ struct dual
#define VSWITCH_IND 66 /* capifunctions for D-CH-switching */
#define MWI_POLL 67 /* Message Waiting Status Request fkt */
#define CALL_PEND_NOTIFY 68 /* notify capi to set new listen */
#define DO_NOTHING 69 /* don't do anything if you get this */
#define INT_CT_REJ 70 /* ECT rejected internal command */
#define CALL_HOLD_COMPLETE 71 /* In NT Mode indicate hold complete */
#define CALL_RETRIEVE_COMPLETE 72 /* In NT Mode indicate retrieve complete */
/*------------------------------------------------------------------*/
/* management service primitives */
/*------------------------------------------------------------------*/
......@@ -241,6 +246,7 @@ struct dual
#define MAN_INFO_IND 2
#define MAN_EVENT_IND 3
#define MAN_TRACE_IND 4
#define MAN_COMBI_IND 9
#define MAN_ESC 0x80
/*------------------------------------------------------------------*/
/* return code coding */
......@@ -265,6 +271,7 @@ struct dual
/*------------------------------------------------------------------*/
#define SHIFT 0x90 /* codeset shift */
#define MORE 0xa0 /* more data */
#define SDNCMPL 0xa1 /* sending complete */
#define CL 0xb0 /* congestion level */
/* codeset 0 */
#define SMSG 0x00 /* segmented message */
......@@ -288,6 +295,8 @@ struct dual
#define RDX 0x73 /* redirecting number extended */
#define RDN 0x74 /* redirecting number */
#define RIN 0x76 /* redirection number */
#define IUP 0x76 /* VN6 rerouter->PCS (codeset 6) */
#define IPU 0x77 /* VN6 PCS->rerouter (codeset 6) */
#define RI 0x79 /* restart indicator */
#define MIE 0x7a /* management info element */
#define LLC 0x7c /* low layer compatibility */
......@@ -296,6 +305,8 @@ struct dual
#define ESC 0x7f /* escape extension */
#define DLC 0x20 /* data link layer configuration */
#define NLC 0x21 /* network layer configuration */
#define REDIRECT_IE 0x22 /* redirection request/indication data */
#define REDIRECT_NET_IE 0x23 /* redirection network override data */
/* codeset 6 */
#define SIN 0x01 /* service indicator */
#define CIF 0x02 /* charging information */
......@@ -306,6 +317,7 @@ struct dual
/*------------------------------------------------------------------*/
#define MSGTYPEIE 0x7a /* Messagetype info element */
#define CRIE 0x7b /* INFO info element */
#define CODESET6IE 0xec /* Tunnel for Codeset 6 IEs */
#define VSWITCHIE 0xed /* VSwitch info element */
#define SSEXTIE 0xee /* Supplem. Service info element */
#define PROFILEIE 0xef /* Profile info element */
......@@ -344,6 +356,13 @@ struct dual
#define CCBS_REQUEST 0x32
#define CCBS_DEACTIVATE 0x33
#define CCBS_INTERROGATE 0x34
#define CCBS_STATUS 0x35
#define CCBS_ERASE 0x36
#define CCBS_B_FREE 0x37
#define CCNR_INFO_RETAIN 0x38
#define CCBS_REMOTE_USER_FREE 0x39
#define CCNR_REQUEST 0x3a
#define CCNR_INTERROGATE 0x3b
#define GET_SUPPORTED_SERVICES 0xff
#define DIVERSION_PROCEDURE_CFU 0x70
#define DIVERSION_PROCEDURE_CFB 0x71
......@@ -362,6 +381,7 @@ struct dual
#define SMASK_3PTY 0x00000008
#define SMASK_CALL_FORWARDING 0x00000010
#define SMASK_CALL_DEFLECTION 0x00000020
#define SMASK_MCID 0x00000040
#define SMASK_CCBS 0x00000080
#define SMASK_MWI 0x00000100
#define SMASK_CCNR 0x00000200
......@@ -406,6 +426,8 @@ struct dual
#define RTPL2_IN 13 /* RTP layer-2 protocol, incoming */
#define RTPL2 14 /* RTP layer-2 protocol */
#define V120_V42BIS 15 /* V.120 asynchronous mode supporting V.42bis compression */
#define LISTENER 27 /* Layer 2 to listen line */
#define MTP2 28 /* MTP2 Layer 2 */
#define PIAFS_CRC 29 /* PIAFS Layer 2 with CRC calculation at L2 */
/* ------------------------------------------------------
PIAFS DLC DEFINITIONS
......@@ -506,6 +528,22 @@ Byte | 8 7 6 5 4 3 2 1
| | | data transfer. |
+---------------------+------+-----------------------------------------+
*/
/* ------------------------------------------------------
LISTENER DLC DEFINITIONS
------------------------------------------------------ */
#define LISTENER_FEATURE_MASK_CUMMULATIVE 0x0001
/* ------------------------------------------------------
LISTENER META-FRAME CODE/PRIMITIVE DEFINITIONS
------------------------------------------------------ */
#define META_CODE_LL_UDATA_RX 0x01
#define META_CODE_LL_UDATA_TX 0x02
#define META_CODE_LL_DATA_RX 0x03
#define META_CODE_LL_DATA_TX 0x04
#define META_CODE_LL_MDATA_RX 0x05
#define META_CODE_LL_MDATA_TX 0x06
#define META_CODE_EMPTY 0x10
#define META_CODE_LOST_FRAMES 0x11
#define META_FLAG_TRUNCATED 0x0001
/*------------------------------------------------------------------*/
/* CAPI-like profile to indicate features on LAW_REQ */
/*------------------------------------------------------------------*/
......@@ -577,6 +615,14 @@ Byte | 8 7 6 5 4 3 2 1
#define MANUFACTURER_FEATURE_DMACONNECT 0x04000000L
#define MANUFACTURER_FEATURE_AUDIO_TAP 0x08000000L
#define MANUFACTURER_FEATURE_FAX_NONSTANDARD 0x10000000L
#define MANUFACTURER_FEATURE_SS7 0x20000000L
#define MANUFACTURER_FEATURE_MADAPTER 0x40000000L
#define MANUFACTURER_FEATURE_MEASURE 0x80000000L
#define MANUFACTURER_FEATURE2_LISTENING 0x00000001L
#define MANUFACTURER_FEATURE2_SS_DIFFCONTPOSSIBLE 0x00000002L
#define MANUFACTURER_FEATURE2_GENERIC_TONE 0x00000004L
#define MANUFACTURER_FEATURE2_COLOR_FAX 0x00000008L
#define MANUFACTURER_FEATURE2_SS_ECT_DIFFCONTPOSSIBLE 0x00000010L
#define RTP_PRIM_PAYLOAD_PCMU_8000 0
#define RTP_PRIM_PAYLOAD_1016_8000 1
#define RTP_PRIM_PAYLOAD_G726_32_8000 2
......@@ -624,6 +670,15 @@ Byte | 8 7 6 5 4 3 2 1
#define VSINVOKEID 4
#define VSCLMRKS 5
#define VSTBCTIDENT 6
#define VSETSILINKID 7
#define VSSAMECONTROLLER 8
/* Errorcodes for VSETSILINKID begin */
#define VSETSILINKIDRRWC 1
#define VSETSILINKIDREJECT 2
#define VSETSILINKIDTIMEOUT 3
#define VSETSILINKIDFAILCOUNT 4
#define VSETSILINKIDERROR 5
/* Errorcodes for VSETSILINKID end */
/* -----------------------------------------------------------**
** The PROTOCOL_FEATURE_STRING in feature.h (included **
** in prstart.sx and astart.sx) defines capabilities and **
......@@ -647,5 +702,37 @@ Byte | 8 7 6 5 4 3 2 1
#define PROTCAP_FREE13 0x2000 /* not used */
#define PROTCAP_FREE14 0x4000 /* not used */
#define PROTCAP_EXTENSION 0x8000 /* used for future extensions */
/* -----------------------------------------------------------* */
/* Onhook data transmission ETS30065901 */
/* Message Type */
/*#define RESERVED4 0x4*/
#define CALL_SETUP 0x80
#define MESSAGE_WAITING_INDICATOR 0x82
/*#define RESERVED84 0x84*/
/*#define RESERVED85 0x85*/
#define ADVICE_OF_CHARGE 0x86
/* F1H (1111 0001) to FFH (1111 1111) - Reserved for network operator use */
/* Parameter Types */
#define DATE_AND_TIME 1
#define CLI_PARAMETER_TYPE 2
#define CALLED_DIRECTORY_NUMBER_PARAMETER_TYPE 3
#define REASON_FOR_ABSENCE_OF_CLI_PARAMETER_TYPE 4
#define NAME_PARAMETER_TYPE 7
#define REASON_FOR_ABSENCE_OF_CALLING_PARTY_NAME_PARAMETER_TYPE 8
#define VISUAL_INDICATOR_PARAMETER_TYPE 0xb
#define COMPLEMENTARY_CLI_PARAMETER_TYPE 0x10
#define CALL_TYPE_PARAMETER_TYPE 0x11
#define FIRST_CALLED_LINE_DIRECTORY_NUMBER_PARAMETER_TYPE 0x12
#define NETWORK_MESSAGE_SYSTEM_STATUS_PARAMETER_TYPE 0x13
#define FORWARDED_CALL_TYPE_PARAMETER_TYPE 0x15
#define TYPE_OF_CALLING_USER_PARAMETER_TYPE 0x16
#define REDIRECTING_NUMBER_PARAMETER_TYPE 0x1a
#define EXTENSION_FOR_NETWORK_OPERATOR_USE_PARAMETER_TYPE 0xe0
/* -----------------------------------------------------------* */
#else
#endif /* PC_H_INCLUDED } */
/* $Id: platform.h,v 1.37.4.1 2004/07/28 14:47:21 armin Exp $
/* $Id: platform.h,v 1.37.4.2 2004/08/28 20:03:53 armin Exp $
*
* platform.h
*
......@@ -269,20 +269,6 @@ static __inline__ void diva_os_leave_spin_lock (diva_os_spin_lock_t* a, \
diva_os_spin_lock_magic_t* old_irql, \
void* dbg) { spin_unlock_bh(a); }
static __inline__ void diva_os_enter_spin_lock_hard (diva_os_spin_lock_t* a, \
diva_os_spin_lock_magic_t* old_irql, \
void* dbg) { \
unsigned long flags; \
spin_lock_irqsave (a, flags); \
*old_irql = (diva_os_spin_lock_magic_t)flags; \
}
static __inline__ void diva_os_leave_spin_lock_hard (diva_os_spin_lock_t* a, \
diva_os_spin_lock_magic_t* old_irql, \
void* dbg) { \
unsigned long flags = (unsigned long)*old_irql; \
spin_unlock_irqrestore (a, flags); \
}
#define diva_os_destroy_spin_lock(a,b) do { } while(0)
/*
......@@ -347,12 +333,18 @@ diva_os_atomic_decrement(diva_os_atomic_t* pv)
#define DIVA_IDI_RX_DMA 1
/*
** endian macros
*/
#define READ_WORD(addr) readw(addr)
#define READ_DWORD(addr) readl(addr)
#define WRITE_WORD(addr,v) writew(v,addr)
#define WRITE_DWORD(addr,v) writel(v,addr)
/*
** 32/64 bit macros
*/
#ifdef BITS_PER_LONG
#if BITS_PER_LONG > 32
#define PLATFORM_GT_32BIT
......@@ -360,8 +352,23 @@ diva_os_atomic_decrement(diva_os_atomic_t* pv)
#endif
#endif
/*
** undef os definitions of macros we use
*/
#undef ID_MASK
#undef N_DATA
#undef ADDR
/*
** dump file
*/
#define diva_os_dump_file_t char
#define diva_os_board_trace_t char
#define diva_os_dump_file(__x__) do { } while(0)
/*
** size of internal arrays
*/
#define MAX_DESCRIPTORS 64
#endif /* __PLATFORM_H__ */
......@@ -212,7 +212,7 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
#define MAX_TXABSDELAY 0xFFFF
#define MIN_TXABSDELAY 0
#define DEFAULT_ITR 1
#define DEFAULT_ITR 8000
#define MAX_ITR 100000
#define MIN_ITR 100
......
......@@ -228,6 +228,21 @@ typedef struct emac_regs {
(desc & EMAC_BAD_RX_PACKET)
#endif
/* SoC implementation specific EMAC register defaults */
#if defined(CONFIG_440GP)
#define EMAC_RWMR_DEFAULT 0x80009000
#define EMAC_TMR0_DEFAULT 0x00000000
#define EMAC_TMR1_DEFAULT 0xf8640000
#elif defined(CONFIG_440GX)
#define EMAC_RWMR_DEFAULT 0x1000a200
#define EMAC_TMR0_DEFAULT EMAC_TMR0_TFAE_2_32
#define EMAC_TMR1_DEFAULT 0xa00f0000
#else
#define EMAC_RWMR_DEFAULT 0x0f002000
#define EMAC_TMR0_DEFAULT 0x00000000
#define EMAC_TMR1_DEFAULT 0x380f0000
#endif /* CONFIG_440GP */
/* Revision specific EMAC register defaults */
#ifdef CONFIG_IBM_EMAC4
#define EMAC_M1_DEFAULT (EMAC_M1_BASE | \
......@@ -236,7 +251,7 @@ typedef struct emac_regs {
#define EMAC_RMR_DEFAULT (EMAC_RMR_BASE | \
EMAC_RMR_RFAF_128_2048)
#define EMAC_TMR0_XMIT (EMAC_TMR0_GNP0 | \
EMAC_TMR0_TFAE_128_2048)
EMAC_TMR0_DEFAULT)
#define EMAC_TRTR_DEFAULT EMAC_TRTR_1024
#else /* !CONFIG_IBM_EMAC4 */
#define EMAC_M1_DEFAULT EMAC_M1_BASE
......@@ -245,19 +260,4 @@ typedef struct emac_regs {
#define EMAC_TRTR_DEFAULT EMAC_TRTR_1600
#endif /* CONFIG_IBM_EMAC4 */
/* SoC implementation specific EMAC register defaults */
#if defined(CONFIG_440GP)
#define EMAC_RWMR_DEFAULT 0x80009000
#define EMAC_TMR0_DEFAULT 0x00000000
#define EMAC_TMR1_DEFAULT 0xf8640000
#elif defined(CONFIG_440GX)
#define EMAC_RWMR_DEFAULT 0x1000a200
#define EMAC_TMR0_DEFAULT EMAC_TMR0_TFAE_128_2048
#define EMAC_TMR1_DEFAULT 0x88810000
#else
#define EMAC_RWMR_DEFAULT 0x0f002000
#define EMAC_TMR0_DEFAULT 0x00000000
#define EMAC_TMR1_DEFAULT 0x380f0000
#endif /* CONFIG_440GP */
#endif
......@@ -90,23 +90,24 @@ MODULE_PARM_DESC(skb_res, "Amount of data to reserve on skb buffs\n"
#define RGMII_PRIV(ocpdev) ((struct ibm_ocp_rgmii*)ocp_get_drvdata(ocpdev))
static unsigned int rgmii_enable[] =
{ RGMII_RTBI, RGMII_RGMII, RGMII_TBI, RGMII_GMII };
static unsigned int rgmii_enable[] = {
RGMII_RTBI,
RGMII_RGMII,
RGMII_TBI,
RGMII_GMII
};
static unsigned int rgmii_speed_mask[] = { 0,
0,
static unsigned int rgmii_speed_mask[] = {
RGMII_MII2_SPDMASK,
RGMII_MII3_SPDMASK
};
static unsigned int rgmii_speed100[] = { 0,
0,
static unsigned int rgmii_speed100[] = {
RGMII_MII2_100MB,
RGMII_MII3_100MB
};
static unsigned int rgmii_speed1000[] = { 0,
0,
static unsigned int rgmii_speed1000[] = {
RGMII_MII2_1000MB,
RGMII_MII3_1000MB
};
......@@ -122,11 +123,21 @@ static unsigned int zmii_enable[][4] = {
~(ZMII_MDI0 | ZMII_MDI1 | ZMII_MDI3)},
{ZMII_SMII3, ZMII_RMII3, ZMII_MII3, ~(ZMII_MDI0 | ZMII_MDI1 | ZMII_MDI2)}
};
static unsigned int mdi_enable[] =
{ ZMII_MDI0, ZMII_MDI1, ZMII_MDI2, ZMII_MDI3 };
static unsigned int mdi_enable[] = {
ZMII_MDI0,
ZMII_MDI1,
ZMII_MDI2,
ZMII_MDI3
};
static unsigned int zmii_speed = 0x0;
static unsigned int zmii_speed100[] = { ZMII_MII0_100MB, ZMII_MII1_100MB };
static unsigned int zmii_speed100[] = {
ZMII_MII0_100MB,
ZMII_MII1_100MB,
ZMII_MII2_100MB,
ZMII_MII3_100MB
};
/* Since multiple EMACs share MDIO lines in various ways, we need
* to avoid re-using the same PHY ID in cases where the arch didn't
......@@ -367,6 +378,7 @@ static void emac_close_zmii(struct ocp_device *ocpdev)
int emac_phy_read(struct net_device *dev, int mii_id, int reg)
{
int count;
uint32_t stacr;
struct ocp_enet_private *fep = dev->priv;
emac_t *emacp = fep->emacp;
......@@ -385,9 +397,13 @@ int emac_phy_read(struct net_device *dev, int mii_id, int reg)
emacp = fep->emacp;
}
udelay(MDIO_DELAY);
count = 0;
while ((((stacr = in_be32(&emacp->em0stacr)) & EMAC_STACR_OC) == 0)
&& (count++ < MDIO_DELAY))
udelay(1);
MDIO_DEBUG((" (count was %d)\n", count));
if ((in_be32(&emacp->em0stacr) & EMAC_STACR_OC) == 0) {
if ((stacr & EMAC_STACR_OC) == 0) {
printk(KERN_WARNING "%s: PHY read timeout #1!\n", dev->name);
return -1;
}
......@@ -398,8 +414,11 @@ int emac_phy_read(struct net_device *dev, int mii_id, int reg)
out_be32(&emacp->em0stacr, stacr);
udelay(MDIO_DELAY);
stacr = in_be32(&emacp->em0stacr);
count = 0;
while ((((stacr = in_be32(&emacp->em0stacr)) & EMAC_STACR_OC) == 0)
&& (count++ < MDIO_DELAY))
udelay(1);
MDIO_DEBUG((" (count was %d)\n", count));
if ((stacr & EMAC_STACR_OC) == 0) {
printk(KERN_WARNING "%s: PHY read timeout #2!\n", dev->name);
......@@ -419,6 +438,7 @@ int emac_phy_read(struct net_device *dev, int mii_id, int reg)
void emac_phy_write(struct net_device *dev, int mii_id, int reg, int data)
{
int count;
uint32_t stacr;
struct ocp_enet_private *fep = dev->priv;
emac_t *emacp = fep->emacp;
......@@ -437,9 +457,13 @@ void emac_phy_write(struct net_device *dev, int mii_id, int reg, int data)
emacp = fep->emacp;
}
udelay(MDIO_DELAY);
count = 0;
while ((((stacr = in_be32(&emacp->em0stacr)) & EMAC_STACR_OC) == 0)
&& (count++ < MDIO_DELAY))
udelay(1);
MDIO_DEBUG((" (count was %d)\n", count));
if ((in_be32(&emacp->em0stacr) & EMAC_STACR_OC) == 0) {
if ((stacr & EMAC_STACR_OC) == 0) {
printk(KERN_WARNING "%s: PHY write timeout #2!\n", dev->name);
return;
}
......@@ -451,9 +475,12 @@ void emac_phy_write(struct net_device *dev, int mii_id, int reg, int data)
out_be32(&emacp->em0stacr, stacr);
udelay(MDIO_DELAY);
while (((stacr = in_be32(&emacp->em0stacr) & EMAC_STACR_OC) == 0)
&& (count++ < 5000))
udelay(1);
MDIO_DEBUG((" (count was %d)\n", count));
if ((in_be32(&emacp->em0stacr) & EMAC_STACR_OC) == 0)
if ((stacr & EMAC_STACR_OC) == 0)
printk(KERN_WARNING "%s: PHY write timeout #2!\n", dev->name);
/* Check for a write error */
......@@ -1940,8 +1967,6 @@ static struct ocp_driver emac_driver = {
static int __init emac_init(void)
{
int rc;
printk(KERN_INFO DRV_NAME ": " DRV_DESC ", version " DRV_VERSION "\n");
printk(KERN_INFO "Maintained by " DRV_AUTHOR "\n");
......@@ -1950,13 +1975,8 @@ static int __init emac_init(void)
skb_res);
skb_res = 2;
}
rc = ocp_register_driver(&emac_driver);
if (rc < 0) {
ocp_unregister_driver(&emac_driver);
return -ENODEV;
}
return 0;
return ocp_register_driver(&emac_driver);
}
static void __exit emac_exit(void)
......
......@@ -67,7 +67,7 @@
#define TX_TIMEOUT (2*HZ)
/* MDIO latency delay */
#define MDIO_DELAY 50
#define MDIO_DELAY 250
/* Power management shift registers */
#define IBM_CPM_EMMII 0 /* Shift value for MII */
......
......@@ -124,6 +124,7 @@ static struct mii_chip_info {
{ "AMD 79C901 HomePNA PHY", 0x0000, 0x6B90, HOME},
{ "ICS LAN PHY", 0x0015, 0xF440, LAN },
{ "NS 83851 PHY", 0x2000, 0x5C20, MIX },
{ "NS 83847 PHY", 0x2000, 0x5C30, MIX },
{ "Realtek RTL8201 PHY", 0x0000, 0x8200, LAN },
{ "VIA 6103 PHY", 0x0101, 0x8f20, LAN },
{NULL,},
......
......@@ -216,7 +216,21 @@ NCR_Q720_probe(struct device *dev)
goto out_free;
}
mem_base = (__u32)ioremap(base_addr, mem_size);
if (dma_declare_coherent_memory(dev, base_addr, base_addr,
mem_size, DMA_MEMORY_MAP)
!= DMA_MEMORY_MAP) {
printk(KERN_ERR "NCR_Q720: DMA declare memory failed\n");
goto out_release_region;
}
/* The first 1k of the memory buffer is a memory map of the registers
*/
mem_base = (__u32)dma_mark_declared_memory_occupied(dev, base_addr,
1024);
if (IS_ERR((void *)mem_base)) {
printk("NCR_Q720 failed to reserve memory mapped region\n");
goto out_release;
}
/* now also enable accesses in asr 2 */
asr2 = inb(io_base + 0x0a);
......@@ -296,7 +310,8 @@ NCR_Q720_probe(struct device *dev)
return 0;
out_release:
iounmap((void *)mem_base);
dma_release_declared_memory(dev);
out_release_region:
release_mem_region(base_addr, mem_size);
out_free:
kfree(p);
......@@ -321,7 +336,7 @@ NCR_Q720_remove(struct device *dev)
if(p->hosts[i])
NCR_Q720_remove_one(p->hosts[i]);
iounmap((void *)p->mem_base);
dma_release_declared_memory(dev);
release_mem_region(p->phys_mem_base, p->mem_size);
free_irq(p->irq, p);
kfree(p);
......
/*
* Copyright (c) International Business Machines Corp., 2002
* Copyright (c) Andreas Gruenbacher, 2001
* Copyright (c) Linus Torvalds, 1991, 1992
* Copyright (C) International Business Machines Corp., 2002-2004
* Copyright (C) Andreas Gruenbacher, 2001
* Copyright (C) Linus Torvalds, 1991, 1992
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
......@@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/quotaops.h>
#include "jfs_incore.h"
#include "jfs_xattr.h"
#include "jfs_acl.h"
......@@ -281,6 +282,12 @@ int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
if (rc)
return rc;
if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
(iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
if (DQUOT_TRANSFER(inode, iattr))
return -EDQUOT;
}
rc = inode_setattr(inode, iattr);
if (!rc && (iattr->ia_valid & ATTR_MODE))
......
/*
* Copyright (c) International Business Machines Corp., 2000-2002
* Portions Copyright (c) Christoph Hellwig, 2001-2002
* Copyright (C) International Business Machines Corp., 2000-2004
* Portions Copyright (C) Christoph Hellwig, 2001-2002
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
......@@ -21,6 +21,7 @@
#include <linux/mpage.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_imap.h"
......@@ -134,6 +135,13 @@ void jfs_delete_inode(struct inode *inode)
diFree(inode);
/*
* Free the inode from the quota allocation.
*/
DQUOT_INIT(inode);
DQUOT_FREE_INODE(inode);
DQUOT_DROP(inode);
clear_inode(inode);
}
......
......@@ -101,6 +101,7 @@
*/
#include <linux/fs.h>
#include <linux/quotaops.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
......@@ -380,7 +381,8 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
* It's time to move the inline table to an external
* page and begin to build the xtree
*/
if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr))
if (DQUOT_ALLOC_BLOCK(ip, sbi->nbperpage) ||
dbAlloc(ip, 0, sbi->nbperpage, &xaddr))
goto clean_up; /* No space */
/*
......@@ -405,7 +407,6 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
goto clean_up;
}
ip->i_size = PSIZE;
ip->i_blocks += LBLK2PBLK(sb, sbi->nbperpage);
if ((mp = get_index_page(ip, 0)) == 0) {
jfs_err("add_index: get_metapage failed!");
......@@ -447,7 +448,6 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
goto clean_up;
}
ip->i_size += PSIZE;
ip->i_blocks += LBLK2PBLK(sb, sbi->nbperpage);
if ((mp = get_index_page(ip, blkno)))
memset(mp->data, 0, PSIZE); /* Just looks better */
......@@ -946,6 +946,7 @@ static int dtSplitUp(tid_t tid,
struct dt_lock *dtlck;
struct tlock *tlck;
struct lv *lv;
int quota_allocation = 0;
/* get split page */
smp = split->mp;
......@@ -992,7 +993,9 @@ static int dtSplitUp(tid_t tid,
split->pxdlist = &pxdlist;
rc = dtSplitRoot(tid, ip, split, &rmp);
if (!rc)
if (rc)
dbFree(ip, xaddr, xlen);
else
DT_PUTPAGE(rmp);
DT_PUTPAGE(smp);
......@@ -1017,6 +1020,14 @@ static int dtSplitUp(tid_t tid,
n = xlen + (xlen << 1);
else
n = xlen;
/* Allocate blocks to quota. */
if (DQUOT_ALLOC_BLOCK(ip, n)) {
rc = -EDQUOT;
goto extendOut;
}
quota_allocation += n;
if ((rc = dbReAlloc(sbi->ipbmap, xaddr, (s64) xlen,
(s64) n, &nxaddr)))
goto extendOut;
......@@ -1285,6 +1296,10 @@ static int dtSplitUp(tid_t tid,
freeKeyName:
kfree(key.name);
/* Rollback quota allocation */
if (rc && quota_allocation)
DQUOT_FREE_BLOCK(ip, quota_allocation);
dtSplitUp_Exit:
return rc;
......@@ -1305,7 +1320,6 @@ static int dtSplitUp(tid_t tid,
static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
struct metapage ** rmpp, dtpage_t ** rpp, pxd_t * rpxdp)
{
struct super_block *sb = ip->i_sb;
int rc = 0;
struct metapage *smp;
dtpage_t *sp;
......@@ -1344,6 +1358,12 @@ static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
if (rmp == NULL)
return -EIO;
/* Allocate blocks to quota. */
if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
release_metapage(rmp);
return -EDQUOT;
}
jfs_info("dtSplitPage: ip:0x%p smp:0x%p rmp:0x%p", ip, smp, rmp);
BT_MARK_DIRTY(rmp, ip);
......@@ -1593,8 +1613,6 @@ static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
*rmpp = rmp;
*rpxdp = *pxd;
ip->i_blocks += LBLK2PBLK(sb, lengthPXD(pxd));
return rc;
}
......@@ -1823,16 +1841,6 @@ static int dtExtendPage(tid_t tid,
tpxd = (pxd_t *) & pp->slot[1];
*tpxd = *pxd;
/* Since the directory might have an EA and/or ACL associated with it
* we need to make sure we take that into account when setting the
* i_nblocks
*/
ip->i_blocks = LBLK2PBLK(ip->i_sb, xlen +
((JFS_IP(ip)->ea.flag & DXD_EXTENT) ?
lengthDXD(&JFS_IP(ip)->ea) : 0) +
((JFS_IP(ip)->acl.flag & DXD_EXTENT) ?
lengthDXD(&JFS_IP(ip)->acl) : 0));
DT_PUTPAGE(pmp);
return 0;
}
......@@ -1900,6 +1908,12 @@ static int dtSplitRoot(tid_t tid,
rp = rmp->data;
/* Allocate blocks to quota. */
if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
release_metapage(rmp);
return -EDQUOT;
}
BT_MARK_DIRTY(rmp, ip);
/*
* acquire a transaction lock on the new right page
......@@ -2042,7 +2056,6 @@ static int dtSplitRoot(tid_t tid,
*rmpp = rmp;
ip->i_blocks += LBLK2PBLK(ip->i_sb, lengthPXD(pxd));
return 0;
}
......@@ -2265,7 +2278,9 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
}
xlen = lengthPXD(&fp->header.self);
ip->i_blocks -= LBLK2PBLK(ip->i_sb, xlen);
/* Free quota allocation. */
DQUOT_FREE_BLOCK(ip, xlen);
/* free/invalidate its buffer page */
discard_metapage(fmp);
......@@ -2339,7 +2354,9 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
}
xlen = lengthPXD(&p->header.self);
ip->i_blocks -= LBLK2PBLK(ip->i_sb, xlen);
/* Free quota allocation */
DQUOT_FREE_BLOCK(ip, xlen);
/* free/invalidate its buffer page */
discard_metapage(mp);
......@@ -2877,14 +2894,6 @@ void dtInitRoot(tid_t tid, struct inode *ip, u32 idotdot)
/* init '..' entry */
p->header.idotdot = cpu_to_le32(idotdot);
#if 0
ip->i_blocks = LBLK2PBLK(ip->i_sb,
((jfs_ip->ea.flag & DXD_EXTENT) ?
lengthDXD(&jfs_ip->ea) : 0) +
((jfs_ip->acl.flag & DXD_EXTENT) ?
lengthDXD(&jfs_ip->acl) : 0));
#endif
return;
}
......
/*
* Copyright (C) International Business Machines Corp., 2000-2003
* Copyright (C) International Business Machines Corp., 2000-2004
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
......@@ -17,6 +17,7 @@
*/
#include <linux/fs.h>
#include <linux/quotaops.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
......@@ -144,6 +145,13 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, boolean_t abnr)
return (rc);
}
/* Allocate blocks to quota. */
if (DQUOT_ALLOC_BLOCK(ip, nxlen)) {
dbFree(ip, nxaddr, (s64) nxlen);
up(&JFS_IP(ip)->commit_sem);
return -EDQUOT;
}
/* determine the value of the extent flag */
xflag = (abnr == TRUE) ? XAD_NOTRECORDED : 0;
......@@ -161,13 +169,11 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, boolean_t abnr)
*/
if (rc) {
dbFree(ip, nxaddr, nxlen);
DQUOT_FREE_BLOCK(ip, nxlen);
up(&JFS_IP(ip)->commit_sem);
return (rc);
}
/* update the number of blocks allocated to the file */
ip->i_blocks += LBLK2PBLK(ip->i_sb, nxlen);
/* set the results of the extent allocation */
XADaddress(xp, nxaddr);
XADlength(xp, nxlen);
......@@ -254,6 +260,13 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, boolean_t abnr)
if ((rc = extBrealloc(ip, xaddr, xlen, &nxlen, &nxaddr)))
goto exit;
/* Allocate blocks to quota. */
if (DQUOT_ALLOC_BLOCK(ip, nxlen)) {
dbFree(ip, nxaddr, (s64) nxlen);
up(&JFS_IP(ip)->commit_sem);
return -EDQUOT;
}
delta = nxlen - xlen;
/* check if the extend page is not abnr but the request is abnr
......@@ -289,6 +302,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, boolean_t abnr)
/* extend the extent */
if ((rc = xtExtend(0, ip, xoff + xlen, (int) nextend, 0))) {
dbFree(ip, xaddr + xlen, delta);
DQUOT_FREE_BLOCK(ip, nxlen);
goto exit;
}
} else {
......@@ -299,6 +313,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, boolean_t abnr)
*/
if ((rc = xtTailgate(0, ip, xoff, (int) ntail, nxaddr, 0))) {
dbFree(ip, nxaddr, nxlen);
DQUOT_FREE_BLOCK(ip, nxlen);
goto exit;
}
}
......@@ -320,9 +335,6 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, boolean_t abnr)
}
}
/* update the inode with the number of blocks allocated */
ip->i_blocks += LBLK2PBLK(sb, delta);
/* set the return results */
XADaddress(xp, nxaddr);
XADlength(xp, nxlen);
......
/*
* Copyright (C) International Business Machines Corp., 2000-2003
* Copyright (C) International Business Machines Corp., 2000-2004
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
......@@ -44,6 +44,7 @@
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
......@@ -504,6 +505,9 @@ struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
ip->i_mapping->a_ops = &jfs_aops;
mapping_set_gfp_mask(ip->i_mapping, GFP_NOFS);
/* Allocations to metadata inodes should not affect quotas */
ip->i_flags |= S_NOQUOTA;
if ((inum == FILESYSTEM_I) && (JFS_IP(ip)->ipimap == sbi->ipaimap)) {
sbi->gengen = le32_to_cpu(dp->di_gengen);
sbi->inostamp = le32_to_cpu(dp->di_inostamp);
......@@ -2601,28 +2605,11 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
iagp->inosmap[i] = ONES;
flush_metapage(mp);
#ifdef _STILL_TO_PORT
/* synchronously write the iag page */
if (bmWrite(bp)) {
/* Free the blocks allocated for the iag since it was
* not successfully added to the inode map
*/
dbFree(ipimap, xaddr, (s64) xlen);
/* release the inode map lock */
IWRITE_UNLOCK(ipimap);
rc = -EIO;
goto out;
}
/* Now the iag is on disk */
/*
* start transaction of update of the inode map
* addressing structure pointing to the new iag page;
*/
#endif /* _STILL_TO_PORT */
tid = txBegin(sb, COMMIT_FORCE);
down(&JFS_IP(ipimap)->commit_sem);
......@@ -2644,7 +2631,7 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
/* update the inode map's inode to reflect the extension */
ipimap->i_size += PSIZE;
ipimap->i_blocks += LBLK2PBLK(sb, xlen);
inode_add_bytes(ipimap, PSIZE);
/*
* txCommit(COMMIT_FORCE) will synchronously write address
......@@ -3085,7 +3072,7 @@ static void duplicateIXtree(struct super_block *sb, s64 blkno,
}
/* update the inode map's inode to reflect the extension */
ip->i_size += PSIZE;
ip->i_blocks += LBLK2PBLK(sb, xlen);
inode_add_bytes(ip, PSIZE);
txCommit(tid, 1, &ip, COMMIT_FORCE);
cleanup:
txEnd(tid);
......
/*
* Copyright (c) International Business Machines Corp., 2000-2002
* Copyright (C) International Business Machines Corp., 2000-2004
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
......@@ -17,6 +17,7 @@
*/
#include <linux/fs.h>
#include <linux/quotaops.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_imap.h"
......@@ -60,6 +61,17 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
} else
inode->i_gid = current->fsgid;
/*
* Allocate inode to quota.
*/
if (DQUOT_ALLOC_INODE(inode)) {
DQUOT_DROP(inode);
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
iput(inode);
return NULL;
}
inode->i_mode = mode;
if (S_ISDIR(mode))
jfs_inode->mode2 = IDIRECTORY | mode;
......
......@@ -557,6 +557,7 @@ void __invalidate_metapages(struct inode *ip, s64 addr, int len)
if (page) {
block_invalidatepage(page, 0);
unlock_page(page);
page_cache_release(page);
}
}
}
......
......@@ -197,9 +197,6 @@ int jfs_mount(struct super_block *sb)
/*
* unwind on error
*/
//errout42: /* close fileset inode allocation map */
diUnmount(ipimap, 1);
errout41: /* close fileset inode allocation map inode */
diFreeSpecial(ipimap);
......
......@@ -2621,8 +2621,6 @@ void txAbort(tid_t tid, int dirty)
struct tblock *tblk = tid_to_tblock(tid);
struct tlock *tlck;
jfs_warn("txAbort: tid:%d dirty:0x%x", tid, dirty);
/*
* free tlocks of the transaction
*/
......
/*
* Copyright (C) International Business Machines Corp., 2000-2003
* Copyright (C) International Business Machines Corp., 2000-2004
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
......@@ -20,6 +20,7 @@
*/
#include <linux/fs.h>
#include <linux/quotaops.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
......@@ -829,8 +830,12 @@ int xtInsert(tid_t tid, /* transaction id */
hint = addressXAD(xad) + lengthXAD(xad) - 1;
} else
hint = 0;
if ((rc = dbAlloc(ip, hint, (s64) xlen, &xaddr)))
if ((rc = DQUOT_ALLOC_BLOCK(ip, xlen)))
goto out;
if ((rc = dbAlloc(ip, hint, (s64) xlen, &xaddr))) {
DQUOT_FREE_BLOCK(ip, xlen);
goto out;
}
}
/*
......@@ -855,8 +860,10 @@ int xtInsert(tid_t tid, /* transaction id */
split.pxdlist = NULL;
if ((rc = xtSplitUp(tid, ip, &split, &btstack))) {
/* undo data extent allocation */
if (*xaddrp == 0)
if (*xaddrp == 0) {
dbFree(ip, xaddr, (s64) xlen);
DQUOT_FREE_BLOCK(ip, xlen);
}
return rc;
}
......@@ -1214,22 +1221,34 @@ xtSplitPage(tid_t tid, struct inode *ip,
pxd_t *pxd;
struct tlock *tlck;
struct xtlock *sxtlck = NULL, *rxtlck = NULL;
int quota_allocation = 0;
smp = split->mp;
sp = XT_PAGE(ip, smp);
INCREMENT(xtStat.split);
/*
* allocate the new right page for the split
*/
pxdlist = split->pxdlist;
pxd = &pxdlist->pxd[pxdlist->npxd];
pxdlist->npxd++;
rbn = addressPXD(pxd);
/* Allocate blocks to quota. */
if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
rc = -EDQUOT;
goto clean_up;
}
quota_allocation += lengthPXD(pxd);
/*
* allocate the new right page for the split
*/
rmp = get_metapage(ip, rbn, PSIZE, 1);
if (rmp == NULL)
return -EIO;
if (rmp == NULL) {
rc = -EIO;
goto clean_up;
}
jfs_info("xtSplitPage: ip:0x%p smp:0x%p rmp:0x%p", ip, smp, rmp);
......@@ -1304,8 +1323,6 @@ xtSplitPage(tid_t tid, struct inode *ip,
*rmpp = rmp;
*rbnp = rbn;
ip->i_blocks += LBLK2PBLK(ip->i_sb, lengthPXD(pxd));
jfs_info("xtSplitPage: sp:0x%p rp:0x%p", sp, rp);
return 0;
}
......@@ -1321,7 +1338,7 @@ xtSplitPage(tid_t tid, struct inode *ip,
XT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc);
if (rc) {
XT_PUTPAGE(rmp);
return rc;
goto clean_up;
}
BT_MARK_DIRTY(mp, ip);
......@@ -1420,10 +1437,16 @@ xtSplitPage(tid_t tid, struct inode *ip,
*rmpp = rmp;
*rbnp = rbn;
ip->i_blocks += LBLK2PBLK(ip->i_sb, lengthPXD(pxd));
jfs_info("xtSplitPage: sp:0x%p rp:0x%p", sp, rp);
return rc;
clean_up:
/* Rollback quota allocation. */
if (quota_allocation)
DQUOT_FREE_BLOCK(ip, quota_allocation);
return (rc);
}
......@@ -1478,6 +1501,12 @@ xtSplitRoot(tid_t tid,
if (rmp == NULL)
return -EIO;
/* Allocate blocks to quota. */
if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
release_metapage(rmp);
return -EDQUOT;
}
jfs_info("xtSplitRoot: ip:0x%p rmp:0x%p", ip, rmp);
/*
......@@ -1561,8 +1590,6 @@ xtSplitRoot(tid_t tid,
*rmpp = rmp;
ip->i_blocks += LBLK2PBLK(ip->i_sb, lengthPXD(pxd));
jfs_info("xtSplitRoot: sp:0x%p rp:0x%p", sp, rp);
return 0;
}
......@@ -3909,8 +3936,8 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
else
ip->i_size = newsize;
/* update nblocks to reflect freed blocks */
ip->i_blocks -= LBLK2PBLK(ip->i_sb, nfreed);
/* update quota allocation to reflect freed blocks */
DQUOT_FREE_BLOCK(ip, nfreed);
/*
* free tlock of invalidated pages
......
......@@ -19,6 +19,7 @@
#include <linux/fs.h>
#include <linux/ctype.h>
#include <linux/quotaops.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_inode.h"
......@@ -123,10 +124,10 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, int mode,
*/
ino = ip->i_ino;
if ((rc = dtInsert(tid, dip, &dname, &ino, &btstack))) {
jfs_err("jfs_create: dtInsert returned %d", rc);
if (rc == -EIO)
if (rc == -EIO) {
jfs_err("jfs_create: dtInsert returned -EIO");
txAbort(tid, 1); /* Marks Filesystem dirty */
else
} else
txAbort(tid, 0); /* Filesystem full */
goto out3;
}
......@@ -250,11 +251,10 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode)
*/
ino = ip->i_ino;
if ((rc = dtInsert(tid, dip, &dname, &ino, &btstack))) {
jfs_err("jfs_mkdir: dtInsert returned %d", rc);
if (rc == -EIO)
if (rc == -EIO) {
jfs_err("jfs_mkdir: dtInsert returned -EIO");
txAbort(tid, 1); /* Marks Filesystem dirty */
else
} else
txAbort(tid, 0); /* Filesystem full */
goto out3;
}
......@@ -330,6 +330,9 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name);
/* Init inode for quota operations. */
DQUOT_INIT(ip);
/* directory must be empty to be removed */
if (!dtEmpty(ip)) {
rc = -ENOTEMPTY;
......@@ -455,6 +458,9 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
jfs_info("jfs_unlink: dip:0x%p name:%s", dip, dentry->d_name.name);
/* Init inode for quota operations. */
DQUOT_INIT(ip);
if ((rc = get_UCSname(&dname, dentry)))
goto out;
......@@ -813,7 +819,10 @@ static int jfs_link(struct dentry *old_dentry,
iplist[1] = dir;
rc = txCommit(tid, 2, &iplist[0], 0);
if (!rc)
if (rc) {
ip->i_nlink--;
iput(ip);
} else
d_instantiate(dentry, ip);
free_dname:
......@@ -964,7 +973,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
mp = get_metapage(ip, xaddr, PSIZE, 1);
if (mp == NULL) {
dbFree(ip, extent, xlen);
xtTruncate(tid, ip, 0, COMMIT_PWMAP);
rc = -EIO;
txAbort(tid, 0);
goto out3;
......@@ -975,7 +984,6 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
name += copy_size;
xaddr += JFS_SBI(sb)->nbperpage;
}
ip->i_blocks = LBLK2PBLK(sb, xlen);
}
/*
......@@ -988,7 +996,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
}
if (rc) {
if (xlen)
dbFree(ip, extent, xlen);
xtTruncate(tid, ip, 0, COMMIT_PWMAP);
txAbort(tid, 0);
/* discard new inode */
goto out3;
......@@ -1104,8 +1112,11 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
rc = -EMLINK;
goto out3;
}
} else if (new_ip)
} else if (new_ip) {
IWRITE_LOCK(new_ip);
/* Init inode for quota operations. */
DQUOT_INIT(new_ip);
}
/*
* The real work starts here
......@@ -1174,8 +1185,8 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
ino = old_ip->i_ino;
rc = dtInsert(tid, new_dir, &new_dname, &ino, &btstack);
if (rc) {
jfs_err("jfs_rename: dtInsert failed w/rc = %d",
rc);
if (rc == -EIO)
jfs_err("jfs_rename: dtInsert returned -EIO");
goto out4;
}
if (S_ISDIR(old_ip->i_mode))
......
/*
* Copyright (C) International Business Machines Corp., 2000-2003
* Copyright (C) International Business Machines Corp., 2000-2004
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
......@@ -18,6 +18,7 @@
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/quotaops.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
......@@ -390,7 +391,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
}
/* update bmap file size */
ipbmap->i_size += xlen << sbi->l2bsize;
ipbmap->i_blocks += LBLK2PBLK(sb, xlen);
inode_add_bytes(ipbmap, xlen << sbi->l2bsize);
iplist[0] = ipbmap;
rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE);
......
/*
* Copyright (C) International Business Machines Corp., 2000-2003
* Copyright (C) International Business Machines Corp., 2000-2004
* Copyright (C) Christoph Hellwig, 2002
*
* This program is free software; you can redistribute it and/or modify
......@@ -19,6 +19,7 @@
#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/quotaops.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
......@@ -251,9 +252,17 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
/* figure out how many blocks we need */
nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;
/* Allocate new blocks to quota. */
if (DQUOT_ALLOC_BLOCK(ip, nblocks)) {
return -EDQUOT;
}
rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);
if (rc)
if (rc) {
/* Rollback quota allocation. */
DQUOT_FREE_BLOCK(ip, nblocks);
return rc;
}
/*
* Now have nblocks worth of storage to stuff into the FEALIST.
......@@ -315,6 +324,9 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
return 0;
failed:
/* Rollback quota allocation. */
DQUOT_FREE_BLOCK(ip, nblocks);
dbFree(ip, blkno, nblocks);
return rc;
}
......@@ -448,6 +460,7 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
int blocks_needed, current_blocks;
s64 blkno;
int rc;
int quota_allocation = 0;
/* When fsck.jfs clears a bad ea, it doesn't clear the size */
if (ji->ea.flag == 0)
......@@ -517,10 +530,16 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
sb->s_blocksize_bits;
if (blocks_needed > current_blocks) {
/* Allocate new blocks to quota. */
if (DQUOT_ALLOC_BLOCK(inode, blocks_needed))
return -EDQUOT;
quota_allocation = blocks_needed;
rc = dbAlloc(inode, INOHINT(inode), (s64) blocks_needed,
&blkno);
if (rc)
return rc;
goto clean_up;
DXDlength(&ea_buf->new_ea, blocks_needed);
DXDaddress(&ea_buf->new_ea, blkno);
......@@ -534,7 +553,8 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
1);
if (ea_buf->mp == NULL) {
dbFree(inode, blkno, (s64) blocks_needed);
return -EIO;
rc = -EIO;
goto clean_up;
}
ea_buf->xattr = ea_buf->mp->data;
ea_buf->max_size = (min_size + sb->s_blocksize - 1) &
......@@ -544,7 +564,7 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
if ((rc = ea_read(inode, ea_buf->xattr))) {
discard_metapage(ea_buf->mp);
dbFree(inode, blkno, (s64) blocks_needed);
return rc;
goto clean_up;
}
goto size_check;
}
......@@ -552,8 +572,10 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
ea_buf->mp = read_metapage(inode, addressDXD(&ji->ea),
lengthDXD(&ji->ea) << sb->s_blocksize_bits,
1);
if (ea_buf->mp == NULL)
return -EIO;
if (ea_buf->mp == NULL) {
rc = -EIO;
goto clean_up;
}
ea_buf->xattr = ea_buf->mp->data;
ea_buf->max_size = (ea_size + sb->s_blocksize - 1) &
~(sb->s_blocksize - 1);
......@@ -563,10 +585,18 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
printk(KERN_ERR "ea_get: invalid extended attribute\n");
dump_mem("xattr", ea_buf->xattr, ea_size);
ea_release(inode, ea_buf);
return -EIO;
rc = -EIO;
goto clean_up;
}
return ea_size;
clean_up:
/* Rollback quota allocation */
if (quota_allocation)
DQUOT_FREE_BLOCK(inode, quota_allocation);
return (rc);
}
static void ea_release(struct inode *inode, struct ea_buffer *ea_buf)
......@@ -640,7 +670,10 @@ static int ea_put(struct inode *inode, struct ea_buffer *ea_buf, int new_size)
ji->ea.size = 0;
}
inode->i_blocks += LBLK2PBLK(inode->i_sb, new_blocks - old_blocks);
/* If old blocks exist, they must be removed from quota allocation. */
if (old_blocks)
DQUOT_FREE_BLOCK(inode, old_blocks);
inode->i_ctime = CURRENT_TIME;
rc = txCommit(tid, 1, &inode, 0);
txEnd(tid);
......
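The quota hooks added above follow one pattern throughout the dtree, xtree, extent and xattr paths: charge the blocks to the owner's quota first, do the disk allocation, and undo the charge if anything later fails. A minimal sketch of that pattern follows; jfs_alloc_quota_blocks() is a hypothetical helper used only for illustration, while dbAlloc(), INOHINT() and the DQUOT_* macros are the ones appearing in the hunks above (the usual JFS and quota headers are assumed to be included).
static int jfs_alloc_quota_blocks(struct inode *ip, s64 nblocks, s64 *blkno)
{
	int rc;
	/* reserve the blocks against the owner's quota before allocating */
	if (DQUOT_ALLOC_BLOCK(ip, nblocks))
		return -EDQUOT;
	rc = dbAlloc(ip, INOHINT(ip), nblocks, blkno);
	if (rc)
		/* disk allocation failed: roll the quota charge back */
		DQUOT_FREE_BLOCK(ip, nblocks);
	return rc;
}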
......@@ -163,4 +163,16 @@ dma_cache_sync(void *vaddr, size_t size,
flush_write_buffers();
}
#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
dma_addr_t device_addr, size_t size, int flags);
extern void
dma_release_declared_memory(struct device *dev);
extern void *
dma_mark_declared_memory_occupied(struct device *dev,
dma_addr_t device_addr, size_t size);
#endif
......@@ -98,6 +98,9 @@ extern int bitmap_scnprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
extern int bitmap_parse(const char __user *ubuf, unsigned int ulen,
unsigned long *dst, int nbits);
extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
#define BITMAP_LAST_WORD_MASK(nbits) \
( \
......
......@@ -284,6 +284,9 @@ struct device {
struct list_head dma_pools; /* dma pools (if dma'ble) */
struct dma_coherent_mem *dma_mem; /* internal for coherent mem
override */
void (*release)(struct device * dev);
};
......
#ifndef _ASM_LINUX_DMA_MAPPING_H
#define _ASM_LINUX_DMA_MAPPING_H
#include <linux/err.h>
/* These definitions mirror those in pci.h, so they can be used
* interchangeably with their PCI_ counterparts */
enum dma_data_direction {
......@@ -21,6 +23,33 @@ enum dma_data_direction {
extern u64 dma_get_required_mask(struct device *dev);
/* flags for the coherent memory api */
#define DMA_MEMORY_MAP 0x01
#define DMA_MEMORY_IO 0x02
#define DMA_MEMORY_INCLUDES_CHILDREN 0x04
#define DMA_MEMORY_EXCLUSIVE 0x08
#ifndef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
static inline int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
dma_addr_t device_addr, size_t size, int flags)
{
return 0;
}
static inline void
dma_release_declared_memory(struct device *dev)
{
}
static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
dma_addr_t device_addr, size_t size)
{
return ERR_PTR(-EBUSY);
}
#endif
#endif
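On architectures that do not define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY, the inline stubs above simply return 0 and ERR_PTR(-EBUSY), so a driver has to be ready to fall back to ordinary system-memory allocations. A minimal caller sketch, mirroring the check the NCR_Q720 probe above performs; the function and variable names here are illustrative only.
static int example_setup_coherent(struct device *dev, dma_addr_t phys_base,
				  size_t region_size)
{
	/*
	 * Request a directly mappable region; the no-op stub above always
	 * returns 0, which lands us in the fallback branch.
	 */
	if (dma_declare_coherent_memory(dev, phys_base, phys_base,
					region_size, DMA_MEMORY_MAP)
	    != DMA_MEMORY_MAP) {
		printk(KERN_WARNING "no dedicated coherent region, "
		       "falling back to system memory\n");
		return 0;	/* dma_alloc_coherent() still works */
	}
	return 1;	/* coherent allocations now come from the region */
}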
......@@ -408,3 +408,85 @@ int bitmap_parse(const char __user *ubuf, unsigned int ubuflen,
return 0;
}
EXPORT_SYMBOL(bitmap_parse);
/**
* bitmap_find_free_region - find a contiguous aligned mem region
* @bitmap: an array of unsigned longs corresponding to the bitmap
* @bits: number of bits in the bitmap
* @order: region size to find (size is actually 1<<order)
*
* This is used to allocate a memory region from a bitmap. The idea is
* that the region has to be 1<<order sized and 1<<order aligned (this
* makes the search algorithm much faster).
*
* The region is marked as set bits in the bitmap if a free one is
* found.
*
* Returns either beginning of region or negative error
*/
int bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
{
unsigned long mask;
int pages = 1 << order;
int i;
if(pages > BITS_PER_LONG)
return -EINVAL;
/* make a mask of the order */
mask = (1ul << (pages - 1));
mask += mask - 1;
/* run up the bitmap pages bits at a time */
for (i = 0; i < bits; i += pages) {
int index = i/BITS_PER_LONG;
int offset = i - (index * BITS_PER_LONG);
if((bitmap[index] & (mask << offset)) == 0) {
/* set region in bitmap */
bitmap[index] |= (mask << offset);
return i;
}
}
return -ENOMEM;
}
EXPORT_SYMBOL(bitmap_find_free_region);
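A short, illustrative use of the new helper (not taken from the patch): carve an aligned order-2 region out of a 64-bit bitmap, use it, and hand it back with bitmap_release_region().
static void example_bitmap_regions(void)
{
	DECLARE_BITMAP(map, 64);	/* 64 managed units */
	int pos;
	bitmap_zero(map, 64);
	/* find a free, aligned run of 1 << 2 = 4 bits */
	pos = bitmap_find_free_region(map, 64, 2);
	if (pos < 0)
		return;		/* no aligned region available */
	/* ... bits pos .. pos + 3 now belong to the caller ... */
	bitmap_release_region(map, pos, 2);
}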
/**
* bitmap_release_region - release allocated bitmap region
* @bitmap: a pointer to the bitmap
* @pos: the beginning of the region
* @order: the order of the bits to release (number is 1<<order)
*
* This is the complement to bitmap_find_free_region and releases
* the found region (by clearing it in the bitmap).
*/
void bitmap_release_region(unsigned long *bitmap, int pos, int order)
{
int pages = 1 << order;
unsigned long mask = (1ul << (pages - 1));
int index = pos/BITS_PER_LONG;
int offset = pos - (index * BITS_PER_LONG);
mask += mask - 1;
bitmap[index] &= ~(mask << offset);
}
EXPORT_SYMBOL(bitmap_release_region);
int bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
{
int pages = 1 << order;
unsigned long mask = (1ul << (pages - 1));
int index = pos/BITS_PER_LONG;
int offset = pos - (index * BITS_PER_LONG);
/* We don't do regions of pages > BITS_PER_LONG. The
* algorithm would be a simple look for multiple zeros in the
* array, but there's no driver today that needs this. If you
* trip this BUG(), you get to code it... */
BUG_ON(pages > BITS_PER_LONG);
mask += mask - 1;
if (bitmap[index] & (mask << offset))
return -EBUSY;
bitmap[index] |= (mask << offset);
return 0;
}
EXPORT_SYMBOL(bitmap_allocate_region);
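bitmap_allocate_region() is the non-searching variant: the caller names the position, a fixed-offset reservation of the sort dma_mark_declared_memory_occupied() needs, and the call fails with -EBUSY if any of those bits are already set. Illustrative only; the offsets below are made up.
static int example_reserve_fixed(unsigned long *map)
{
	/* claim bits 8..11 (order 2) at a fixed, caller-chosen offset */
	if (bitmap_allocate_region(map, 8, 2))
		return -EBUSY;	/* part of the range is already in use */
	/* ... */
	bitmap_release_region(map, 8, 2);
	return 0;
}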
......@@ -179,11 +179,26 @@ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
return err;
}
#define IOREMAP_MAX_ORDER (7 + PAGE_SHIFT) /* 128 pages */
struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end)
{
struct vm_struct **p, *tmp, *area;
unsigned long addr = start;
unsigned long align = 1;
unsigned long addr;
if (flags & VM_IOREMAP) {
int bit = fls(size);
if (bit > IOREMAP_MAX_ORDER)
bit = IOREMAP_MAX_ORDER;
else if (bit < PAGE_SHIFT)
bit = PAGE_SHIFT;
align = 1ul << bit;
}
addr = ALIGN(start, align);
area = kmalloc(sizeof(*area), GFP_KERNEL);
if (unlikely(!area))
......@@ -200,13 +215,17 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
write_lock(&vmlist_lock);
for (p = &vmlist; (tmp = *p) != NULL ;p = &tmp->next) {
if ((unsigned long)tmp->addr < addr)
if ((unsigned long)tmp->addr < addr) {
if((unsigned long)tmp->addr + tmp->size >= addr)
addr = ALIGN(tmp->size +
(unsigned long)tmp->addr, align);
continue;
}
if ((size + addr) < addr)
goto out;
if (size + addr <= (unsigned long)tmp->addr)
goto found;
addr = tmp->size + (unsigned long)tmp->addr;
addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
if (addr > end - size)
goto out;
}
......
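The __get_vm_area() change above rounds VM_IOREMAP allocations up to a power-of-two boundary derived from their size, clamped between one page and IOREMAP_MAX_ORDER (128 pages). A standalone restatement of that rule, using the hypothetical name ioremap_align() and assuming 4 KB pages for the worked values:
/*
 * With PAGE_SHIFT == 12 and IOREMAP_MAX_ORDER == 19:
 *   size = 100 bytes -> fls = 7, clamped up    -> align =   4 KB
 *   size = 300 KB    -> fls = 19                -> align = 512 KB
 *   size =   4 MB    -> fls = 23, clamped down  -> align = 512 KB
 */
static unsigned long ioremap_align(unsigned long size)
{
	int bit = fls(size);
	if (bit > IOREMAP_MAX_ORDER)
		bit = IOREMAP_MAX_ORDER;
	else if (bit < PAGE_SHIFT)
		bit = PAGE_SHIFT;
	return 1ul << bit;
}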