Commit 885b2010 authored by Dave Jiang, committed by Vinod Koul

dmaengine: ioatdma: remove dma_v2.*

Clean out dma_v2 and remove ioat2 calls since we are moving everything
to just ioat.
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent 55f878ec
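For orientation before the hunks below: the change is a mechanical rename that drops the ioat2/ioat2_ prefix in favor of ioat/ioat_, together with the removal of dma_v2.h itself (its inline helpers and struct ioat_ring_ent move into dma.h). A few representative before/after pairs, all taken from the diff that follows:

    ioat2_ring_active()       ->  ioat_ring_active()
    ioat2_get_ring_ent()      ->  ioat_get_ring_ent()
    ioat2_check_space_lock()  ->  ioat_check_space_lock()
    ioat2_cache               ->  ioat_cache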
obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
-ioatdma-y := pci.o dma.o dma_v2.o dma_v3.o dca.o
+ioatdma-y := pci.o dma.o dma_v3.o dca.o
@@ -31,7 +31,6 @@
#include "dma.h"
#include "registers.h"
#include "dma_v2.h"
/*
* Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
This diff is collapsed.
@@ -18,13 +18,14 @@
#define IOATDMA_H
#include <linux/dmaengine.h>
#include "hw.h"
#include "registers.h"
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#include <net/tcp.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include "registers.h"
#include "hw.h"
#define IOAT_DMA_VERSION "4.00"
@@ -154,6 +155,41 @@ struct ioat_sed_ent {
unsigned int hw_pool;
};
/**
* struct ioat_ring_ent - wrapper around hardware descriptor
* @hw: hardware DMA descriptor (for memcpy)
* @fill: hardware fill descriptor
* @xor: hardware xor descriptor
* @xor_ex: hardware xor extension descriptor
* @pq: hardware pq descriptor
* @pq_ex: hardware pq extension descriptor
* @pqu: hardware pq update descriptor
* @raw: hardware raw (un-typed) descriptor
* @txd: the generic software descriptor for all engines
* @len: total transaction length for unmap
* @result: asynchronous result of validate operations
* @id: identifier for debug
*/
struct ioat_ring_ent {
union {
struct ioat_dma_descriptor *hw;
struct ioat_xor_descriptor *xor;
struct ioat_xor_ext_descriptor *xor_ex;
struct ioat_pq_descriptor *pq;
struct ioat_pq_ext_descriptor *pq_ex;
struct ioat_pq_update_descriptor *pqu;
struct ioat_raw_descriptor *raw;
};
size_t len;
struct dma_async_tx_descriptor txd;
enum sum_check_flags *result;
#ifdef DEBUG
int id;
#endif
struct ioat_sed_ent *sed;
};
static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
{
return container_of(c, struct ioatdma_chan, dma_chan);
@@ -291,6 +327,60 @@ static inline bool is_ioat_bug(unsigned long err)
return !!err;
}
#define IOAT_MAX_ORDER 16
#define ioat_get_alloc_order() \
(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
#define ioat_get_max_alloc_order() \
(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
return 1 << ioat_chan->alloc_order;
}
/* count of descriptors in flight with the engine */
static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
{
return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
ioat_ring_size(ioat_chan));
}
/* count of descriptors pending submission to hardware */
static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
{
return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
ioat_ring_size(ioat_chan));
}
static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan)
{
return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan);
}
static inline u16
ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
{
u16 num_descs = len >> ioat_chan->xfercap_log;
num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
return num_descs;
}
static inline struct ioat_ring_ent *
ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
{
return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
}
static inline void
ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
{
writel(addr & 0x00000000FFFFFFFF,
ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
writel(addr >> 32,
ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}
int ioat_probe(struct ioatdma_device *ioat_dma);
int ioat_register(struct ioatdma_device *ioat_dma);
int ioat_dma_self_test(struct ioatdma_device *ioat_dma);
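The ring-accounting helpers moved into dma.h above are compact enough to check outside the kernel. Below is a minimal user-space sketch of the same arithmetic; struct ring_state, its field values, and main() are invented for illustration, and only the CIRC_CNT() macro mirrors the definition in <linux/circ_buf.h>:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Same definition as CIRC_CNT() in include/linux/circ_buf.h. */
#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

/* Hypothetical stand-in for the head/issued/tail bookkeeping in ioatdma_chan. */
struct ring_state {
	uint16_t head;      /* next slot the driver will fill */
	uint16_t issued;    /* last slot already handed to hardware */
	uint16_t tail;      /* last slot completed by hardware */
	int alloc_order;    /* ring holds 1 << alloc_order descriptors */
	int xfercap_log;    /* one descriptor moves at most 1 << xfercap_log bytes */
};

static uint32_t ring_size(const struct ring_state *r)
{
	return 1u << r->alloc_order;
}

/* count of descriptors in flight with the engine */
static uint16_t ring_active(const struct ring_state *r)
{
	return CIRC_CNT(r->head, r->tail, ring_size(r));
}

/* count of descriptors pending submission to hardware */
static uint16_t ring_pending(const struct ring_state *r)
{
	return CIRC_CNT(r->head, r->issued, ring_size(r));
}

static uint32_t ring_space(const struct ring_state *r)
{
	return ring_size(r) - ring_active(r);
}

/* ceil(len / 2^xfercap_log): descriptors needed for a transfer of len bytes */
static uint16_t xferlen_to_descs(const struct ring_state *r, size_t len)
{
	uint16_t num_descs = len >> r->xfercap_log;

	num_descs += !!(len & ((1u << r->xfercap_log) - 1));
	return num_descs;
}

int main(void)
{
	struct ring_state r = {
		.head = 10, .issued = 7, .tail = 4,
		.alloc_order = 4,   /* 16-entry ring */
		.xfercap_log = 20,  /* 1 MiB per descriptor */
	};

	/* prints size=16 active=6 pending=3 space=10 */
	printf("size=%u active=%u pending=%u space=%u\n",
	       ring_size(&r), ring_active(&r), ring_pending(&r), ring_space(&r));

	/* 2 MiB + 1 byte needs three descriptors: two full ones plus the remainder */
	printf("descs = %u\n", xferlen_to_descs(&r, (size_t)(2u << 20) + 1));
	return 0;
}

Keeping the ring size a power of two is what lets CIRC_CNT() reduce the index arithmetic to a mask rather than a modulo, which is presumably also why reshape_ring() takes an order rather than an arbitrary entry count.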
@@ -306,7 +396,30 @@ void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *ioat_dma);
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
void ioat_stop(struct ioatdma_chan *ioat_chan);
int ioat_dma_probe(struct ioatdma_device *ioat_dma, int dca);
int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca);
struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
int ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
dma_addr_t dma_src, size_t len, unsigned long flags);
void ioat_issue_pending(struct dma_chan *chan);
int ioat_alloc_chan_resources(struct dma_chan *c);
void ioat_free_chan_resources(struct dma_chan *c);
void __ioat_restart_chan(struct ioatdma_chan *ioat_chan);
bool reshape_ring(struct ioatdma_chan *ioat, int order);
void __ioat_issue_pending(struct ioatdma_chan *ioat_chan);
void ioat_timer_event(unsigned long data);
int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo);
int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo);
extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
extern int ioat_pending_level;
extern int ioat_ring_alloc_order;
extern struct kobj_type ioat_ktype;
extern struct kmem_cache *ioat_cache;
#endif /* IOATDMA_H */
This diff is collapsed.
/*
* Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
#ifndef IOATDMA_V2_H
#define IOATDMA_V2_H
#include <linux/dmaengine.h>
#include <linux/circ_buf.h>
#include "dma.h"
#include "hw.h"
extern int ioat_pending_level;
extern int ioat_ring_alloc_order;
/*
* workaround for IOAT ver.3.0 null descriptor issue
* (channel returns error when size is 0)
*/
#define NULL_DESC_BUFFER_SIZE 1
#define IOAT_MAX_ORDER 16
#define ioat_get_alloc_order() \
(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
#define ioat_get_max_alloc_order() \
(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
static inline u32 ioat2_ring_size(struct ioatdma_chan *ioat_chan)
{
return 1 << ioat_chan->alloc_order;
}
/* count of descriptors in flight with the engine */
static inline u16 ioat2_ring_active(struct ioatdma_chan *ioat_chan)
{
return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
ioat2_ring_size(ioat_chan));
}
/* count of descriptors pending submission to hardware */
static inline u16 ioat2_ring_pending(struct ioatdma_chan *ioat_chan)
{
return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
ioat2_ring_size(ioat_chan));
}
static inline u32 ioat2_ring_space(struct ioatdma_chan *ioat_chan)
{
return ioat2_ring_size(ioat_chan) - ioat2_ring_active(ioat_chan);
}
static inline u16
ioat2_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
{
u16 num_descs = len >> ioat_chan->xfercap_log;
num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
return num_descs;
}
/**
* struct ioat_ring_ent - wrapper around hardware descriptor
* @hw: hardware DMA descriptor (for memcpy)
* @fill: hardware fill descriptor
* @xor: hardware xor descriptor
* @xor_ex: hardware xor extension descriptor
* @pq: hardware pq descriptor
* @pq_ex: hardware pq extension descriptor
* @pqu: hardware pq update descriptor
* @raw: hardware raw (un-typed) descriptor
* @txd: the generic software descriptor for all engines
* @len: total transaction length for unmap
* @result: asynchronous result of validate operations
* @id: identifier for debug
*/
struct ioat_ring_ent {
union {
struct ioat_dma_descriptor *hw;
struct ioat_xor_descriptor *xor;
struct ioat_xor_ext_descriptor *xor_ex;
struct ioat_pq_descriptor *pq;
struct ioat_pq_ext_descriptor *pq_ex;
struct ioat_pq_update_descriptor *pqu;
struct ioat_raw_descriptor *raw;
};
size_t len;
struct dma_async_tx_descriptor txd;
enum sum_check_flags *result;
#ifdef DEBUG
int id;
#endif
struct ioat_sed_ent *sed;
};
static inline struct ioat_ring_ent *
ioat2_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
{
return ioat_chan->ring[idx & (ioat2_ring_size(ioat_chan) - 1)];
}
static inline void
ioat2_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
{
writel(addr & 0x00000000FFFFFFFF,
ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
writel(addr >> 32,
ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}
int ioat2_dma_probe(struct ioatdma_device *ioat_dma, int dca);
int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca);
struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
int ioat2_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
int ioat2_enumerate_channels(struct ioatdma_device *ioat_dma);
struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
dma_addr_t dma_src, size_t len, unsigned long flags);
void ioat2_issue_pending(struct dma_chan *chan);
int ioat2_alloc_chan_resources(struct dma_chan *c);
void ioat2_free_chan_resources(struct dma_chan *c);
void __ioat2_restart_chan(struct ioatdma_chan *ioat_chan);
bool reshape_ring(struct ioatdma_chan *ioat, int order);
void __ioat2_issue_pending(struct ioatdma_chan *ioat_chan);
void ioat2_timer_event(unsigned long data);
int ioat2_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo);
int ioat2_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo);
extern struct kobj_type ioat2_ktype;
extern struct kmem_cache *ioat2_cache;
#endif /* IOATDMA_V2_H */
@@ -61,7 +61,6 @@
#include "registers.h"
#include "hw.h"
#include "dma.h"
#include "dma_v2.h"
extern struct kmem_cache *ioat3_sed_cache;
@@ -390,13 +389,13 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
if (!phys_complete)
return;
-active = ioat2_ring_active(ioat_chan);
+active = ioat_ring_active(ioat_chan);
for (i = 0; i < active && !seen_current; i++) {
struct dma_async_tx_descriptor *tx;
smp_read_barrier_depends();
-prefetch(ioat2_get_ring_ent(ioat_chan, idx + i + 1));
-desc = ioat2_get_ring_ent(ioat_chan, idx + i);
+prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
+desc = ioat_get_ring_ent(ioat_chan, idx + i);
dump_desc_dbg(ioat_chan, desc);
/* set err stat if we are using dwbes */
@@ -479,11 +478,11 @@ static void ioat3_restart_channel(struct ioatdma_chan *ioat_chan)
{
u64 phys_complete;
-ioat2_quiesce(ioat_chan, 0);
+ioat_quiesce(ioat_chan, 0);
if (ioat3_cleanup_preamble(ioat_chan, &phys_complete))
__cleanup(ioat_chan, phys_complete);
-__ioat2_restart_chan(ioat_chan);
+__ioat_restart_chan(ioat_chan);
}
static void ioat3_eh(struct ioatdma_chan *ioat_chan)
@@ -507,7 +506,7 @@ static void ioat3_eh(struct ioatdma_chan *ioat_chan)
dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
__func__, chanerr, chanerr_int);
-desc = ioat2_get_ring_ent(ioat_chan, ioat_chan->tail);
+desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
hw = desc->hw;
dump_desc_dbg(ioat_chan, desc);
@@ -561,7 +560,7 @@ static void ioat3_eh(struct ioatdma_chan *ioat_chan)
static void check_active(struct ioatdma_chan *ioat_chan)
{
-if (ioat2_ring_active(ioat_chan)) {
+if (ioat_ring_active(ioat_chan)) {
mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
return;
}
@@ -625,7 +624,7 @@ static void ioat3_timer_event(unsigned long data)
}
-if (ioat2_ring_active(ioat_chan))
+if (ioat_ring_active(ioat_chan))
mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
else {
spin_lock_bh(&ioat_chan->prep_lock);
@@ -670,7 +669,7 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
BUG_ON(src_cnt < 2);
-num_descs = ioat2_xferlen_to_descs(ioat_chan, len);
+num_descs = ioat_xferlen_to_descs(ioat_chan, len);
/* we need 2x the number of descriptors to cover greater than 5
* sources
*/
@@ -686,7 +685,7 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
* order.
*/
if (likely(num_descs) &&
-ioat2_check_space_lock(ioat_chan, num_descs+1) == 0)
+ioat_check_space_lock(ioat_chan, num_descs+1) == 0)
idx = ioat_chan->head;
else
return NULL;
@@ -697,14 +696,14 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
len, 1 << ioat_chan->xfercap_log);
int s;
-desc = ioat2_get_ring_ent(ioat_chan, idx + i);
+desc = ioat_get_ring_ent(ioat_chan, idx + i);
xor = desc->xor;
/* save a branch by unconditionally retrieving the
* extended descriptor xor_set_src() knows to not write
* to it in the single descriptor case
*/
-ext = ioat2_get_ring_ent(ioat_chan, idx + i + 1);
+ext = ioat_get_ring_ent(ioat_chan, idx + i + 1);
xor_ex = ext->xor_ex;
descs[0] = (struct ioat_raw_descriptor *) xor;
@@ -730,7 +729,7 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
/* completion descriptor carries interrupt bit */
-compl_desc = ioat2_get_ring_ent(ioat_chan, idx + i);
+compl_desc = ioat_get_ring_ent(ioat_chan, idx + i);
compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
hw = compl_desc->hw;
hw->ctl = 0;
@@ -854,7 +853,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
*/
BUG_ON(src_cnt + dmaf_continue(flags) < 2);
-num_descs = ioat2_xferlen_to_descs(ioat_chan, len);
+num_descs = ioat_xferlen_to_descs(ioat_chan, len);
/* we need 2x the number of descriptors to cover greater than 3
* sources (we need 1 extra source in the q-only continuation
* case and 3 extra sources in the p+q continuation case.
@@ -872,7 +871,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
* order.
*/
if (likely(num_descs) &&
-ioat2_check_space_lock(ioat_chan, num_descs + cb32) == 0)
+ioat_check_space_lock(ioat_chan, num_descs + cb32) == 0)
idx = ioat_chan->head;
else
return NULL;
@@ -882,14 +881,14 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
size_t xfer_size = min_t(size_t, len,
1 << ioat_chan->xfercap_log);
-desc = ioat2_get_ring_ent(ioat_chan, idx + i);
+desc = ioat_get_ring_ent(ioat_chan, idx + i);
pq = desc->pq;
/* save a branch by unconditionally retrieving the
* extended descriptor pq_set_src() knows to not write
* to it in the single descriptor case
*/
-ext = ioat2_get_ring_ent(ioat_chan, idx + i + with_ext);
+ext = ioat_get_ring_ent(ioat_chan, idx + i + with_ext);
pq_ex = ext->pq_ex;
descs[0] = (struct ioat_raw_descriptor *) pq;
@@ -936,7 +935,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
compl_desc = desc;
} else {
/* completion descriptor carries interrupt bit */
-compl_desc = ioat2_get_ring_ent(ioat_chan, idx + i);
+compl_desc = ioat_get_ring_ent(ioat_chan, idx + i);
compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
hw = compl_desc->hw;
hw->ctl = 0;
@@ -972,13 +971,13 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
dev_dbg(to_dev(ioat_chan), "%s\n", __func__);
-num_descs = ioat2_xferlen_to_descs(ioat_chan, len);
+num_descs = ioat_xferlen_to_descs(ioat_chan, len);
/*
* 16 source pq is only available on cb3.3 and has no completion
* write hw bug.
*/
-if (num_descs && ioat2_check_space_lock(ioat_chan, num_descs) == 0)
+if (num_descs && ioat_check_space_lock(ioat_chan, num_descs) == 0)
idx = ioat_chan->head;
else
return NULL;
@@ -990,7 +989,7 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
size_t xfer_size = min_t(size_t, len,
1 << ioat_chan->xfercap_log);
-desc = ioat2_get_ring_ent(ioat_chan, idx + i);
+desc = ioat_get_ring_ent(ioat_chan, idx + i);
pq = desc->pq;
descs[0] = (struct ioat_raw_descriptor *) pq;
@@ -1177,8 +1176,8 @@ ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
struct ioat_ring_ent *desc;
struct ioat_dma_descriptor *hw;
-if (ioat2_check_space_lock(ioat_chan, 1) == 0)
-desc = ioat2_get_ring_ent(ioat_chan, ioat_chan->head);
+if (ioat_check_space_lock(ioat_chan, 1) == 0)
+desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
else
return NULL;
@@ -1533,7 +1532,7 @@ static int ioat3_reset_hw(struct ioatdma_chan *ioat_chan)
u16 dev_id;
int err;
-ioat2_quiesce(ioat_chan, msecs_to_jiffies(100));
+ioat_quiesce(ioat_chan, msecs_to_jiffies(100));
chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
@@ -1561,7 +1560,7 @@ static int ioat3_reset_hw(struct ioatdma_chan *ioat_chan)
}
}
-err = ioat2_reset_sync(ioat_chan, msecs_to_jiffies(200));
+err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
if (!err)
err = ioat3_irq_reinit(ioat_dma);
@@ -1607,15 +1606,15 @@ int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
bool is_raid_device = false;
int err;
-ioat_dma->enumerate_channels = ioat2_enumerate_channels;
+ioat_dma->enumerate_channels = ioat_enumerate_channels;
ioat_dma->reset_hw = ioat3_reset_hw;
ioat_dma->self_test = ioat3_dma_self_test;
ioat_dma->intr_quirk = ioat3_intr_quirk;
dma = &ioat_dma->dma_dev;
-dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
-dma->device_issue_pending = ioat2_issue_pending;
-dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
-dma->device_free_chan_resources = ioat2_free_chan_resources;
+dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock;
+dma->device_issue_pending = ioat_issue_pending;
+dma->device_alloc_chan_resources = ioat_alloc_chan_resources;
+dma->device_free_chan_resources = ioat_free_chan_resources;
dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
@@ -1708,7 +1707,7 @@ int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
if (err)
return err;
-ioat_kobject_add(ioat_dma, &ioat2_ktype);
+ioat_kobject_add(ioat_dma, &ioat_ktype);
if (dca)
ioat_dma->dca = ioat3_dca_init(pdev, ioat_dma->reg_base);
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/acpi.h>
#include "dma.h"
#include "dma_v2.h"
#include "registers.h"
#include "hw.h"
@@ -115,7 +114,7 @@ static int ioat_dca_enabled = 1;
module_param(ioat_dca_enabled, int, 0644);
MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
-struct kmem_cache *ioat2_cache;
+struct kmem_cache *ioat_cache;
struct kmem_cache *ioat3_sed_cache;
#define DRV_NAME "ioatdma"
@@ -246,14 +245,14 @@ static int __init ioat_init_module(void)
pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
DRV_NAME, IOAT_DMA_VERSION);
-ioat2_cache = kmem_cache_create("ioat2", sizeof(struct ioat_ring_ent),
+ioat_cache = kmem_cache_create("ioat", sizeof(struct ioat_ring_ent),
0, SLAB_HWCACHE_ALIGN, NULL);
-if (!ioat2_cache)
+if (!ioat_cache)
return -ENOMEM;
ioat3_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
if (!ioat3_sed_cache)
-goto err_ioat2_cache;
+goto err_ioat_cache;
err = pci_register_driver(&ioat_pci_driver);
if (err)
@@ -264,8 +263,8 @@ static int __init ioat_init_module(void)
err_ioat3_cache:
kmem_cache_destroy(ioat3_sed_cache);
-err_ioat2_cache:
-kmem_cache_destroy(ioat2_cache);
+err_ioat_cache:
+kmem_cache_destroy(ioat_cache);
return err;
}
@@ -274,6 +273,6 @@ module_init(ioat_init_module);
static void __exit ioat_exit_module(void)
{
pci_unregister_driver(&ioat_pci_driver);
-kmem_cache_destroy(ioat2_cache);
+kmem_cache_destroy(ioat_cache);
}
module_exit(ioat_exit_module);