Commit de3910eb authored by Mauro Carvalho Chehab

edac: change the mem allocation scheme to make Documentation/kobject.txt happy

Kernel kobjects have rigid rules: each container object must be dynamically allocated on its own, and several of them cannot be carved out of a single kmalloc'ed block.

EDAC never obeyed this rule: it has a single allocation function that packs all the needed data into one kzalloc'ed block.

As this is no longer accepted, change the allocation scheme of the EDAC *_info structs to follow this kernel standard.
Acked-by: Chris Metcalf <cmetcalf@tilera.com>
Cc: Aristeu Rozanski <arozansk@redhat.com>
Cc: Doug Thompson <norsk5@yahoo.com>
Cc: Greg K H <gregkh@linuxfoundation.org>
Cc: Borislav Petkov <borislav.petkov@amd.com>
Cc: Mark Gross <mark.gross@intel.com>
Cc: Tim Small <tim@buttersideup.com>
Cc: Ranganathan Desikan <ravi@jetztechnologies.com>
Cc: "Arvind R." <arvino55@gmail.com>
Cc: Olof Johansson <olof@lixom.net>
Cc: Egor Martovetsky <egor@pasemi.com>
Cc: Michal Marek <mmarek@suse.cz>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Hitoshi Mitake <h.mitake@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Shaohui Xie <Shaohui.Xie@freescale.com>
Cc: linuxppc-dev@lists.ozlabs.org
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
parent e39f4ea9
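
For readers not familiar with the rule the commit message refers to, here is a minimal sketch of the kobject lifetime pattern described in Documentation/kobject.txt, using a hypothetical "foo" object (this is not EDAC code): each structure that embeds a kobject must get its own allocation, because it is freed individually from its ->release() callback.

/*
 * Minimal sketch of the kobject rule cited above -- hypothetical "foo"
 * names, NOT EDAC code.  Every structure that embeds a kobject gets its
 * own kzalloc() and is only freed from its ktype ->release() callback,
 * so several such objects cannot share one kmalloc'ed block.
 */
#include <linux/slab.h>
#include <linux/kobject.h>

struct foo {
    struct kobject kobj;
    int value;
};

static void foo_release(struct kobject *kobj)
{
    /* Freed here, when the last reference to the kobject is dropped. */
    kfree(container_of(kobj, struct foo, kobj));
}

static struct kobj_type foo_ktype = {
    .release = foo_release,
};

static struct foo *foo_create(struct kobject *parent, int value)
{
    struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL); /* one object per allocation */

    if (!f)
        return NULL;
    f->value = value;
    if (kobject_init_and_add(&f->kobj, &foo_ktype, parent, "foo%d", value)) {
        kobject_put(&f->kobj); /* ->release() does the kfree() */
        return NULL;
    }
    return f;
}

This is why a single kzalloc holding the mci, csrows, channels and dimms together cannot work once each of those objects carries its own sysfs representation.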
@@ -2205,6 +2205,7 @@ static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
 static int init_csrows(struct mem_ctl_info *mci)
 {
     struct csrow_info *csrow;
+    struct dimm_info *dimm;
     struct amd64_pvt *pvt = mci->pvt_info;
     u64 base, mask;
     u32 val;
@@ -2222,7 +2223,7 @@ static int init_csrows(struct mem_ctl_info *mci)
         !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
     for_each_chip_select(i, 0, pvt) {
-        csrow = &mci->csrows[i];
+        csrow = mci->csrows[i];
         if (!csrow_enabled(i, 0, pvt) && !csrow_enabled(i, 1, pvt)) {
             debugf1("----CSROW %d EMPTY for node %d\n", i,
@@ -2257,9 +2258,10 @@ static int init_csrows(struct mem_ctl_info *mci)
             edac_mode = EDAC_NONE;
         for (j = 0; j < pvt->channel_count; j++) {
-            csrow->channels[j].dimm->mtype = mtype;
-            csrow->channels[j].dimm->edac_mode = edac_mode;
-            csrow->channels[j].dimm->nr_pages = nr_pages;
+            dimm = csrow->channels[j]->dimm;
+            dimm->mtype = mtype;
+            dimm->edac_mode = edac_mode;
+            dimm->nr_pages = nr_pages;
         }
     }
...
@@ -146,7 +146,7 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
     if (handle_errors) {
         row = (info->ecc_mode_status >> 4) & 0xf;
         edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
-                     mci->csrows[row].first_page, 0, 0,
+                     mci->csrows[row]->first_page, 0, 0,
                      row, 0, -1,
                      mci->ctl_name, "", NULL);
     }
@@ -161,7 +161,7 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
     if (handle_errors) {
         row = info->ecc_mode_status & 0xf;
         edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
-                     mci->csrows[row].first_page, 0, 0,
+                     mci->csrows[row]->first_page, 0, 0,
                      row, 0, -1,
                      mci->ctl_name, "", NULL);
     }
@@ -194,8 +194,8 @@ static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
     int index;
     for (index = 0; index < mci->nr_csrows; index++) {
-        csrow = &mci->csrows[index];
-        dimm = csrow->channels[0].dimm;
+        csrow = mci->csrows[index];
+        dimm = csrow->channels[0]->dimm;
         /* find the DRAM Chip Select Base address and mask */
         pci_read_config_dword(pdev,
...
@@ -33,7 +33,7 @@ struct cell_edac_priv
 static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
 {
     struct cell_edac_priv *priv = mci->pvt_info;
-    struct csrow_info *csrow = &mci->csrows[0];
+    struct csrow_info *csrow = mci->csrows[0];
     unsigned long address, pfn, offset, syndrome;
     dev_dbg(mci->pdev, "ECC CE err on node %d, channel %d, ar = 0x%016llx\n",
@@ -56,7 +56,7 @@ static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
 static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
 {
     struct cell_edac_priv *priv = mci->pvt_info;
-    struct csrow_info *csrow = &mci->csrows[0];
+    struct csrow_info *csrow = mci->csrows[0];
     unsigned long address, pfn, offset;
     dev_dbg(mci->pdev, "ECC UE err on node %d, channel %d, ar = 0x%016llx\n",
@@ -126,7 +126,7 @@ static void cell_edac_check(struct mem_ctl_info *mci)
 static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
 {
-    struct csrow_info *csrow = &mci->csrows[0];
+    struct csrow_info *csrow = mci->csrows[0];
     struct dimm_info *dimm;
     struct cell_edac_priv *priv = mci->pvt_info;
     struct device_node *np;
@@ -150,7 +150,7 @@ static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
         csrow->last_page = csrow->first_page + nr_pages - 1;
         for (j = 0; j < csrow->nr_channels; j++) {
-            dimm = csrow->channels[j].dimm;
+            dimm = csrow->channels[j]->dimm;
             dimm->mtype = MEM_XDR;
             dimm->edac_mode = EDAC_SECDED;
             dimm->nr_pages = nr_pages / csrow->nr_channels;
...
@@ -348,7 +348,7 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
         if (bba == 0)
             continue; /* not populated */
-        csrow = &mci->csrows[index];
+        csrow = mci->csrows[index];
         row_size = bba * (1UL << 28); /* 256M */
         csrow->first_page = last_nr_pages;
@@ -380,7 +380,7 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
             break;
         }
         for (j = 0; j < csrow->nr_channels; j++) {
-            dimm = csrow->channels[j].dimm;
+            dimm = csrow->channels[j]->dimm;
             dimm->nr_pages = nr_pages / csrow->nr_channels;
             dimm->mtype = MEM_RDDR;
             dimm->edac_mode = EDAC_SECDED;
@@ -463,7 +463,7 @@ static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear,
     *csrow = rank;
 #ifdef CONFIG_EDAC_DEBUG
-    if (mci->csrows[rank].first_page == 0) {
+    if (mci->csrows[rank]->first_page == 0) {
         cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a "
                 "non-populated csrow, broken hardware?\n");
         return;
@@ -471,7 +471,7 @@ static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear,
 #endif
     /* Revert csrow number */
-    pa = mci->csrows[rank].first_page << PAGE_SHIFT;
+    pa = mci->csrows[rank]->first_page << PAGE_SHIFT;
     /* Revert column address */
     col += bcnt;
...
@@ -1096,7 +1096,7 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
     for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
         /* mem_dev 0=x8, 1=x4 */
         mem_dev = (dra >> (index * 4 + 2)) & 0x3;
-        csrow = &mci->csrows[remap_csrow_index(mci, index)];
+        csrow = mci->csrows[remap_csrow_index(mci, index)];
         mem_dev = (mem_dev == 2);
         pci_read_config_byte(pdev, E752X_DRB + index, &value);
@@ -1127,7 +1127,7 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
         } else
             edac_mode = EDAC_NONE;
         for (i = 0; i < csrow->nr_channels; i++) {
-            struct dimm_info *dimm = csrow->channels[i].dimm;
+            struct dimm_info *dimm = csrow->channels[i]->dimm;
             debugf3("Initializing rank at (%i,%i)\n", index, i);
             dimm->nr_pages = nr_pages / csrow->nr_channels;
...
@@ -378,7 +378,7 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
     for (index = 0; index < mci->nr_csrows; index++) {
         /* mem_dev 0=x8, 1=x4 */
         mem_dev = (dra >> (index * 4 + 3)) & 0x1;
-        csrow = &mci->csrows[index];
+        csrow = mci->csrows[index];
         pci_read_config_byte(pdev, E7XXX_DRB + index, &value);
         /* convert a 64 or 32 MiB DRB to a page size. */
@@ -409,7 +409,7 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
             edac_mode = EDAC_NONE;
         for (j = 0; j < drc_chan + 1; j++) {
-            dimm = csrow->channels[j].dimm;
+            dimm = csrow->channels[j]->dimm;
             dimm->nr_pages = nr_pages / (drc_chan + 1);
             dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */
...
@@ -210,15 +210,15 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
 {
     struct mem_ctl_info *mci;
     struct edac_mc_layer *layer;
-    struct csrow_info *csi, *csr;
-    struct rank_info *chi, *chp, *chan;
+    struct csrow_info *csr;
+    struct rank_info *chan;
     struct dimm_info *dimm;
     u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
     unsigned pos[EDAC_MAX_LAYERS];
     unsigned size, tot_dimms = 1, count = 1;
     unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
     void *pvt, *p, *ptr = NULL;
-    int i, j, row, chn, n, len;
+    int i, j, row, chn, n, len, off;
     bool per_rank = false;
     BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
@@ -244,9 +244,6 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
      */
     mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
     layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
-    csi = edac_align_ptr(&ptr, sizeof(*csi), tot_csrows);
-    chi = edac_align_ptr(&ptr, sizeof(*chi), tot_csrows * tot_channels);
-    dimm = edac_align_ptr(&ptr, sizeof(*dimm), tot_dimms);
     for (i = 0; i < n_layers; i++) {
         count *= layers[i].size;
         debugf4("%s: errcount layer %d size %d\n", __func__, i, count);
@@ -264,6 +261,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
         tot_dimms,
         per_rank ? "ranks" : "dimms",
         tot_csrows * tot_channels);
     mci = kzalloc(size, GFP_KERNEL);
     if (mci == NULL)
         return NULL;
@@ -272,9 +270,6 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
      * rather than an imaginary chunk of memory located at address 0.
      */
     layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
-    csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi));
-    chi = (struct rank_info *)(((char *)mci) + ((unsigned long)chi));
-    dimm = (struct dimm_info *)(((char *)mci) + ((unsigned long)dimm));
     for (i = 0; i < n_layers; i++) {
         mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
         mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
@@ -283,8 +278,6 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
     /* setup index and various internal pointers */
     mci->mc_idx = mc_num;
-    mci->csrows = csi;
-    mci->dimms = dimm;
     mci->tot_dimms = tot_dimms;
     mci->pvt_info = pvt;
     mci->n_layers = n_layers;
@@ -295,39 +288,60 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
     mci->mem_is_per_rank = per_rank;
     /*
-     * Fill the csrow struct
+     * Alocate and fill the csrow/channels structs
      */
+    mci->csrows = kcalloc(sizeof(*mci->csrows), tot_csrows, GFP_KERNEL);
+    if (!mci->csrows)
+        goto error;
     for (row = 0; row < tot_csrows; row++) {
-        csr = &csi[row];
+        csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
+        if (!csr)
+            goto error;
+        mci->csrows[row] = csr;
         csr->csrow_idx = row;
         csr->mci = mci;
         csr->nr_channels = tot_channels;
-        chp = &chi[row * tot_channels];
-        csr->channels = chp;
+        csr->channels = kcalloc(sizeof(*csr->channels), tot_channels,
+                                GFP_KERNEL);
+        if (!csr->channels)
+            goto error;
         for (chn = 0; chn < tot_channels; chn++) {
-            chan = &chp[chn];
+            chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL);
+            if (!chan)
+                goto error;
+            csr->channels[chn] = chan;
             chan->chan_idx = chn;
             chan->csrow = csr;
         }
     }
     /*
-     * Fill the dimm struct
+     * Allocate and fill the dimm structs
      */
+    mci->dimms = kcalloc(sizeof(*mci->dimms), tot_dimms, GFP_KERNEL);
+    if (!mci->dimms)
+        goto error;
     memset(&pos, 0, sizeof(pos));
     row = 0;
     chn = 0;
     debugf4("%s: initializing %d %s\n", __func__, tot_dimms,
         per_rank ? "ranks" : "dimms");
     for (i = 0; i < tot_dimms; i++) {
-        chan = &csi[row].channels[chn];
-        dimm = EDAC_DIMM_PTR(layer, mci->dimms, n_layers,
-                     pos[0], pos[1], pos[2]);
+        chan = mci->csrows[row]->channels[chn];
+        off = EDAC_DIMM_OFF(layer, n_layers, pos[0], pos[1], pos[2]);
+        if (off < 0 || off >= tot_dimms) {
+            edac_mc_printk(mci, KERN_ERR, "EDAC core bug: EDAC_DIMM_OFF is trying to do an illegal data access\n");
+            goto error;
+        }
+        dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
+        mci->dimms[off] = dimm;
         dimm->mci = mci;
-        debugf2("%s: %d: %s%zd (%d:%d:%d): row %d, chan %d\n", __func__,
-            i, per_rank ? "rank" : "dimm", (dimm - mci->dimms),
+        debugf2("%s: %d: %s%i (%d:%d:%d): row %d, chan %d\n", __func__,
+            i, per_rank ? "rank" : "dimm", off,
            pos[0], pos[1], pos[2], row, chn);
         /*
@@ -381,6 +395,28 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
      */
     return mci;
+error:
+    if (mci->dimms) {
+        for (i = 0; i < tot_dimms; i++)
+            kfree(mci->dimms[i]);
+        kfree(mci->dimms);
+    }
+    if (mci->csrows) {
+        for (chn = 0; chn < tot_channels; chn++) {
+            csr = mci->csrows[chn];
+            if (csr) {
+                for (chn = 0; chn < tot_channels; chn++)
+                    kfree(csr->channels[chn]);
+                kfree(csr);
+            }
+            kfree(mci->csrows[i]);
+        }
+        kfree(mci->csrows);
+    }
+    kfree(mci);
+    return NULL;
 }
 EXPORT_SYMBOL_GPL(edac_mc_alloc);
@@ -393,10 +429,8 @@ void edac_mc_free(struct mem_ctl_info *mci)
 {
     debugf1("%s()\n", __func__);
+    /* the mci instance is freed here, when the sysfs object is dropped */
     edac_unregister_sysfs(mci);
-    /* free the mci instance memory here */
-    kfree(mci);
 }
 EXPORT_SYMBOL_GPL(edac_mc_free);
@@ -668,13 +702,12 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
         for (i = 0; i < mci->nr_csrows; i++) {
             int j;
-            edac_mc_dump_csrow(&mci->csrows[i]);
-            for (j = 0; j < mci->csrows[i].nr_channels; j++)
-                edac_mc_dump_channel(&mci->csrows[i].
-                             channels[j]);
+            edac_mc_dump_csrow(mci->csrows[i]);
+            for (j = 0; j < mci->csrows[i]->nr_channels; j++)
+                edac_mc_dump_channel(mci->csrows[i]->channels[j]);
         }
         for (i = 0; i < mci->tot_dimms; i++)
-            edac_mc_dump_dimm(&mci->dimms[i]);
+            edac_mc_dump_dimm(mci->dimms[i]);
     }
 #endif
     mutex_lock(&mem_ctls_mutex);
@@ -793,17 +826,17 @@ static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
 /* FIXME - should return -1 */
 int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
 {
-    struct csrow_info *csrows = mci->csrows;
+    struct csrow_info **csrows = mci->csrows;
     int row, i, j, n;
     debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page);
     row = -1;
     for (i = 0; i < mci->nr_csrows; i++) {
-        struct csrow_info *csrow = &csrows[i];
+        struct csrow_info *csrow = csrows[i];
         n = 0;
         for (j = 0; j < csrow->nr_channels; j++) {
-            struct dimm_info *dimm = csrow->channels[j].dimm;
+            struct dimm_info *dimm = csrow->channels[j]->dimm;
             n += dimm->nr_pages;
         }
         if (n == 0)
@@ -1062,7 +1095,7 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
     p = label;
     *p = '\0';
     for (i = 0; i < mci->tot_dimms; i++) {
-        struct dimm_info *dimm = &mci->dimms[i];
+        struct dimm_info *dimm = mci->dimms[i];
         if (top_layer >= 0 && top_layer != dimm->location[0])
             continue;
@@ -1120,13 +1153,13 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
             strcpy(label, "unknown memory");
         if (type == HW_EVENT_ERR_CORRECTED) {
             if (row >= 0) {
-                mci->csrows[row].ce_count++;
+                mci->csrows[row]->ce_count++;
                 if (chan >= 0)
-                    mci->csrows[row].channels[chan].ce_count++;
+                    mci->csrows[row]->channels[chan]->ce_count++;
             }
         } else
             if (row >= 0)
-                mci->csrows[row].ue_count++;
+                mci->csrows[row]->ue_count++;
     }
     /* Fill the RAM location data */
...
(The diff for one file is collapsed in this view and not shown.)
@@ -236,7 +236,7 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
     int row, multi_chan, channel;
     unsigned long pfn, offset;
-    multi_chan = mci->csrows[0].nr_channels - 1;
+    multi_chan = mci->csrows[0]->nr_channels - 1;
     if (!(info->errsts & I3000_ERRSTS_BITS))
         return 0;
@@ -393,7 +393,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
     for (last_cumul_size = i = 0; i < mci->nr_csrows; i++) {
         u8 value;
         u32 cumul_size;
-        struct csrow_info *csrow = &mci->csrows[i];
+        struct csrow_info *csrow = mci->csrows[i];
         value = drb[i];
         cumul_size = value << (I3000_DRB_SHIFT - PAGE_SHIFT);
@@ -410,7 +410,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
         last_cumul_size = cumul_size;
         for (j = 0; j < nr_channels; j++) {
-            struct dimm_info *dimm = csrow->channels[j].dimm;
+            struct dimm_info *dimm = csrow->channels[j]->dimm;
             dimm->nr_pages = nr_pages / nr_channels;
             dimm->grain = I3000_DEAP_GRAIN;
...
@@ -379,7 +379,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
      */
     for (i = 0; i < mci->nr_csrows; i++) {
         unsigned long nr_pages;
-        struct csrow_info *csrow = &mci->csrows[i];
+        struct csrow_info *csrow = mci->csrows[i];
         nr_pages = drb_to_nr_pages(drbs, stacked,
             i / I3200_RANKS_PER_CHANNEL,
@@ -389,7 +389,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
             continue;
         for (j = 0; j < nr_channels; j++) {
-            struct dimm_info *dimm = csrow->channels[j].dimm;
+            struct dimm_info *dimm = csrow->channels[j]->dimm;
             dimm->nr_pages = nr_pages / nr_channels;
             dimm->grain = nr_pages << PAGE_SHIFT;
...
@@ -1203,8 +1203,8 @@ static int i5400_init_dimms(struct mem_ctl_info *mci)
             size_mb = pvt->dimm_info[slot][channel].megabytes;
-            debugf2("%s: dimm%zd (branch %d channel %d slot %d): %d.%03d GB\n",
-                __func__, dimm - mci->dimms,
+            debugf2("%s: dimm (branch %d channel %d slot %d): %d.%03d GB\n",
+                __func__,
                 channel / 2, channel % 2, slot,
                 size_mb / 1000, size_mb % 1000);
@@ -1227,7 +1227,7 @@ static int i5400_init_dimms(struct mem_ctl_info *mci)
      * With such single-DIMM mode, the SDCC algorithm degrades to SECDEC+.
      */
     if (ndimms == 1)
-        mci->dimms[0].edac_mode = EDAC_SECDED;
+        mci->dimms[0]->edac_mode = EDAC_SECDED;
     return (ndimms == 0);
 }
...
@@ -197,8 +197,8 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
     pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc);
     row_high_limit_last = 0;
     for (index = 0; index < mci->nr_csrows; index++) {
-        csrow = &mci->csrows[index];
-        dimm = csrow->channels[0].dimm;
+        csrow = mci->csrows[index];
+        dimm = csrow->channels[0]->dimm;
         pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
         debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n",
...
@@ -116,7 +116,7 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
     info->eap >>= PAGE_SHIFT;
     row = edac_mc_find_csrow_by_page(mci, info->eap);
-    dimm = mci->csrows[row].channels[0].dimm;
+    dimm = mci->csrows[row]->channels[0]->dimm;
     if (info->errsts & 0x0002)
         edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
@@ -161,8 +161,8 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
      * in all eight rows.
      */
     for (index = 0; index < mci->nr_csrows; index++) {
-        csrow = &mci->csrows[index];
-        dimm = csrow->channels[0].dimm;
+        csrow = mci->csrows[index];
+        dimm = csrow->channels[0]->dimm;
         pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
         cumul_size = (value & I82860_GBA_MASK) <<
...
@@ -227,7 +227,7 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci,
 {
     int row, multi_chan;
-    multi_chan = mci->csrows[0].nr_channels - 1;
+    multi_chan = mci->csrows[0]->nr_channels - 1;
     if (!(info->errsts & 0x0081))
         return 0;
@@ -367,7 +367,7 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci,
      */
     for (index = 0; index < mci->nr_csrows; index++) {
-        csrow = &mci->csrows[index];
+        csrow = mci->csrows[index];
         value = readb(ovrfl_window + I82875P_DRB + index);
         cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
@@ -382,7 +382,7 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci,
         last_cumul_size = cumul_size;
         for (j = 0; j < nr_chans; j++) {
-            dimm = csrow->channels[j].dimm;
+            dimm = csrow->channels[j]->dimm;
             dimm->nr_pages = nr_pages / nr_chans;
             dimm->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */
...
@@ -308,10 +308,10 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
             (info->xeap & 1) ? 1 : 0, info->eap, (unsigned int) page);
         return 0;
     }
-    chan = (mci->csrows[row].nr_channels == 1) ? 0 : info->eap & 1;
+    chan = (mci->csrows[row]->nr_channels == 1) ? 0 : info->eap & 1;
     offst = info->eap
             & ((1 << PAGE_SHIFT) -
-               (1 << mci->csrows[row].channels[chan].dimm->grain));
+               (1 << mci->csrows[row]->channels[chan]->dimm->grain));
     if (info->errsts & 0x0002)
         edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
@@ -394,7 +394,7 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
      */
     for (index = 0; index < mci->nr_csrows; index++) {
-        csrow = &mci->csrows[index];
+        csrow = mci->csrows[index];
         value = readb(mch_window + I82975X_DRB + index +
                     ((index >= 4) ? 0x80 : 0));
@@ -421,10 +421,10 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
          */
         dtype = i82975x_dram_type(mch_window, index);
         for (chan = 0; chan < csrow->nr_channels; chan++) {
-            dimm = mci->csrows[index].channels[chan].dimm;
+            dimm = mci->csrows[index]->channels[chan]->dimm;
             dimm->nr_pages = nr_pages / csrow->nr_channels;
-            strncpy(csrow->channels[chan].dimm->label,
+            strncpy(csrow->channels[chan]->dimm->label,
                     labels[(index >> 1) + (chan * 2)],
                     EDAC_MC_LABEL_LEN);
             dimm->grain = 1 << 7; /* 128Byte cache-line resolution */
...
@@ -825,7 +825,7 @@ static void mpc85xx_mc_check(struct mem_ctl_info *mci)
     pfn = err_addr >> PAGE_SHIFT;
     for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
-        csrow = &mci->csrows[row_index];
+        csrow = mci->csrows[row_index];
         if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
             break;
     }
@@ -945,8 +945,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
         u32 start;
         u32 end;
-        csrow = &mci->csrows[index];
-        dimm = csrow->channels[0].dimm;
+        csrow = mci->csrows[index];
+        dimm = csrow->channels[0]->dimm;
         cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
                 (index * MPC85XX_MC_CS_BNDS_OFS));
...
@@ -670,8 +670,8 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci,
     ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
-    csrow = &mci->csrows[0];
-    dimm = csrow->channels[0].dimm;
+    csrow = mci->csrows[0];
+    dimm = csrow->channels[0]->dimm;
     dimm->nr_pages = pdata->total_mem >> PAGE_SHIFT;
     dimm->grain = 8;
...
@@ -111,14 +111,14 @@ static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta)
     if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS |
               MCDEBUG_ERRSTA_RFL_STATUS)) {
         edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
-                     mci->csrows[cs].first_page, 0, 0,
+                     mci->csrows[cs]->first_page, 0, 0,
                      cs, 0, -1, mci->ctl_name, "", NULL);
     }
     /* correctable/single-bit errors */
     if (errsta & MCDEBUG_ERRSTA_SBE_STATUS)
         edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
-                     mci->csrows[cs].first_page, 0, 0,
+                     mci->csrows[cs]->first_page, 0, 0,
                      cs, 0, -1, mci->ctl_name, "", NULL);
 }
@@ -141,8 +141,8 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
     int index;
     for (index = 0; index < mci->nr_csrows; index++) {
-        csrow = &mci->csrows[index];
-        dimm = csrow->channels[0].dimm;
+        csrow = mci->csrows[index];
+        dimm = csrow->channels[0]->dimm;
         pci_read_config_dword(pdev,
                       MCDRAM_RANKCFG + (index * 12),
...
@@ -230,8 +230,8 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
     row_high_limit_last = 0;
     for (index = 0; index < mci->nr_csrows; index++) {
-        csrow = &mci->csrows[index];
-        dimm = csrow->channels[0].dimm;
+        csrow = mci->csrows[index];
+        dimm = csrow->channels[0]->dimm;
         /* find the DRAM Chip Select Base address and mask */
         pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
...
@@ -84,10 +84,10 @@ static void tile_edac_check(struct mem_ctl_info *mci)
  */
 static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
 {
-    struct csrow_info *csrow = &mci->csrows[0];
+    struct csrow_info *csrow = mci->csrows[0];
     struct tile_edac_priv *priv = mci->pvt_info;
     struct mshim_mem_info mem_info;
-    struct dimm_info *dimm = csrow->channels[0].dimm;
+    struct dimm_info *dimm = csrow->channels[0]->dimm;
     if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info,
         sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) !=
...
@@ -378,7 +378,7 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
      */
     for (i = 0; i < mci->nr_csrows; i++) {
         unsigned long nr_pages;
-        struct csrow_info *csrow = &mci->csrows[i];
+        struct csrow_info *csrow = mci->csrows[i];
         nr_pages = drb_to_nr_pages(drbs, stacked,
             i / X38_RANKS_PER_CHANNEL,
@@ -388,7 +388,7 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
             continue;
         for (j = 0; j < x38_channel_num; j++) {
-            struct dimm_info *dimm = csrow->channels[j].dimm;
+            struct dimm_info *dimm = csrow->channels[j]->dimm;
             dimm->nr_pages = nr_pages / x38_channel_num;
             dimm->grain = nr_pages << PAGE_SHIFT;
...
@@ -412,23 +412,21 @@ struct edac_mc_layer {
 #define EDAC_MAX_LAYERS 3
 /**
- * EDAC_DIMM_PTR - Macro responsible to find a pointer inside a pointer array
+ * EDAC_DIMM_OFF - Macro responsible to get a pointer offset inside a pointer array
  *                 for the element given by [layer0,layer1,layer2] position
  *
  * @layers:   a struct edac_mc_layer array, describing how many elements
  *            were allocated for each layer
- * @var:      name of the var where we want to get the pointer
- *            (like mci->dimms)
  * @n_layers: Number of layers at the @layers array
  * @layer0:   layer0 position
  * @layer1:   layer1 position. Unused if n_layers < 2
  * @layer2:   layer2 position. Unused if n_layers < 3
  *
- * For 1 layer, this macro returns &var[layer0]
+ * For 1 layer, this macro returns &var[layer0] - &var
  * For 2 layers, this macro is similar to allocate a bi-dimensional array
- * and to return "&var[layer0][layer1]"
+ * and to return "&var[layer0][layer1] - &var"
  * For 3 layers, this macro is similar to allocate a tri-dimensional array
- * and to return "&var[layer0][layer1][layer2]"
+ * and to return "&var[layer0][layer1][layer2] - &var"
  *
  * A loop could be used here to make it more generic, but, as we only have
  * 3 layers, this is a little faster.
@@ -436,17 +434,46 @@ struct edac_mc_layer {
  * a NULL is returned, causing an OOPS during the memory allocation routine,
  * with would point to the developer that he's doing something wrong.
  */
-#define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \
-    typeof(var) __p; \
+#define EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2) ({ \
+    int __i; \
     if ((nlayers) == 1) \
-        __p = &var[layer0]; \
+        __i = layer0; \
     else if ((nlayers) == 2) \
-        __p = &var[(layer1) + ((layers[1]).size * (layer0))]; \
+        __i = (layer1) + ((layers[1]).size * (layer0)); \
     else if ((nlayers) == 3) \
-        __p = &var[(layer2) + ((layers[2]).size * ((layer1) + \
-                   ((layers[1]).size * (layer0))))]; \
+        __i = (layer2) + ((layers[2]).size * ((layer1) + \
+                   ((layers[1]).size * (layer0)))); \
     else \
+        __i = -EINVAL; \
+    __i; \
+})
+
+/**
+ * EDAC_DIMM_PTR - Macro responsible to get a pointer inside a pointer array
+ *                 for the element given by [layer0,layer1,layer2] position
+ *
+ * @layers:   a struct edac_mc_layer array, describing how many elements
+ *            were allocated for each layer
+ * @var:      name of the var where we want to get the pointer
+ *            (like mci->dimms)
+ * @n_layers: Number of layers at the @layers array
+ * @layer0:   layer0 position
+ * @layer1:   layer1 position. Unused if n_layers < 2
+ * @layer2:   layer2 position. Unused if n_layers < 3
+ *
+ * For 1 layer, this macro returns &var[layer0]
+ * For 2 layers, this macro is similar to allocate a bi-dimensional array
+ * and to return "&var[layer0][layer1]"
+ * For 3 layers, this macro is similar to allocate a tri-dimensional array
+ * and to return "&var[layer0][layer1][layer2]"
+ */
+#define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \
+    typeof(*var) __p; \
+    int ___i = EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2); \
+    if (___i < 0) \
         __p = NULL; \
+    else \
+        __p = (var)[___i]; \
     __p; \
 })
@@ -486,8 +513,6 @@ struct dimm_info {
  * patches in this series will fix this issue.
  */
 struct rank_info {
-    struct device dev;
     int chan_idx;
     struct csrow_info *csrow;
     struct dimm_info *dimm;
@@ -513,7 +538,7 @@ struct csrow_info {
     /* channel information for this csrow */
     u32 nr_channels;
-    struct rank_info *channels;
+    struct rank_info **channels;
 };
 /*
@@ -572,7 +597,7 @@ struct mem_ctl_info {
     unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
                         unsigned long page);
     int mc_idx;
-    struct csrow_info *csrows;
+    struct csrow_info **csrows;
     unsigned nr_csrows, num_cschannel;
     /*
@@ -592,7 +617,7 @@ struct mem_ctl_info {
      * DIMM info. Will eventually remove the entire csrows_info some day
      */
     unsigned tot_dimms;
-    struct dimm_info *dimms;
+    struct dimm_info **dimms;
     /*
      * FIXME - what about controllers on other busses? - IDs must be
...
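
As a quick, standalone illustration of the index arithmetic that the new EDAC_DIMM_OFF macro performs (plain C with hypothetical layer sizes, not kernel code), the sketch below writes the same computation as a function and checks one two-layer case:

#include <assert.h>

/* Stand-in for the only field of struct edac_mc_layer that the macro uses. */
struct layer { unsigned size; };

/* Same index arithmetic as EDAC_DIMM_OFF, written out as a function. */
static int dimm_off(const struct layer *layers, int nlayers,
                    int layer0, int layer1, int layer2)
{
    if (nlayers == 1)
        return layer0;
    if (nlayers == 2)
        return layer1 + layers[1].size * layer0;
    if (nlayers == 3)
        return layer2 + layers[2].size *
                (layer1 + layers[1].size * layer0);
    return -1; /* invalid number of layers */
}

int main(void)
{
    /* Hypothetical two-layer geometry: 2 channels, 4 slots per channel. */
    struct layer layers[2] = { { .size = 2 }, { .size = 4 } };

    /* Channel 1, slot 2 maps to flat index 2 + 4 * 1 = 6 in dimms[]. */
    assert(dimm_off(layers, 2, 1, 2, 0) == 6);
    return 0;
}

EDAC_DIMM_PTR then simply indexes the flat mci->dimms[] pointer array with that offset, returning NULL when the offset is invalid.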