Commit 2ecf7c43 authored by Ard Biesheuvel, committed by Ben Skeggs

drm/nouveau/fb/nv50: defer DMA mapping of scratch page to oneinit() hook

The 100c08 scratch page is mapped using dma_map_page() before the TTM
layer has had a chance to set the DMA mask. This means we are still
running with the default mask of 32 bits when this code executes, which
causes problems on platforms with no memory below 4 GB (such as AMD Seattle).

So move the dma_map_page() call to the .oneinit() hook, which executes after
the DMA mask has been set.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent ebf7655a
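
The ordering issue is easiest to see next to the DMA API calls involved. The
sketch below is illustrative only and not part of the commit: the helper names
(scratch_map_example(), scratch_oneinit_example()) are made up, and the 40-bit
mask mentioned in the comment is just an assumed example of the wider mask that
ends up being set. The point is that dma_map_page() honours whatever mask is in
effect when it is called, and that mask defaults to 32 bits until someone
raises it.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static dma_addr_t scratch_map_example(struct device *dev, struct page *page)
{
	/*
	 * With only the default 32-bit mask in effect, the streaming DMA API
	 * must return a bus address below 4 GB.  On a machine with no RAM
	 * below 4 GB (e.g. AMD Seattle) there is nothing suitable to map or
	 * bounce into, so the mapping fails.
	 */
	return dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
}

static int scratch_oneinit_example(struct device *dev,
				   struct page **pagep, dma_addr_t *addr)
{
	/*
	 * By the time a .oneinit-style hook runs, the DMA mask has already
	 * been widened (e.g. dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40))
	 * as a hypothetical example), so the same dma_map_page() call can
	 * now succeed.
	 */
	*pagep = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!*pagep)
		return 0;	/* carry on without a scratch page */

	*addr = scratch_map_example(dev, *pagep);
	if (dma_mapping_error(dev, *addr))
		return -EFAULT;

	return 0;
}

The new nv50_fb_oneinit() in the diff below is the real counterpart of the
second helper: the same alloc_page()/dma_map_page()/dma_mapping_error()
sequence, moved from nv50_fb_new_() to a point where the mask is already
correct.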
@@ -210,6 +210,23 @@ nv50_fb_intr(struct nvkm_fb *base)
 	nvkm_fifo_chan_put(fifo, flags, &chan);
 }
 
+static int
+nv50_fb_oneinit(struct nvkm_fb *base)
+{
+	struct nv50_fb *fb = nv50_fb(base);
+	struct nvkm_device *device = fb->base.subdev.device;
+
+	fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (fb->r100c08_page) {
+		fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
+					   PAGE_SIZE, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(device->dev, fb->r100c08))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
 static void
 nv50_fb_init(struct nvkm_fb *base)
 {
@@ -245,6 +262,7 @@ nv50_fb_dtor(struct nvkm_fb *base)
 static const struct nvkm_fb_func
 nv50_fb_ = {
 	.dtor = nv50_fb_dtor,
+	.oneinit = nv50_fb_oneinit,
 	.init = nv50_fb_init,
 	.intr = nv50_fb_intr,
 	.ram_new = nv50_fb_ram_new,
@@ -263,16 +281,6 @@ nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
 	fb->func = func;
 	*pfb = &fb->base;
 
-	fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (fb->r100c08_page) {
-		fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
-					   PAGE_SIZE, DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(device->dev, fb->r100c08))
-			return -EFAULT;
-	} else {
-		nvkm_warn(&fb->base.subdev, "failed 100c08 page alloc\n");
-	}
-
 	return 0;
 }