Commit 9202d732
authored Nov 01, 2017 by Ben Skeggs

drm/nouveau/imem/nv50-: use new interfaces for vmm operations

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>

parent 6f4dc18c

Showing 2 changed files with 41 additions and 32 deletions (+41 -32):

    drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c   +26 -19
    drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c    +15 -13
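
The patch moves these instmem backends from the old nvkm_vm_* helpers to the new nvkm_vmm_* interfaces: instead of handing a struct nvkm_mem straight to nvkm_vm_map_at(), a backend now describes its backing storage in a struct nvkm_vmm_map and lets nvkm_vmm_map() perform the mapping. The sketch below only distils that call-pattern change from the gk20a_instobj_map() hunk further down; the function name is illustrative and it is not a copy of the patch.

    /* Illustrative sketch only (not part of the patch): the shape of the
     * new map path, following the gk20a_instobj_map() hunk below.
     */
    static int
    example_instobj_map(struct nvkm_memory *memory, u64 offset,
                        struct nvkm_vmm *vmm, struct nvkm_vma *vma,
                        void *argv, u32 argc)
    {
            struct gk20a_instobj *node = gk20a_instobj(memory);
            struct nvkm_vmm_map map = {
                    .memory = &node->memory,  /* nvkm_memory being mapped   */
                    .offset = offset,         /* byte offset within the VMA */
                    .mem    = node->mn,       /* backing nvkm_mm_node       */
            };

            /* Old interface (removed below): nvkm_vm_map_at(vma, 0, &mem);
             * new interface: the VMM code drives the mapping itself.
             */
            return nvkm_vmm_map(vmm, vma, argv, argc, &map);
    }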

drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c (view file @ 9202d732)
@@ -52,7 +52,7 @@
 struct gk20a_instobj {
         struct nvkm_memory memory;
-        struct nvkm_mem mem;
+        struct nvkm_mm_node *mn;
         struct gk20a_instmem *imem;

         /* CPU mapping */
@@ -129,13 +129,13 @@ gk20a_instobj_page(struct nvkm_memory *memory)
 static u64
 gk20a_instobj_addr(struct nvkm_memory *memory)
 {
-        return gk20a_instobj(memory)->mem.offset;
+        return (u64)gk20a_instobj(memory)->mn->offset << 12;
 }

 static u64
 gk20a_instobj_size(struct nvkm_memory *memory)
 {
-        return (u64)gk20a_instobj(memory)->mem.size << 12;
+        return (u64)gk20a_instobj(memory)->mn->length << 12;
 }

 /*
@@ -284,8 +284,22 @@ gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
                   struct nvkm_vma *vma, void *argv, u32 argc)
 {
         struct gk20a_instobj *node = gk20a_instobj(memory);
-        nvkm_vm_map_at(vma, 0, &node->mem);
-        return 0;
+        struct nvkm_vmm_map map = {
+                .memory = &node->memory,
+                .offset = offset,
+                .mem = node->mn,
+        };
+
+        if (vma->vm) {
+                struct nvkm_mem mem = {
+                        .mem = node->mn,
+                        .memory = &node->memory,
+                };
+                nvkm_vm_map_at(vma, 0, &mem);
+                return 0;
+        }
+
+        return nvkm_vmm_map(vmm, vma, argv, argc, &map);
 }

 static void *
@@ -298,8 +312,8 @@ gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
         if (unlikely(!node->base.vaddr))
                 goto out;

-        dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr,
-                       node->handle, imem->attrs);
+        dma_free_attrs(dev, (u64)node->base.mn->length << PAGE_SHIFT,
+                       node->base.vaddr, node->handle, imem->attrs);

 out:
         return node;
@@ -311,7 +325,7 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
         struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
         struct gk20a_instmem *imem = node->base.imem;
         struct device *dev = imem->base.subdev.device->dev;
-        struct nvkm_mm_node *r = node->base.mem.mem;
+        struct nvkm_mm_node *r = node->base.mn;
         int i;

         if (unlikely(!r))
@@ -329,7 +343,7 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
         r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);

         /* Unmap pages from GPU address space and free them */
-        for (i = 0; i < node->base.mem.size; i++) {
+        for (i = 0; i < node->base.mn->length; i++) {
                 iommu_unmap(imem->domain,
                             (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
                 dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
@@ -410,8 +424,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
         node->r.offset = node->handle >> 12;
         node->r.length = (npages << PAGE_SHIFT) >> 12;

-        node->base.mem.offset = node->handle;
-        node->base.mem.mem = &node->r;
+        node->base.mn = &node->r;
         return 0;
 }
@@ -488,8 +501,7 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
         /* IOMMU bit tells that an address is to be resolved through the IOMMU */
         r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);

-        node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
-        node->base.mem.mem = r;
+        node->base.mn = r;
         return 0;

 release_area:
@@ -537,13 +549,8 @@ gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
         node->imem = imem;

-        /* present memory for being mapped using small pages */
-        node->mem.size = size >> 12;
-        node->mem.memtype = 0;
-        node->mem.memory = &node->memory;
-
         nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
-                   size, align, node->mem.offset);
+                   size, align, (u64)node->mn->offset << 12);

         return 0;
 }
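
With struct nvkm_mem mem replaced by struct nvkm_mm_node *mn, the GK20A backend now derives byte addresses and sizes from the mm node, whose offset and length are kept in 4 KiB (1 << 12) pages, as the gk20a_instobj_addr()/gk20a_instobj_size() hunk above shows. A minimal sketch of that arithmetic (helper names are illustrative, not from the patch):

    /* Illustrative only: nvkm_mm_node stores offset/length in 4 KiB units,
     * so byte values are obtained by shifting left by 12.
     */
    static u64
    example_mn_addr(const struct nvkm_mm_node *mn)
    {
            return (u64)mn->offset << 12;    /* pages -> bytes */
    }

    static u64
    example_mn_size(const struct nvkm_mm_node *mn)
    {
            return (u64)mn->length << 12;    /* pages -> bytes */
    }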

drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c (view file @ 9202d732)
@@ -46,7 +46,7 @@ struct nv50_instobj {
         struct nvkm_instobj base;
         struct nv50_instmem *imem;
         struct nvkm_memory *ram;
-        struct nvkm_vma bar;
+        struct nvkm_vma *bar;
         refcount_t maps;
         void *map;
         struct list_head lru;
@@ -124,7 +124,7 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
         struct nvkm_memory *memory = &iobj->base.memory;
         struct nvkm_subdev *subdev = &imem->base.subdev;
         struct nvkm_device *device = subdev->device;
-        struct nvkm_vma bar = {}, ebar;
+        struct nvkm_vma *bar = NULL, *ebar;
         u64 size = nvkm_memory_size(memory);
         void *emap;
         int ret;
@@ -134,7 +134,7 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
          * to the possibility of recursion for page table allocation.
          */
         mutex_unlock(&subdev->mutex);
-        while ((ret = nvkm_vm_get(vmm, size, 12, NV_MEM_ACCESS_RW, &bar))) {
+        while ((ret = nvkm_vmm_get(vmm, 12, size, &bar))) {
                 /* Evict unused mappings, and keep retrying until we either
                  * succeed,or there's no more objects left on the LRU.
                  */
@@ -144,10 +144,10 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
                         nvkm_debug(subdev, "evict %016llx %016llx @ %016llx\n",
                                    nvkm_memory_addr(&eobj->base.memory),
                                    nvkm_memory_size(&eobj->base.memory),
-                                   eobj->bar.offset);
+                                   eobj->bar->addr);
                         list_del_init(&eobj->lru);
                         ebar = eobj->bar;
-                        eobj->bar.node = NULL;
+                        eobj->bar = NULL;
                         emap = eobj->map;
                         eobj->map = NULL;
                 }
@@ -155,16 +155,16 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
                 if (!eobj)
                         break;
                 iounmap(emap);
-                nvkm_vm_put(&ebar);
+                nvkm_vmm_put(vmm, &ebar);
         }

         if (ret == 0)
-                ret = nvkm_memory_map(memory, 0, vmm, &bar, NULL, 0);
+                ret = nvkm_memory_map(memory, 0, vmm, bar, NULL, 0);
         mutex_lock(&subdev->mutex);
-        if (ret || iobj->bar.node) {
+        if (ret || iobj->bar) {
                 /* We either failed, or another thread beat us. */
                 mutex_unlock(&subdev->mutex);
-                nvkm_vm_put(&bar);
+                nvkm_vmm_put(vmm, &bar);
                 mutex_lock(&subdev->mutex);
                 return;
         }
@@ -172,10 +172,10 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
         /* Make the mapping visible to the host. */
         iobj->bar = bar;
         iobj->map = ioremap_wc(device->func->resource_addr(device, 3) +
-                               (u32)iobj->bar.offset, size);
+                               (u32)iobj->bar->addr, size);
         if (!iobj->map) {
                 nvkm_warn(subdev, "PRAMIN ioremap failed\n");
-                nvkm_vm_put(&iobj->bar);
+                nvkm_vmm_put(vmm, &iobj->bar);
         }
 }
@@ -299,7 +299,7 @@ nv50_instobj_dtor(struct nvkm_memory *memory)
 {
         struct nv50_instobj *iobj = nv50_instobj(memory);
         struct nvkm_instmem *imem = &iobj->imem->base;
-        struct nvkm_vma bar;
+        struct nvkm_vma *bar;
         void *map = map;

         mutex_lock(&imem->subdev.mutex);
@@ -310,8 +310,10 @@ nv50_instobj_dtor(struct nvkm_memory *memory)
         mutex_unlock(&imem->subdev.mutex);

         if (map) {
+                struct nvkm_vmm *vmm = nvkm_bar_bar2_vmm(imem->subdev.device);
                 iounmap(map);
-                nvkm_vm_put(&bar);
+                if (likely(vmm)) /* Can be NULL during BAR destructor. */
+                        nvkm_vmm_put(vmm, &bar);
         }

         nvkm_memory_unref(&iobj->ram);
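
In nv50.c the BAR2 window changes from an embedded struct nvkm_vma to a pointer, and the window is now acquired and released through nvkm_vmm_get()/nvkm_vmm_put(), which take the VMM explicitly (the old nvkm_vm_get()/nvkm_vm_put() pair operated on the vma alone). A rough sketch of the acquire/map/release pairing used in the hunks above, with the eviction loop and locking omitted (not a copy of the patch):

    /* Illustrative only: simplified get/map/put flow for the BAR2 window. */
    struct nvkm_vma *bar = NULL;
    int ret;

    ret = nvkm_vmm_get(vmm, 12, size, &bar);            /* 4 KiB page shift */
    if (ret == 0)
            ret = nvkm_memory_map(memory, 0, vmm, bar, NULL, 0);
    if (ret)
            nvkm_vmm_put(vmm, &bar);                    /* release on failure */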