Commit 605f9ccd authored May 17, 2016 by Ben Skeggs
drm/nouveau: s/mem/reg/ for struct ttm_mem_reg variables
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>

parent 1167c6bc
Showing 5 changed files with 169 additions and 169 deletions
drivers/gpu/drm/nouveau/nouveau_bo.c     +133 -133
drivers/gpu/drm/nouveau/nouveau_sgdma.c  +6 -6
drivers/gpu/drm/nouveau/nouveau_ttm.c    +24 -24
drivers/gpu/drm/nouveau/nv17_fence.c     +3 -3
drivers/gpu/drm/nouveau/nv50_fence.c     +3 -3
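The rename is purely mechanical, but the convention it establishes is worth spelling out before reading the hunks: a struct ttm_mem_reg pointer is now consistently called reg (old_reg, new_reg, tmp_reg), which frees the natural name mem for the struct nvkm_mem that hangs off reg->mm_node. A minimal sketch of the resulting pattern, assuming kernel context (the function name is hypothetical; the types and fields are the ones used throughout the diff below):

static int
nouveau_example(struct ttm_mem_reg *reg)	/* hypothetical; TTM's view: "reg" */
{
	struct nvkm_mem *mem = reg->mm_node;	/* NVKM's view: "mem" */

	/* TTM-level quantities (CPU pages, placement) come from reg. */
	u64 size = (u64)reg->num_pages << PAGE_SHIFT;

	/* NVKM-level quantities (GPU virtual addresses) come from mem. */
	u64 gpuaddr = mem->vma[0].offset;

	return (size && gpuaddr) ? 0 : -EINVAL;
}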
drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -655,20 +655,20 @@ nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
 static int
 nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *node = old_mem->mm_node;
+	struct nvkm_mem *mem = old_reg->mm_node;
 	int ret = RING_SPACE(chan, 10);
 	if (ret == 0) {
 		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
-		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
-		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
-		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
-		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
+		OUT_RING  (chan, upper_32_bits(mem->vma[0].offset));
+		OUT_RING  (chan, lower_32_bits(mem->vma[0].offset));
+		OUT_RING  (chan, upper_32_bits(mem->vma[1].offset));
+		OUT_RING  (chan, lower_32_bits(mem->vma[1].offset));
 		OUT_RING  (chan, PAGE_SIZE);
 		OUT_RING  (chan, PAGE_SIZE);
 		OUT_RING  (chan, PAGE_SIZE);
-		OUT_RING  (chan, new_mem->num_pages);
+		OUT_RING  (chan, new_reg->num_pages);
 		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
 	}
 	return ret;
@@ -687,15 +687,15 @@ nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
 static int
 nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *node = old_mem->mm_node;
-	u64 src_offset = node->vma[0].offset;
-	u64 dst_offset = node->vma[1].offset;
-	u32 page_count = new_mem->num_pages;
+	struct nvkm_mem *mem = old_reg->mm_node;
+	u64 src_offset = mem->vma[0].offset;
+	u64 dst_offset = mem->vma[1].offset;
+	u32 page_count = new_reg->num_pages;
 	int ret;
 
-	page_count = new_mem->num_pages;
+	page_count = new_reg->num_pages;
 	while (page_count) {
 		int line_count = (page_count > 8191) ? 8191 : page_count;
@@ -725,15 +725,15 @@ nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 static int
 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *node = old_mem->mm_node;
-	u64 src_offset = node->vma[0].offset;
-	u64 dst_offset = node->vma[1].offset;
-	u32 page_count = new_mem->num_pages;
+	struct nvkm_mem *mem = old_reg->mm_node;
+	u64 src_offset = mem->vma[0].offset;
+	u64 dst_offset = mem->vma[1].offset;
+	u32 page_count = new_reg->num_pages;
 	int ret;
 
-	page_count = new_mem->num_pages;
+	page_count = new_reg->num_pages;
 	while (page_count) {
 		int line_count = (page_count > 2047) ? 2047 : page_count;
@@ -764,15 +764,15 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 static int
 nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *node = old_mem->mm_node;
-	u64 src_offset = node->vma[0].offset;
-	u64 dst_offset = node->vma[1].offset;
-	u32 page_count = new_mem->num_pages;
+	struct nvkm_mem *mem = old_reg->mm_node;
+	u64 src_offset = mem->vma[0].offset;
+	u64 dst_offset = mem->vma[1].offset;
+	u32 page_count = new_reg->num_pages;
 	int ret;
 
-	page_count = new_mem->num_pages;
+	page_count = new_reg->num_pages;
 	while (page_count) {
 		int line_count = (page_count > 8191) ? 8191 : page_count;
@@ -802,35 +802,35 @@ nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 static int
 nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *node = old_mem->mm_node;
+	struct nvkm_mem *mem = old_reg->mm_node;
 	int ret = RING_SPACE(chan, 7);
 	if (ret == 0) {
 		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
-		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
-		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
-		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
-		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
+		OUT_RING  (chan, upper_32_bits(mem->vma[0].offset));
+		OUT_RING  (chan, lower_32_bits(mem->vma[0].offset));
+		OUT_RING  (chan, upper_32_bits(mem->vma[1].offset));
+		OUT_RING  (chan, lower_32_bits(mem->vma[1].offset));
 		OUT_RING  (chan, 0x00000000 /* COPY */);
-		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
+		OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
 	}
 	return ret;
 }
 
 static int
 nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *node = old_mem->mm_node;
+	struct nvkm_mem *mem = old_reg->mm_node;
 	int ret = RING_SPACE(chan, 7);
 	if (ret == 0) {
 		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
-		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
-		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
-		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
-		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
-		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
+		OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
+		OUT_RING  (chan, upper_32_bits(mem->vma[0].offset));
+		OUT_RING  (chan, lower_32_bits(mem->vma[0].offset));
+		OUT_RING  (chan, upper_32_bits(mem->vma[1].offset));
+		OUT_RING  (chan, lower_32_bits(mem->vma[1].offset));
 		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
 	}
 	return ret;
@@ -854,14 +854,14 @@ nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
 static int
 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *node = old_mem->mm_node;
-	u64 length = (new_mem->num_pages << PAGE_SHIFT);
-	u64 src_offset = node->vma[0].offset;
-	u64 dst_offset = node->vma[1].offset;
-	int src_tiled = !!node->memtype;
-	int dst_tiled = !!((struct nvkm_mem *)new_mem->mm_node)->memtype;
+	struct nvkm_mem *mem = old_reg->mm_node;
+	u64 length = (new_reg->num_pages << PAGE_SHIFT);
+	u64 src_offset = mem->vma[0].offset;
+	u64 dst_offset = mem->vma[1].offset;
+	int src_tiled = !!mem->memtype;
+	int dst_tiled = !!((struct nvkm_mem *)new_reg->mm_node)->memtype;
 	int ret;
 
 	while (length) {
@@ -941,20 +941,20 @@ nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
 static inline uint32_t
 nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
-		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
+		      struct nouveau_channel *chan, struct ttm_mem_reg *reg)
 {
-	if (mem->mem_type == TTM_PL_TT)
+	if (reg->mem_type == TTM_PL_TT)
 		return NvDmaTT;
 	return chan->vram.handle;
 }
 
 static int
 nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	u32 src_offset = old_mem->start << PAGE_SHIFT;
-	u32 dst_offset = new_mem->start << PAGE_SHIFT;
-	u32 page_count = new_mem->num_pages;
+	u32 src_offset = old_reg->start << PAGE_SHIFT;
+	u32 dst_offset = new_reg->start << PAGE_SHIFT;
+	u32 page_count = new_reg->num_pages;
 	int ret;
 
 	ret = RING_SPACE(chan, 3);
@@ -962,10 +962,10 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		return ret;
 
 	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
-	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
-	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
+	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_reg));
+	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_reg));
 
-	page_count = new_mem->num_pages;
+	page_count = new_reg->num_pages;
 	while (page_count) {
 		int line_count = (page_count > 2047) ? 2047 : page_count;
@@ -996,33 +996,33 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 static int
 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
-		     struct ttm_mem_reg *mem)
+		     struct ttm_mem_reg *reg)
 {
-	struct nvkm_mem *old_node = bo->mem.mm_node;
-	struct nvkm_mem *new_node = mem->mm_node;
-	u64 size = (u64)mem->num_pages << PAGE_SHIFT;
+	struct nvkm_mem *old_mem = bo->mem.mm_node;
+	struct nvkm_mem *new_mem = reg->mm_node;
+	u64 size = (u64)reg->num_pages << PAGE_SHIFT;
 	int ret;
 
-	ret = nvkm_vm_get(drm->client.vm, size, old_node->page_shift,
-			  NV_MEM_ACCESS_RW, &old_node->vma[0]);
+	ret = nvkm_vm_get(drm->client.vm, size, old_mem->page_shift,
+			  NV_MEM_ACCESS_RW, &old_mem->vma[0]);
 	if (ret)
 		return ret;
 
-	ret = nvkm_vm_get(drm->client.vm, size, new_node->page_shift,
-			  NV_MEM_ACCESS_RW, &old_node->vma[1]);
+	ret = nvkm_vm_get(drm->client.vm, size, new_mem->page_shift,
+			  NV_MEM_ACCESS_RW, &old_mem->vma[1]);
 	if (ret) {
-		nvkm_vm_put(&old_node->vma[0]);
+		nvkm_vm_put(&old_mem->vma[0]);
 		return ret;
 	}
 
-	nvkm_vm_map(&old_node->vma[0], old_node);
-	nvkm_vm_map(&old_node->vma[1], new_node);
+	nvkm_vm_map(&old_mem->vma[0], old_mem);
+	nvkm_vm_map(&old_mem->vma[1], new_mem);
 	return 0;
 }
 
 static int
 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
-		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+		     bool no_wait_gpu, struct ttm_mem_reg *new_reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_channel *chan = drm->ttm.chan;
@@ -1035,7 +1035,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 	 * destroyed the ttm_mem_reg
 	 */
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-		ret = nouveau_bo_move_prep(drm, bo, new_mem);
+		ret = nouveau_bo_move_prep(drm, bo, new_reg);
 		if (ret)
 			return ret;
 	}
@@ -1043,14 +1043,14 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
 	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
 	if (ret == 0) {
-		ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
+		ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
 		if (ret == 0) {
 			ret = nouveau_fence_new(chan, false, &fence);
 			if (ret == 0) {
 				ret = ttm_bo_move_accel_cleanup(bo,
 								&fence->base,
 								evict,
-								new_mem);
+								new_reg);
 				nouveau_fence_unref(&fence);
 			}
 		}
@@ -1125,7 +1125,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
 static int
 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
-		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+		      bool no_wait_gpu, struct ttm_mem_reg *new_reg)
 {
 	struct ttm_place placement_memtype = {
 		.fpfn = 0,
@@ -1133,35 +1133,35 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
 	};
 	struct ttm_placement placement;
-	struct ttm_mem_reg tmp_mem;
+	struct ttm_mem_reg tmp_reg;
 	int ret;
 
 	placement.num_placement = placement.num_busy_placement = 1;
 	placement.placement = placement.busy_placement = &placement_memtype;
 
-	tmp_mem = *new_mem;
-	tmp_mem.mm_node = NULL;
-	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
+	tmp_reg = *new_reg;
+	tmp_reg.mm_node = NULL;
+	ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu);
 	if (ret)
 		return ret;
 
-	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
+	ret = ttm_tt_bind(bo->ttm, &tmp_reg);
 	if (ret)
 		goto out;
 
-	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
+	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg);
 	if (ret)
 		goto out;
 
-	ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_mem);
+	ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_reg);
 out:
-	ttm_bo_mem_put(bo, &tmp_mem);
+	ttm_bo_mem_put(bo, &tmp_reg);
 	return ret;
 }
 
 static int
 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
-		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+		      bool no_wait_gpu, struct ttm_mem_reg *new_reg)
 {
 	struct ttm_place placement_memtype = {
 		.fpfn = 0,
@@ -1169,34 +1169,34 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
 	};
 	struct ttm_placement placement;
-	struct ttm_mem_reg tmp_mem;
+	struct ttm_mem_reg tmp_reg;
 	int ret;
 
 	placement.num_placement = placement.num_busy_placement = 1;
 	placement.placement = placement.busy_placement = &placement_memtype;
 
-	tmp_mem = *new_mem;
-	tmp_mem.mm_node = NULL;
-	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
+	tmp_reg = *new_reg;
+	tmp_reg.mm_node = NULL;
+	ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu);
 	if (ret)
 		return ret;
 
-	ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_mem);
+	ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_reg);
 	if (ret)
 		goto out;
 
-	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
+	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg);
 	if (ret)
 		goto out;
 
 out:
-	ttm_bo_mem_put(bo, &tmp_mem);
+	ttm_bo_mem_put(bo, &tmp_reg);
 	return ret;
 }
 
 static void
 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
-		     struct ttm_mem_reg *new_mem)
+		     struct ttm_mem_reg *new_reg)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nvkm_vma *vma;
@@ -1206,10 +1206,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
 		return;
 
 	list_for_each_entry(vma, &nvbo->vma_list, head) {
-		if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
-		    (new_mem->mem_type == TTM_PL_VRAM ||
+		if (new_reg && new_reg->mem_type != TTM_PL_SYSTEM &&
+		    (new_reg->mem_type == TTM_PL_VRAM ||
 		     nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
-			nvkm_vm_map(vma, new_mem->mm_node);
+			nvkm_vm_map(vma, new_reg->mm_node);
 		} else {
 			WARN_ON(ttm_bo_wait(bo, false, false));
 			nvkm_vm_unmap(vma);
@@ -1218,20 +1218,20 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
 }
 
 static int
-nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
+nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg,
 		   struct nouveau_drm_tile **new_tile)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct drm_device *dev = drm->dev;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	u64 offset = new_mem->start << PAGE_SHIFT;
+	u64 offset = new_reg->start << PAGE_SHIFT;
 
 	*new_tile = NULL;
-	if (new_mem->mem_type != TTM_PL_VRAM)
+	if (new_reg->mem_type != TTM_PL_VRAM)
 		return 0;
 
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
-		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
+		*new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
 					       nvbo->tile_mode,
 					       nvbo->tile_flags);
 	}
@@ -1254,11 +1254,11 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 static int
 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
-		bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+		bool no_wait_gpu, struct ttm_mem_reg *new_reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct ttm_mem_reg *old_mem = &bo->mem;
+	struct ttm_mem_reg *old_reg = &bo->mem;
 	struct nouveau_drm_tile *new_tile = NULL;
 	int ret = 0;
@@ -1270,30 +1270,30 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 		NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
 
 	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
-		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
+		ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
 		if (ret)
 			return ret;
 	}
 
 	/* Fake bo copy. */
-	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
+	if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
 		BUG_ON(bo->mem.mm_node != NULL);
-		bo->mem = *new_mem;
-		new_mem->mm_node = NULL;
+		bo->mem = *new_reg;
+		new_reg->mm_node = NULL;
 		goto out;
 	}
 
 	/* Hardware assisted copy. */
 	if (drm->ttm.move) {
-		if (new_mem->mem_type == TTM_PL_SYSTEM)
+		if (new_reg->mem_type == TTM_PL_SYSTEM)
 			ret = nouveau_bo_move_flipd(bo, evict, intr,
-						    no_wait_gpu, new_mem);
-		else if (old_mem->mem_type == TTM_PL_SYSTEM)
+						    no_wait_gpu, new_reg);
+		else if (old_reg->mem_type == TTM_PL_SYSTEM)
 			ret = nouveau_bo_move_flips(bo, evict, intr,
-						    no_wait_gpu, new_mem);
+						    no_wait_gpu, new_reg);
 		else
 			ret = nouveau_bo_move_m2mf(bo, evict, intr,
-						   no_wait_gpu, new_mem);
+						   no_wait_gpu, new_reg);
 		if (!ret)
 			goto out;
 	}
@@ -1301,7 +1301,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 	/* Fallback to software copy. */
 	ret = ttm_bo_wait(bo, intr, no_wait_gpu);
 	if (ret == 0)
-		ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_mem);
+		ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_reg);
 
 out:
 	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
@@ -1324,54 +1324,54 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 }
 
 static int
-nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 {
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
 	struct nvkm_device *device = nvxx_device(&drm->client.device);
-	struct nvkm_mem *node = mem->mm_node;
+	struct nvkm_mem *mem = reg->mm_node;
 	int ret;
 
-	mem->bus.addr = NULL;
-	mem->bus.offset = 0;
-	mem->bus.size = mem->num_pages << PAGE_SHIFT;
-	mem->bus.base = 0;
-	mem->bus.is_iomem = false;
+	reg->bus.addr = NULL;
+	reg->bus.offset = 0;
+	reg->bus.size = reg->num_pages << PAGE_SHIFT;
+	reg->bus.base = 0;
+	reg->bus.is_iomem = false;
 	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
 		return -EINVAL;
-	switch (mem->mem_type) {
+	switch (reg->mem_type) {
 	case TTM_PL_SYSTEM:
 		/* System memory */
 		return 0;
 	case TTM_PL_TT:
 #if IS_ENABLED(CONFIG_AGP)
 		if (drm->agp.bridge) {
-			mem->bus.offset = mem->start << PAGE_SHIFT;
-			mem->bus.base = drm->agp.base;
-			mem->bus.is_iomem = !drm->agp.cma;
+			reg->bus.offset = reg->start << PAGE_SHIFT;
+			reg->bus.base = drm->agp.base;
+			reg->bus.is_iomem = !drm->agp.cma;
 		}
 #endif
-		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
+		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || !mem->memtype)
 			/* untiled */
 			break;
 		/* fallthrough, tiled memory */
 	case TTM_PL_VRAM:
-		mem->bus.offset = mem->start << PAGE_SHIFT;
-		mem->bus.base = device->func->resource_addr(device, 1);
-		mem->bus.is_iomem = true;
+		reg->bus.offset = reg->start << PAGE_SHIFT;
+		reg->bus.base = device->func->resource_addr(device, 1);
+		reg->bus.is_iomem = true;
 		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 			struct nvkm_bar *bar = nvxx_bar(&drm->client.device);
 			int page_shift = 12;
 			if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
-				page_shift = node->page_shift;
+				page_shift = mem->page_shift;
 
-			ret = nvkm_bar_umap(bar, node->size << 12, page_shift,
-					    &node->bar_vma);
+			ret = nvkm_bar_umap(bar, mem->size << 12, page_shift,
+					    &mem->bar_vma);
 			if (ret)
 				return ret;
 
-			nvkm_vm_map(&node->bar_vma, node);
-			mem->bus.offset = node->bar_vma.offset;
+			nvkm_vm_map(&mem->bar_vma, mem);
+			reg->bus.offset = mem->bar_vma.offset;
 		}
 		break;
 	default:
@@ -1381,15 +1381,15 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 }
 
 static void
-nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 {
-	struct nvkm_mem *node = mem->mm_node;
+	struct nvkm_mem *mem = reg->mm_node;
 
-	if (!node->bar_vma.node)
+	if (!mem->bar_vma.node)
 		return;
 
-	nvkm_vm_unmap(&node->bar_vma);
-	nvkm_vm_put(&node->bar_vma);
+	nvkm_vm_unmap(&mem->bar_vma);
+	nvkm_vm_put(&mem->bar_vma);
 }
 
 static int
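The payoff of the rename is most visible in nouveau_ttm_io_mem_reserve() above. Before the patch, the ttm_mem_reg argument occupied the name mem, forcing the nvkm_mem local into the less descriptive name node; afterwards each pointer carries the name matching its type. Reduced to the two lines that matter (bodies elided):

/* before */
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node = mem->mm_node;
	...

/* after */
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
{
	struct nvkm_mem *mem = reg->mm_node;
	...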
drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -24,10 +24,10 @@ nouveau_sgdma_destroy(struct ttm_tt *ttm)
 }
 
 static int
-nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct nvkm_mem *node = mem->mm_node;
+	struct nvkm_mem *node = reg->mm_node;
 
 	if (ttm->sg) {
 		node->sg = ttm->sg;
@@ -36,7 +36,7 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 		node->sg = NULL;
 		node->pages = nvbe->ttm.dma_address;
 	}
-	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
+	node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
 
 	nvkm_vm_map(&node->vma[0], node);
 	nvbe->node = node;
@@ -58,10 +58,10 @@ static struct ttm_backend_func nv04_sgdma_backend = {
 };
 
 static int
-nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct nvkm_mem *node = mem->mm_node;
+	struct nvkm_mem *node = reg->mm_node;
 
 	/* noop: bound in move_notify() */
 	if (ttm->sg) {
@@ -71,7 +71,7 @@ nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 		node->sg = NULL;
 		node->pages = nvbe->ttm.dma_address;
 	}
-	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
+	node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
 	return 0;
 }
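A side note on the expression both sgdma hunks touch: node->size = (reg->num_pages << PAGE_SHIFT) >> 12 converts the TTM page count first into bytes, then into the fixed 4 KiB units that nvkm_mem sizes appear to use (the nouveau_bo.c hunk above performs the reverse conversion with mem->size << 12). The two shifts only cancel when PAGE_SHIFT is 12; a tiny standalone sketch with made-up values shows why both are needed on a larger-page kernel:

#include <stdio.h>

int main(void)
{
	unsigned long num_pages = 32;	/* TTM pages backing the object */
	unsigned int page_shift = 16;	/* e.g. a 64 KiB-page machine */

	unsigned long bytes = num_pages << page_shift;
	unsigned long units = bytes >> 12;	/* fixed 4 KiB GPU units */

	printf("%lu TTM pages = %lu bytes = %lu 4KiB units\n",
	       num_pages, bytes, units);
	return 0;
}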
drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -64,19 +64,19 @@ nvkm_mem_node_cleanup(struct nvkm_mem *node)
 static void
 nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
-			 struct ttm_mem_reg *mem)
+			 struct ttm_mem_reg *reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
 	struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
-	nvkm_mem_node_cleanup(mem->mm_node);
-	ram->func->put(ram, (struct nvkm_mem **)&mem->mm_node);
+	nvkm_mem_node_cleanup(reg->mm_node);
+	ram->func->put(ram, (struct nvkm_mem **)&reg->mm_node);
 }
 
 static int
 nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 			 struct ttm_buffer_object *bo,
 			 const struct ttm_place *place,
-			 struct ttm_mem_reg *mem)
+			 struct ttm_mem_reg *reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
 	struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
@@ -91,18 +91,18 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
 		size_nc = 1 << nvbo->page_shift;
 
-	ret = ram->func->get(ram, mem->num_pages << PAGE_SHIFT,
-			     mem->page_alignment << PAGE_SHIFT, size_nc,
+	ret = ram->func->get(ram, reg->num_pages << PAGE_SHIFT,
+			     reg->page_alignment << PAGE_SHIFT, size_nc,
 			     (nvbo->tile_flags >> 8) & 0x3ff, &node);
 	if (ret) {
-		mem->mm_node = NULL;
+		reg->mm_node = NULL;
 		return (ret == -ENOSPC) ? 0 : ret;
 	}
 
 	node->page_shift = nvbo->page_shift;
 
-	mem->mm_node = node;
-	mem->start   = node->offset >> PAGE_SHIFT;
+	reg->mm_node = node;
+	reg->start   = node->offset >> PAGE_SHIFT;
 	return 0;
 }
@@ -127,18 +127,18 @@ nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
 static void
 nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
-			 struct ttm_mem_reg *mem)
+			 struct ttm_mem_reg *reg)
 {
-	nvkm_mem_node_cleanup(mem->mm_node);
-	kfree(mem->mm_node);
-	mem->mm_node = NULL;
+	nvkm_mem_node_cleanup(reg->mm_node);
+	kfree(reg->mm_node);
+	reg->mm_node = NULL;
 }
 
 static int
 nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
 			 struct ttm_buffer_object *bo,
 			 const struct ttm_place *place,
-			 struct ttm_mem_reg *mem)
+			 struct ttm_mem_reg *reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -173,8 +173,8 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
 		break;
 	}
 
-	mem->mm_node = node;
-	mem->start   = 0;
+	reg->mm_node = node;
+	reg->start   = 0;
 	return 0;
 }
@@ -215,20 +215,20 @@ nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
 }
 
 static void
-nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
+nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
 {
-	struct nvkm_mem *node = mem->mm_node;
+	struct nvkm_mem *node = reg->mm_node;
 	if (node->vma[0].node)
 		nvkm_vm_put(&node->vma[0]);
-	kfree(mem->mm_node);
-	mem->mm_node = NULL;
+	kfree(reg->mm_node);
+	reg->mm_node = NULL;
 }
 
 static int
 nv04_gart_manager_new(struct ttm_mem_type_manager *man,
 		      struct ttm_buffer_object *bo,
 		      const struct ttm_place *place,
-		      struct ttm_mem_reg *mem)
+		      struct ttm_mem_reg *reg)
 {
 	struct nvkm_mem *node;
 	int ret;
@@ -239,15 +239,15 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man,
 	node->page_shift = 12;
 
-	ret = nvkm_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
+	ret = nvkm_vm_get(man->priv, reg->num_pages << 12, node->page_shift,
 			  NV_MEM_ACCESS_RW, &node->vma[0]);
 	if (ret) {
 		kfree(node);
 		return ret;
 	}
 
-	mem->mm_node = node;
-	mem->start   = node->vma[0].offset >> PAGE_SHIFT;
+	reg->mm_node = node;
+	reg->start   = node->vma[0].offset >> PAGE_SHIFT;
 	return 0;
 }
drivers/gpu/drm/nouveau/nv17_fence.c
@@ -76,9 +76,9 @@ nv17_fence_context_new(struct nouveau_channel *chan)
 {
 	struct nv10_fence_priv *priv = chan->drm->fence;
 	struct nv10_fence_chan *fctx;
-	struct ttm_mem_reg *mem = &priv->bo->bo.mem;
-	u32 start = mem->start * PAGE_SIZE;
-	u32 limit = start + mem->size - 1;
+	struct ttm_mem_reg *reg = &priv->bo->bo.mem;
+	u32 start = reg->start * PAGE_SIZE;
+	u32 limit = start + reg->size - 1;
 	int ret = 0;
 
 	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
drivers/gpu/drm/nouveau/nv50_fence.c
@@ -37,9 +37,9 @@ nv50_fence_context_new(struct nouveau_channel *chan)
 {
 	struct nv10_fence_priv *priv = chan->drm->fence;
 	struct nv10_fence_chan *fctx;
-	struct ttm_mem_reg *mem = &priv->bo->bo.mem;
-	u32 start = mem->start * PAGE_SIZE;
-	u32 limit = start + mem->size - 1;
+	struct ttm_mem_reg *reg = &priv->bo->bo.mem;
+	u32 start = reg->start * PAGE_SIZE;
+	u32 limit = start + reg->size - 1;
 	int ret;
 
 	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);