Commit 87744403 authored Aug 20, 2015 by Ben Skeggs
drm/nouveau/fifo: switch to device pri macros

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>

parent 2fde1f1c
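The change is mechanical, and the same pattern repeats in every hunk below: code that went through the old object-based accessors (nv_rd32/nv_wr32/nv_mask called on a fifo or parent object) now looks up the struct nvkm_device once, via engine.subdev.device, and uses the device pri macros (nvkm_rd32/nvkm_wr32/nvkm_mask). A minimal sketch of the before/after shape follows; the wrapper function is hypothetical, while the 0x002140 offset and the device lookup path are copied from the uevent hunks below.

    /* Illustrative sketch only -- not part of the commit.  It mirrors the
     * g84/gf100/gk104 uevent hunks: fetch the device once, then use the
     * device pri macros instead of the old object-based accessors. */
    static void
    example_uevent_enable(struct nvkm_fifo *fifo)
    {
        /* new style: resolve the device from the engine's subdev */
        struct nvkm_device *device = fifo->engine.subdev.device;

        /* old style, removed by this commit:
         *     nv_mask(fifo, 0x002140, 0x40000000, 0x40000000);
         * new style, using the device pri macro: */
        nvkm_mask(device, 0x002140, 0x40000000, 0x40000000);
    }

Only the raw priv-register reads, writes and masks move to the device macros; helpers such as nv_error, nv_warn, nv_wait and nv_wait_ne are left untouched in these hunks.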
Changes (7): showing 7 changed files with 345 additions and 289 deletions (+345, -289).
drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c     +11  -7
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c   +88  -71
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c   +92  -72
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c    +88  -82
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c    +12  -11
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c    +27  -24
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c    +27  -22
drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c

@@ -80,10 +80,11 @@ static int
 g84_fifo_context_detach(struct nvkm_object *parent, bool suspend,
                         struct nvkm_object *object)
 {
-    struct nvkm_bar *bar = nvkm_bar(parent);
     struct nv50_fifo *fifo = (void *)parent->engine;
     struct nv50_fifo_base *base = (void *)parent->parent;
     struct nv50_fifo_chan *chan = (void *)parent;
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
+    struct nvkm_bar *bar = device->bar;
     u32 addr, save, engn;
     bool done;

@@ -103,10 +104,10 @@ g84_fifo_context_detach(struct nvkm_object *parent, bool suspend,
         return -EINVAL;
     }

-    save = nv_mask(fifo, 0x002520, 0x0000003f, 1 << engn);
-    nv_wr32(fifo, 0x0032fc, nv_gpuobj(base)->addr >> 12);
+    save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn);
+    nvkm_wr32(device, 0x0032fc, nv_gpuobj(base)->addr >> 12);
     done = nv_wait_ne(fifo, 0x0032fc, 0xffffffff, 0xffffffff);
-    nv_wr32(fifo, 0x002520, save);
+    nvkm_wr32(device, 0x002520, save);
     if (!done) {
         nv_error(fifo, "channel %d [%s] unload timeout\n",
                  chan->base.chid, nvkm_client_name(chan));

@@ -313,6 +314,7 @@ g84_fifo_chan_init(struct nvkm_object *object)
     struct nv50_fifo_base *base = (void *)object->parent;
     struct nv50_fifo_chan *chan = (void *)object;
     struct nvkm_gpuobj *ramfc = base->ramfc;
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
     u32 chid = chan->base.chid;
     int ret;

@@ -320,7 +322,7 @@ g84_fifo_chan_init(struct nvkm_object *object)
     if (ret)
         return ret;

-    nv_wr32(fifo, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 8);
+    nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 8);
     nv50_fifo_playlist_update(fifo);
     return 0;
 }

@@ -422,14 +424,16 @@ static void
 g84_fifo_uevent_init(struct nvkm_event *event, int type, int index)
 {
     struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
-    nv_mask(fifo, 0x002140, 0x40000000, 0x40000000);
+    struct nvkm_device *device = fifo->engine.subdev.device;
+    nvkm_mask(device, 0x002140, 0x40000000, 0x40000000);
 }

 static void
 g84_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
 {
     struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
-    nv_mask(fifo, 0x002140, 0x40000000, 0x00000000);
+    struct nvkm_device *device = fifo->engine.subdev.device;
+    nvkm_mask(device, 0x002140, 0x40000000, 0x00000000);
 }

 static const struct nvkm_event_func
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c

@@ -76,7 +76,8 @@ struct gf100_fifo_chan {
 static void
 gf100_fifo_runlist_update(struct gf100_fifo *fifo)
 {
-    struct nvkm_bar *bar = nvkm_bar(fifo);
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
+    struct nvkm_bar *bar = device->bar;
     struct nvkm_gpuobj *cur;
     int i, p;

@@ -94,11 +95,11 @@ gf100_fifo_runlist_update(struct gf100_fifo *fifo)
     }

     bar->flush(bar);
-    nv_wr32(fifo, 0x002270, cur->addr >> 12);
-    nv_wr32(fifo, 0x002274, 0x01f00000 | (p >> 3));
+    nvkm_wr32(device, 0x002270, cur->addr >> 12);
+    nvkm_wr32(device, 0x002274, 0x01f00000 | (p >> 3));

     if (wait_event_timeout(fifo->runlist.wait,
-                           !(nv_rd32(fifo, 0x00227c) & 0x00100000),
+                           !(nvkm_rd32(device, 0x00227c) & 0x00100000),
                            msecs_to_jiffies(2000)) == 0)
         nv_error(fifo, "runlist update timeout\n");
     mutex_unlock(&nv_subdev(fifo)->mutex);

@@ -145,10 +146,11 @@ static int
 gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
                           struct nvkm_object *object)
 {
-    struct nvkm_bar *bar = nvkm_bar(parent);
     struct gf100_fifo *fifo = (void *)parent->engine;
     struct gf100_fifo_base *base = (void *)parent->parent;
     struct gf100_fifo_chan *chan = (void *)parent;
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
+    struct nvkm_bar *bar = device->bar;
     u32 addr;

     switch (nv_engidx(object->engine)) {

@@ -163,7 +165,7 @@ gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
         return -EINVAL;
     }

-    nv_wr32(fifo, 0x002634, chan->base.chid);
+    nvkm_wr32(device, 0x002634, chan->base.chid);
     if (!nv_wait(fifo, 0x002634, 0xffffffff, chan->base.chid)) {
         nv_error(fifo, "channel %d [%s] kick timeout\n",
                  chan->base.chid, nvkm_client_name(chan));

@@ -253,6 +255,7 @@ gf100_fifo_chan_init(struct nvkm_object *object)
     struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
     struct gf100_fifo *fifo = (void *)object->engine;
     struct gf100_fifo_chan *chan = (void *)object;
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
     u32 chid = chan->base.chid;
     int ret;

@@ -260,10 +263,10 @@ gf100_fifo_chan_init(struct nvkm_object *object)
     if (ret)
         return ret;

-    nv_wr32(fifo, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);
+    nvkm_wr32(device, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);

     if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
-        nv_wr32(fifo, 0x003004 + (chid * 8), 0x001f0001);
+        nvkm_wr32(device, 0x003004 + (chid * 8), 0x001f0001);
         gf100_fifo_runlist_update(fifo);
     }

@@ -277,16 +280,17 @@ gf100_fifo_chan_fini(struct nvkm_object *object, bool suspend)
 {
     struct gf100_fifo *fifo = (void *)object->engine;
     struct gf100_fifo_chan *chan = (void *)object;
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
     u32 chid = chan->base.chid;

     if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
-        nv_mask(fifo, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
+        nvkm_mask(device, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
         gf100_fifo_runlist_update(fifo);
     }

     gf100_fifo_intr_engine(fifo);

-    nv_wr32(fifo, 0x003000 + (chid * 8), 0x00000000);
+    nvkm_wr32(device, 0x003000 + (chid * 8), 0x00000000);
     return nvkm_fifo_channel_fini(&chan->base, suspend);
 }

@@ -408,6 +412,7 @@ static void
 gf100_fifo_recover_work(struct work_struct *work)
 {
     struct gf100_fifo *fifo = container_of(work, typeof(*fifo), fault);
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
     struct nvkm_object *engine;
     unsigned long flags;
     u32 engn, engm = 0;

@@ -420,7 +425,7 @@ gf100_fifo_recover_work(struct work_struct *work)
     for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
         engm |= 1 << gf100_fifo_engidx(fifo, engn);
-    nv_mask(fifo, 0x002630, engm, engm);
+    nvkm_mask(device, 0x002630, engm, engm);

     for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
         if ((engine = (void *)nvkm_engine(fifo, engn))) {

@@ -430,21 +435,22 @@ gf100_fifo_recover_work(struct work_struct *work)
     }

     gf100_fifo_runlist_update(fifo);
-    nv_wr32(fifo, 0x00262c, engm);
-    nv_mask(fifo, 0x002630, engm, 0x00000000);
+    nvkm_wr32(device, 0x00262c, engm);
+    nvkm_mask(device, 0x002630, engm, 0x00000000);
 }

 static void
 gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
                    struct gf100_fifo_chan *chan)
 {
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
     u32 chid = chan->base.chid;
     unsigned long flags;

     nv_error(fifo, "%s engine fault on channel %d, recovering...\n",
              nv_subdev(engine)->name, chid);

-    nv_mask(fifo, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
+    nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
     chan->state = KILLED;

     spin_lock_irqsave(&fifo->base.lock, flags);

@@ -488,12 +494,13 @@ gf100_fifo_sched_reason[] = {
 static void
 gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
 {
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
     struct nvkm_engine *engine;
     struct gf100_fifo_chan *chan;
     u32 engn;

     for (engn = 0; engn < 6; engn++) {
-        u32 stat = nv_rd32(fifo, 0x002640 + (engn * 0x04));
+        u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
         u32 busy = (stat & 0x80000000);
         u32 save = (stat & 0x00100000); /* maybe? */
         u32 unk0 = (stat & 0x00040000);

@@ -514,7 +521,8 @@ gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
 static void
 gf100_fifo_intr_sched(struct gf100_fifo *fifo)
 {
-    u32 intr = nv_rd32(fifo, 0x00254c);
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
+    u32 intr = nvkm_rd32(device, 0x00254c);
     u32 code = intr & 0x000000ff;
     const struct nvkm_enum *en;
     char enunk[6] = "";

@@ -596,10 +604,11 @@ gf100_fifo_fault_gpcclient[] = {
 static void
 gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
 {
-    u32 inst = nv_rd32(fifo, 0x002800 + (unit * 0x10));
-    u32 valo = nv_rd32(fifo, 0x002804 + (unit * 0x10));
-    u32 vahi = nv_rd32(fifo, 0x002808 + (unit * 0x10));
-    u32 stat = nv_rd32(fifo, 0x00280c + (unit * 0x10));
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
+    u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
+    u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
+    u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
+    u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
     u32 gpc    = (stat & 0x1f000000) >> 24;
     u32 client = (stat & 0x00001f00) >> 8;
     u32 write  = (stat & 0x00000080);

@@ -621,13 +630,13 @@ gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
     if (eu) {
         switch (eu->data2) {
         case NVDEV_SUBDEV_BAR:
-            nv_mask(fifo, 0x001704, 0x00000000, 0x00000000);
+            nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
             break;
         case NVDEV_SUBDEV_INSTMEM:
-            nv_mask(fifo, 0x001714, 0x00000000, 0x00000000);
+            nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
             break;
         case NVDEV_ENGINE_IFB:
-            nv_mask(fifo, 0x001718, 0x00000000, 0x00000000);
+            nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
             break;
         default:
             engine = nvkm_engine(fifo, eu->data2);

@@ -680,10 +689,11 @@ gf100_fifo_pbdma_intr[] = {
 static void
 gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
 {
-    u32 stat = nv_rd32(fifo, 0x040108 + (unit * 0x2000));
-    u32 addr = nv_rd32(fifo, 0x0400c0 + (unit * 0x2000));
-    u32 data = nv_rd32(fifo, 0x0400c4 + (unit * 0x2000));
-    u32 chid = nv_rd32(fifo, 0x040120 + (unit * 0x2000)) & 0x7f;
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
+    u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000));
+    u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
+    u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
+    u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
     u32 subc = (addr & 0x00070000) >> 16;
     u32 mthd = (addr & 0x00003ffc);
     u32 show = stat;

@@ -704,35 +714,37 @@ gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
             subc, mthd, data);
     }

-    nv_wr32(fifo, 0x0400c0 + (unit * 0x2000), 0x80600008);
-    nv_wr32(fifo, 0x040108 + (unit * 0x2000), stat);
+    nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
+    nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
 }

 static void
 gf100_fifo_intr_runlist(struct gf100_fifo *fifo)
 {
-    u32 intr = nv_rd32(fifo, 0x002a00);
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
+    u32 intr = nvkm_rd32(device, 0x002a00);

     if (intr & 0x10000000) {
         wake_up(&fifo->runlist.wait);
-        nv_wr32(fifo, 0x002a00, 0x10000000);
+        nvkm_wr32(device, 0x002a00, 0x10000000);
         intr &= ~0x10000000;
     }

     if (intr) {
         nv_error(fifo, "RUNLIST 0x%08x\n", intr);
-        nv_wr32(fifo, 0x002a00, intr);
+        nvkm_wr32(device, 0x002a00, intr);
     }
 }

 static void
 gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
 {
-    u32 intr = nv_rd32(fifo, 0x0025a8 + (engn * 0x04));
-    u32 inte = nv_rd32(fifo, 0x002628);
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
+    u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
+    u32 inte = nvkm_rd32(device, 0x002628);
     u32 unkn;

-    nv_wr32(fifo, 0x0025a8 + (engn * 0x04), intr);
+    nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);

     for (unkn = 0; unkn < 8; unkn++) {
         u32 ints = (intr >> (unkn * 0x04)) & inte;

@@ -742,7 +754,7 @@ gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
         }
         if (ints) {
             nv_error(fifo, "ENGINE %d %d %01x", engn, unkn, ints);
-            nv_mask(fifo, 0x002628, ints, 0);
+            nvkm_mask(device, 0x002628, ints, 0);
         }
     }
 }

@@ -750,7 +762,8 @@ gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
 static void
 gf100_fifo_intr_engine(struct gf100_fifo *fifo)
 {
-    u32 mask = nv_rd32(fifo, 0x0025a4);
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
+    u32 mask = nvkm_rd32(device, 0x0025a4);
     while (mask) {
         u32 unit = __ffs(mask);
         gf100_fifo_intr_engine_unit(fifo, unit);

@@ -762,53 +775,54 @@ static void
 gf100_fifo_intr(struct nvkm_subdev *subdev)
 {
     struct gf100_fifo *fifo = (void *)subdev;
-    u32 mask = nv_rd32(fifo, 0x002140);
-    u32 stat = nv_rd32(fifo, 0x002100) & mask;
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
+    u32 mask = nvkm_rd32(device, 0x002140);
+    u32 stat = nvkm_rd32(device, 0x002100) & mask;

     if (stat & 0x00000001) {
-        u32 intr = nv_rd32(fifo, 0x00252c);
+        u32 intr = nvkm_rd32(device, 0x00252c);
         nv_warn(fifo, "INTR 0x00000001: 0x%08x\n", intr);
-        nv_wr32(fifo, 0x002100, 0x00000001);
+        nvkm_wr32(device, 0x002100, 0x00000001);
         stat &= ~0x00000001;
     }

     if (stat & 0x00000100) {
         gf100_fifo_intr_sched(fifo);
-        nv_wr32(fifo, 0x002100, 0x00000100);
+        nvkm_wr32(device, 0x002100, 0x00000100);
         stat &= ~0x00000100;
     }

     if (stat & 0x00010000) {
-        u32 intr = nv_rd32(fifo, 0x00256c);
+        u32 intr = nvkm_rd32(device, 0x00256c);
         nv_warn(fifo, "INTR 0x00010000: 0x%08x\n", intr);
-        nv_wr32(fifo, 0x002100, 0x00010000);
+        nvkm_wr32(device, 0x002100, 0x00010000);
         stat &= ~0x00010000;
     }

     if (stat & 0x01000000) {
-        u32 intr = nv_rd32(fifo, 0x00258c);
+        u32 intr = nvkm_rd32(device, 0x00258c);
         nv_warn(fifo, "INTR 0x01000000: 0x%08x\n", intr);
-        nv_wr32(fifo, 0x002100, 0x01000000);
+        nvkm_wr32(device, 0x002100, 0x01000000);
         stat &= ~0x01000000;
     }

     if (stat & 0x10000000) {
-        u32 mask = nv_rd32(fifo, 0x00259c);
+        u32 mask = nvkm_rd32(device, 0x00259c);
         while (mask) {
             u32 unit = __ffs(mask);
             gf100_fifo_intr_fault(fifo, unit);
-            nv_wr32(fifo, 0x00259c, (1 << unit));
+            nvkm_wr32(device, 0x00259c, (1 << unit));
             mask &= ~(1 << unit);
         }
         stat &= ~0x10000000;
     }

     if (stat & 0x20000000) {
-        u32 mask = nv_rd32(fifo, 0x0025a0);
+        u32 mask = nvkm_rd32(device, 0x0025a0);
         while (mask) {
             u32 unit = __ffs(mask);
             gf100_fifo_intr_pbdma(fifo, unit);
-            nv_wr32(fifo, 0x0025a0, (1 << unit));
+            nvkm_wr32(device, 0x0025a0, (1 << unit));
             mask &= ~(1 << unit);
         }
         stat &= ~0x20000000;

@@ -826,8 +840,8 @@ gf100_fifo_intr(struct nvkm_subdev *subdev)
     if (stat) {
         nv_error(fifo, "INTR 0x%08x\n", stat);
-        nv_mask(fifo, 0x002140, stat, 0x00000000);
-        nv_wr32(fifo, 0x002100, stat);
+        nvkm_mask(device, 0x002140, stat, 0x00000000);
+        nvkm_wr32(device, 0x002100, stat);
     }
 }

@@ -835,14 +849,16 @@ static void
 gf100_fifo_uevent_init(struct nvkm_event *event, int type, int index)
 {
     struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
-    nv_mask(fifo, 0x002140, 0x80000000, 0x80000000);
+    struct nvkm_device *device = fifo->engine.subdev.device;
+    nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
 }

 static void
 gf100_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
 {
     struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
-    nv_mask(fifo, 0x002140, 0x80000000, 0x00000000);
+    struct nvkm_device *device = fifo->engine.subdev.device;
+    nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
 }

 static const struct nvkm_event_func

@@ -917,41 +933,42 @@ static int
 gf100_fifo_init(struct nvkm_object *object)
 {
     struct gf100_fifo *fifo = (void *)object;
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
     int ret, i;

     ret = nvkm_fifo_init(&fifo->base);
     if (ret)
         return ret;

-    nv_wr32(fifo, 0x000204, 0xffffffff);
-    nv_wr32(fifo, 0x002204, 0xffffffff);
+    nvkm_wr32(device, 0x000204, 0xffffffff);
+    nvkm_wr32(device, 0x002204, 0xffffffff);

-    fifo->spoon_nr = hweight32(nv_rd32(fifo, 0x002204));
+    fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x002204));
     nv_debug(fifo, "%d PBDMA unit(s)\n", fifo->spoon_nr);

     /* assign engines to PBDMAs */
     if (fifo->spoon_nr >= 3) {
-        nv_wr32(fifo, 0x002208, ~(1 << 0)); /* PGRAPH */
-        nv_wr32(fifo, 0x00220c, ~(1 << 1)); /* PVP */
-        nv_wr32(fifo, 0x002210, ~(1 << 1)); /* PMSPP */
-        nv_wr32(fifo, 0x002214, ~(1 << 1)); /* PMSVLD */
-        nv_wr32(fifo, 0x002218, ~(1 << 2)); /* PCE0 */
-        nv_wr32(fifo, 0x00221c, ~(1 << 1)); /* PCE1 */
+        nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
+        nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
+        nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPP */
+        nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
+        nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
+        nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
     }

     /* PBDMA[n] */
     for (i = 0; i < fifo->spoon_nr; i++) {
-        nv_mask(fifo, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
-        nv_wr32(fifo, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
-        nv_wr32(fifo, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+        nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+        nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+        nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
     }

-    nv_mask(fifo, 0x002200, 0x00000001, 0x00000001);
-    nv_wr32(fifo, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);
+    nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
+    nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

-    nv_wr32(fifo, 0x002100, 0xffffffff);
-    nv_wr32(fifo, 0x002140, 0x7fffffff);
-    nv_wr32(fifo, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
+    nvkm_wr32(device, 0x002100, 0xffffffff);
+    nvkm_wr32(device, 0x002140, 0x7fffffff);
+    nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
     return 0;
 }
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c

@@ -95,8 +95,9 @@ struct gk104_fifo_chan {
 static void
 gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
 {
-    struct nvkm_bar *bar = nvkm_bar(fifo);
     struct gk104_fifo_engn *engn = &fifo->engine[engine];
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
+    struct nvkm_bar *bar = device->bar;
     struct nvkm_gpuobj *cur;
     int i, p;

@@ -114,10 +115,10 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
     }

     bar->flush(bar);
-    nv_wr32(fifo, 0x002270, cur->addr >> 12);
-    nv_wr32(fifo, 0x002274, (engine << 20) | (p >> 3));
+    nvkm_wr32(device, 0x002270, cur->addr >> 12);
+    nvkm_wr32(device, 0x002274, (engine << 20) | (p >> 3));

-    if (wait_event_timeout(engn->wait, !(nv_rd32(fifo, 0x002284 +
+    if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
                            (engine * 0x08)) & 0x00100000),
                            msecs_to_jiffies(2000)) == 0)
         nv_error(fifo, "runlist %d update timeout\n", engine);

@@ -170,8 +171,9 @@ gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
 {
     struct nvkm_object *obj = (void *)chan;
     struct gk104_fifo *fifo = (void *)obj->engine;
+    struct nvkm_device *device = fifo->base.engine.subdev.device;

-    nv_wr32(fifo, 0x002634, chan->base.chid);
+    nvkm_wr32(device, 0x002634, chan->base.chid);
     if (!nv_wait(fifo, 0x002634, 0x100000, 0x000000)) {
         nv_error(fifo, "channel %d [%s] kick timeout\n",
                  chan->base.chid, nvkm_client_name(chan));

@@ -300,6 +302,7 @@ gk104_fifo_chan_init(struct nvkm_object *object)
     struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
     struct gk104_fifo *fifo = (void *)object->engine;
     struct gk104_fifo_chan *chan = (void *)object;
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
     u32 chid = chan->base.chid;
     int ret;

@@ -307,13 +310,13 @@ gk104_fifo_chan_init(struct nvkm_object *object)
     if (ret)
         return ret;

-    nv_mask(fifo, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
-    nv_wr32(fifo, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
+    nvkm_mask(device, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
+    nvkm_wr32(device, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);

     if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
-        nv_mask(fifo, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
+        nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
         gk104_fifo_runlist_update(fifo, chan->engine);
-        nv_mask(fifo, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
+        nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
     }

     return 0;

@@ -324,14 +327,15 @@ gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
 {
     struct gk104_fifo *fifo = (void *)object->engine;
     struct gk104_fifo_chan *chan = (void *)object;
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
     u32 chid = chan->base.chid;

     if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
-        nv_mask(fifo, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
+        nvkm_mask(device, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
         gk104_fifo_runlist_update(fifo, chan->engine);
     }

-    nv_wr32(fifo, 0x800000 + (chid * 8), 0x00000000);
+    nvkm_wr32(device, 0x800000 + (chid * 8), 0x00000000);
     return nvkm_fifo_channel_fini(&chan->base, suspend);
 }

@@ -445,6 +449,7 @@ static void
 gk104_fifo_recover_work(struct work_struct *work)
 {
     struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault);
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
     struct nvkm_object *engine;
     unsigned long flags;
     u32 engn, engm = 0;

@@ -457,7 +462,7 @@ gk104_fifo_recover_work(struct work_struct *work)
     for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
         engm |= 1 << gk104_fifo_engidx(fifo, engn);
-    nv_mask(fifo, 0x002630, engm, engm);
+    nvkm_mask(device, 0x002630, engm, engm);

     for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
         if ((engine = (void *)nvkm_engine(fifo, engn))) {

@@ -467,21 +472,22 @@ gk104_fifo_recover_work(struct work_struct *work)
         gk104_fifo_runlist_update(fifo, gk104_fifo_engidx(fifo, engn));
     }

-    nv_wr32(fifo, 0x00262c, engm);
-    nv_mask(fifo, 0x002630, engm, 0x00000000);
+    nvkm_wr32(device, 0x00262c, engm);
+    nvkm_mask(device, 0x002630, engm, 0x00000000);
 }

 static void
 gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
                    struct gk104_fifo_chan *chan)
 {
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
     u32 chid = chan->base.chid;
     unsigned long flags;

     nv_error(fifo, "%s engine fault on channel %d, recovering...\n",
              nv_subdev(engine)->name, chid);

-    nv_mask(fifo, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
+    nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
     chan->state = KILLED;

     spin_lock_irqsave(&fifo->base.lock, flags);

@@ -530,7 +536,8 @@ gk104_fifo_bind_reason[] = {
 static void
 gk104_fifo_intr_bind(struct gk104_fifo *fifo)
 {
-    u32 intr = nv_rd32(fifo, 0x00252c);
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
+    u32 intr = nvkm_rd32(device, 0x00252c);
     u32 code = intr & 0x000000ff;
     const struct nvkm_enum *en;
     char enunk[6] = "";

@@ -551,12 +558,13 @@ gk104_fifo_sched_reason[] = {
 static void
 gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
 {
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
     struct nvkm_engine *engine;
     struct gk104_fifo_chan *chan;
     u32 engn;

     for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) {
-        u32 stat = nv_rd32(fifo, 0x002640 + (engn * 0x04));
+        u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
         u32 busy = (stat & 0x80000000);
         u32 next = (stat & 0x07ff0000) >> 16;
         u32 chsw = (stat & 0x00008000);

@@ -579,7 +587,8 @@ gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
 static void
 gk104_fifo_intr_sched(struct gk104_fifo *fifo)
 {
-    u32 intr = nv_rd32(fifo, 0x00254c);
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
+    u32 intr = nvkm_rd32(device, 0x00254c);
     u32 code = intr & 0x000000ff;
     const struct nvkm_enum *en;
     char enunk[6] = "";

@@ -602,15 +611,17 @@ gk104_fifo_intr_sched(struct gk104_fifo *fifo)
 static void
 gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
 {
-    u32 stat = nv_rd32(fifo, 0x00256c);
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
+    u32 stat = nvkm_rd32(device, 0x00256c);
     nv_error(fifo, "CHSW_ERROR 0x%08x\n", stat);
-    nv_wr32(fifo, 0x00256c, stat);
+    nvkm_wr32(device, 0x00256c, stat);
 }

 static void
 gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
 {
-    u32 stat = nv_rd32(fifo, 0x00259c);
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
+    u32 stat = nvkm_rd32(device, 0x00259c);
     nv_error(fifo, "DROPPED_MMU_FAULT 0x%08x\n", stat);
 }

@@ -722,10 +733,11 @@ gk104_fifo_fault_gpcclient[] = {
 static void
 gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
 {
-    u32 inst = nv_rd32(fifo, 0x002800 + (unit * 0x10));
-    u32 valo = nv_rd32(fifo, 0x002804 + (unit * 0x10));
-    u32 vahi = nv_rd32(fifo, 0x002808 + (unit * 0x10));
-    u32 stat = nv_rd32(fifo, 0x00280c + (unit * 0x10));
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
+    u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
+    u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
+    u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
+    u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
     u32 gpc    = (stat & 0x1f000000) >> 24;
     u32 client = (stat & 0x00001f00) >> 8;
     u32 write  = (stat & 0x00000080);

@@ -747,13 +759,13 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
     if (eu) {
         switch (eu->data2) {
         case NVDEV_SUBDEV_BAR:
-            nv_mask(fifo, 0x001704, 0x00000000, 0x00000000);
+            nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
             break;
         case NVDEV_SUBDEV_INSTMEM:
-            nv_mask(fifo, 0x001714, 0x00000000, 0x00000000);
+            nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
             break;
         case NVDEV_ENGINE_IFB:
-            nv_mask(fifo, 0x001718, 0x00000000, 0x00000000);
+            nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
             break;
         default:
             engine = nvkm_engine(fifo, eu->data2);

@@ -833,11 +845,12 @@ static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
 static void
 gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
 {
-    u32 mask = nv_rd32(fifo, 0x04010c + (unit * 0x2000));
-    u32 stat = nv_rd32(fifo, 0x040108 + (unit * 0x2000)) & mask;
-    u32 addr = nv_rd32(fifo, 0x0400c0 + (unit * 0x2000));
-    u32 data = nv_rd32(fifo, 0x0400c4 + (unit * 0x2000));
-    u32 chid = nv_rd32(fifo, 0x040120 + (unit * 0x2000)) & 0xfff;
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
+    u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
+    u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
+    u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
+    u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
+    u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
     u32 subc = (addr & 0x00070000) >> 16;
     u32 mthd = (addr & 0x00003ffc);
     u32 show = stat;

@@ -845,7 +858,7 @@ gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
     if (stat & 0x00800000) {
         if (!gk104_fifo_swmthd(fifo, chid, mthd, data))
             show &= ~0x00800000;
-        nv_wr32(fifo, 0x0400c0 + (unit * 0x2000), 0x80600008);
+        nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
     }

     if (show) {

@@ -859,7 +872,7 @@ gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
             subc, mthd, data);
     }

-    nv_wr32(fifo, 0x040108 + (unit * 0x2000), stat);
+    nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
 }

 static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {

@@ -874,30 +887,32 @@ static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
 static void
 gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
 {
-    u32 mask = nv_rd32(fifo, 0x04014c + (unit * 0x2000));
-    u32 stat = nv_rd32(fifo, 0x040148 + (unit * 0x2000)) & mask;
-    u32 chid = nv_rd32(fifo, 0x040120 + (unit * 0x2000)) & 0xfff;
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
+    u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
+    u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
+    u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;

     if (stat) {
         nv_error(fifo, "PBDMA%d:", unit);
         nvkm_bitfield_print(gk104_fifo_pbdma_intr_1, stat);
         pr_cont("\n");
         nv_error(fifo, "PBDMA%d: ch %d %08x %08x\n", unit, chid,
-                 nv_rd32(fifo, 0x040150 + (unit * 0x2000)),
-                 nv_rd32(fifo, 0x040154 + (unit * 0x2000)));
+                 nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
+                 nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
     }

-    nv_wr32(fifo, 0x040148 + (unit * 0x2000), stat);
+    nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
 }

 static void
 gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
 {
-    u32 mask = nv_rd32(fifo, 0x002a00);
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
+    u32 mask = nvkm_rd32(device, 0x002a00);
     while (mask) {
         u32 engn = __ffs(mask);
         wake_up(&fifo->engine[engn].wait);
-        nv_wr32(fifo, 0x002a00, 1 << engn);
+        nvkm_wr32(device, 0x002a00, 1 << engn);
         mask &= ~(1 << engn);
     }
 }

@@ -912,69 +927,70 @@ static void
 gk104_fifo_intr(struct nvkm_subdev *subdev)
 {
     struct gk104_fifo *fifo = (void *)subdev;
-    u32 mask = nv_rd32(fifo, 0x002140);
-    u32 stat = nv_rd32(fifo, 0x002100) & mask;
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
+    u32 mask = nvkm_rd32(device, 0x002140);
+    u32 stat = nvkm_rd32(device, 0x002100) & mask;

     if (stat & 0x00000001) {
         gk104_fifo_intr_bind(fifo);
-        nv_wr32(fifo, 0x002100, 0x00000001);
+        nvkm_wr32(device, 0x002100, 0x00000001);
         stat &= ~0x00000001;
     }

     if (stat & 0x00000010) {
         nv_error(fifo, "PIO_ERROR\n");
-        nv_wr32(fifo, 0x002100, 0x00000010);
+        nvkm_wr32(device, 0x002100, 0x00000010);
         stat &= ~0x00000010;
     }

     if (stat & 0x00000100) {
         gk104_fifo_intr_sched(fifo);
-        nv_wr32(fifo, 0x002100, 0x00000100);
+        nvkm_wr32(device, 0x002100, 0x00000100);
         stat &= ~0x00000100;
     }

     if (stat & 0x00010000) {
         gk104_fifo_intr_chsw(fifo);
-        nv_wr32(fifo, 0x002100, 0x00010000);
+        nvkm_wr32(device, 0x002100, 0x00010000);
         stat &= ~0x00010000;
     }

     if (stat & 0x00800000) {
         nv_error(fifo, "FB_FLUSH_TIMEOUT\n");
-        nv_wr32(fifo, 0x002100, 0x00800000);
+        nvkm_wr32(device, 0x002100, 0x00800000);
         stat &= ~0x00800000;
     }

     if (stat & 0x01000000) {
         nv_error(fifo, "LB_ERROR\n");
-        nv_wr32(fifo, 0x002100, 0x01000000);
+        nvkm_wr32(device, 0x002100, 0x01000000);
         stat &= ~0x01000000;
     }

     if (stat & 0x08000000) {
         gk104_fifo_intr_dropped_fault(fifo);
-        nv_wr32(fifo, 0x002100, 0x08000000);
+        nvkm_wr32(device, 0x002100, 0x08000000);
         stat &= ~0x08000000;
     }

     if (stat & 0x10000000) {
-        u32 mask = nv_rd32(fifo, 0x00259c);
+        u32 mask = nvkm_rd32(device, 0x00259c);
         while (mask) {
             u32 unit = __ffs(mask);
             gk104_fifo_intr_fault(fifo, unit);
-            nv_wr32(fifo, 0x00259c, (1 << unit));
+            nvkm_wr32(device, 0x00259c, (1 << unit));
             mask &= ~(1 << unit);
         }
         stat &= ~0x10000000;
     }

     if (stat & 0x20000000) {
-        u32 mask = nv_rd32(fifo, 0x0025a0);
+        u32 mask = nvkm_rd32(device, 0x0025a0);
         while (mask) {
             u32 unit = __ffs(mask);
             gk104_fifo_intr_pbdma_0(fifo, unit);
             gk104_fifo_intr_pbdma_1(fifo, unit);
-            nv_wr32(fifo, 0x0025a0, (1 << unit));
+            nvkm_wr32(device, 0x0025a0, (1 << unit));
             mask &= ~(1 << unit);
         }
         stat &= ~0x20000000;

@@ -986,15 +1002,15 @@ gk104_fifo_intr(struct nvkm_subdev *subdev)
     }

     if (stat & 0x80000000) {
-        nv_wr32(fifo, 0x002100, 0x80000000);
+        nvkm_wr32(device, 0x002100, 0x80000000);
         gk104_fifo_intr_engine(fifo);
         stat &= ~0x80000000;
     }

     if (stat) {
         nv_error(fifo, "INTR 0x%08x\n", stat);
-        nv_mask(fifo, 0x002140, stat, 0x00000000);
-        nv_wr32(fifo, 0x002100, stat);
+        nvkm_mask(device, 0x002140, stat, 0x00000000);
+        nvkm_wr32(device, 0x002100, stat);
     }
 }

@@ -1002,14 +1018,16 @@ static void
 gk104_fifo_uevent_init(struct nvkm_event *event, int type, int index)
 {
     struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
-    nv_mask(fifo, 0x002140, 0x80000000, 0x80000000);
+    struct nvkm_device *device = fifo->engine.subdev.device;
+    nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
 }

 static void
 gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
 {
     struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
-    nv_mask(fifo, 0x002140, 0x80000000, 0x00000000);
+    struct nvkm_device *device = fifo->engine.subdev.device;
+    nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
 }

 static const struct nvkm_event_func

@@ -1023,6 +1041,7 @@ int
 gk104_fifo_fini(struct nvkm_object *object, bool suspend)
 {
     struct gk104_fifo *fifo = (void *)object;
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
     int ret;

     ret = nvkm_fifo_fini(&fifo->base, suspend);

@@ -1030,7 +1049,7 @@ gk104_fifo_fini(struct nvkm_object *object, bool suspend)
         return ret;

     /* allow mmu fault interrupts, even when we're not using fifo */
-    nv_mask(fifo, 0x002140, 0x10000000, 0x10000000);
+    nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
     return 0;
 }

@@ -1038,6 +1057,7 @@ int
 gk104_fifo_init(struct nvkm_object *object)
 {
     struct gk104_fifo *fifo = (void *)object;
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
     int ret, i;

     ret = nvkm_fifo_init(&fifo->base);

@@ -1045,27 +1065,27 @@ gk104_fifo_init(struct nvkm_object *object)
         return ret;

     /* enable all available PBDMA units */
-    nv_wr32(fifo, 0x000204, 0xffffffff);
-    fifo->spoon_nr = hweight32(nv_rd32(fifo, 0x000204));
+    nvkm_wr32(device, 0x000204, 0xffffffff);
+    fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204));
     nv_debug(fifo, "%d PBDMA unit(s)\n", fifo->spoon_nr);

     /* PBDMA[n] */
     for (i = 0; i < fifo->spoon_nr; i++) {
-        nv_mask(fifo, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
-        nv_wr32(fifo, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
-        nv_wr32(fifo, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+        nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+        nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+        nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
     }

     /* PBDMA[n].HCE */
     for (i = 0; i < fifo->spoon_nr; i++) {
-        nv_wr32(fifo, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
-        nv_wr32(fifo, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
+        nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
+        nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
     }

-    nv_wr32(fifo, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);
+    nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

-    nv_wr32(fifo, 0x002100, 0xffffffff);
-    nv_wr32(fifo, 0x002140, 0x7fffffff);
+    nvkm_wr32(device, 0x002100, 0xffffffff);
+    nvkm_wr32(device, 0x002140, 0x7fffffff);
     return 0;
 }
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c

@@ -174,6 +174,7 @@ nv04_fifo_chan_init(struct nvkm_object *object)
 {
     struct nv04_fifo *fifo = (void *)object->engine;
     struct nv04_fifo_chan *chan = (void *)object;
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
     u32 mask = 1 << chan->base.chid;
     unsigned long flags;
     int ret;

@@ -183,7 +184,7 @@ nv04_fifo_chan_init(struct nvkm_object *object)
         return ret;

     spin_lock_irqsave(&fifo->base.lock, flags);
-    nv_mask(fifo, NV04_PFIFO_MODE, mask, mask);
+    nvkm_mask(device, NV04_PFIFO_MODE, mask, mask);
     spin_unlock_irqrestore(&fifo->base.lock, flags);
     return 0;
 }

@@ -194,6 +195,7 @@ nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
     struct nv04_fifo *fifo = (void *)object->engine;
     struct nv04_fifo_chan *chan = (void *)object;
     struct nvkm_gpuobj *fctx = fifo->ramfc;
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
     struct ramfc_desc *c;
     unsigned long flags;
     u32 data = chan->ramfc;

@@ -201,39 +203,39 @@ nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
     /* prevent fifo context switches */
     spin_lock_irqsave(&fifo->base.lock, flags);
-    nv_wr32(fifo, NV03_PFIFO_CACHES, 0);
+    nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

     /* if this channel is active, replace it with a null context */
-    chid = nv_rd32(fifo, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max;
+    chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max;
     if (chid == chan->base.chid) {
-        nv_mask(fifo, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
-        nv_wr32(fifo, NV03_PFIFO_CACHE1_PUSH0, 0);
-        nv_mask(fifo, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
+        nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
+        nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
+        nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);

         c = fifo->ramfc_desc;
         do {
             u32 rm = ((1ULL << c->bits) - 1) << c->regs;
             u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
-            u32 rv = (nv_rd32(fifo, c->regp) & rm) >> c->regs;
+            u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs;
             u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm);
             nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
         } while ((++c)->bits);

         c = fifo->ramfc_desc;
         do {
-            nv_wr32(fifo, c->regp, 0x00000000);
+            nvkm_wr32(device, c->regp, 0x00000000);
         } while ((++c)->bits);

-        nv_wr32(fifo, NV03_PFIFO_CACHE1_GET, 0);
-        nv_wr32(fifo, NV03_PFIFO_CACHE1_PUT, 0);
-        nv_wr32(fifo, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);
-        nv_wr32(fifo, NV03_PFIFO_CACHE1_PUSH0, 1);
-        nv_wr32(fifo, NV04_PFIFO_CACHE1_PULL0, 1);
+        nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
+        nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
+        nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);
+        nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+        nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
     }

     /* restore normal operation, after disabling dma mode */
-    nv_mask(fifo, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
-    nv_wr32(fifo, NV03_PFIFO_CACHES, 1);
+    nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
+    nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
     spin_unlock_irqrestore(&fifo->base.lock, flags);

     return nvkm_fifo_channel_fini(&chan->base, suspend);

@@ -301,13 +303,14 @@ nv04_fifo_pause(struct nvkm_fifo *obj, unsigned long *pflags)
 __acquires(fifo->base.lock)
 {
     struct nv04_fifo *fifo = container_of(obj, typeof(*fifo), base);
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
     unsigned long flags;

     spin_lock_irqsave(&fifo->base.lock, flags);
     *pflags = flags;

-    nv_wr32(fifo, NV03_PFIFO_CACHES, 0x00000000);
-    nv_mask(fifo, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);
+    nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
+    nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);

     /* in some cases the puller may be left in an inconsistent state
      * if you try to stop it while it's busy translating handles.

@@ -322,11 +325,11 @@ __acquires(fifo->base.lock)
                        NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0x00000000))
         nv_warn(fifo, "timeout idling puller\n");

-    if (nv_rd32(fifo, NV04_PFIFO_CACHE1_PULL0) &
+    if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
                 NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
-        nv_wr32(fifo, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
+        nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

-    nv_wr32(fifo, NV04_PFIFO_CACHE1_HASH, 0x00000000);
+    nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000);
 }

 void

@@ -334,10 +337,11 @@ nv04_fifo_start(struct nvkm_fifo *obj, unsigned long *pflags)
 __releases(fifo->base.lock)
 {
     struct nv04_fifo *fifo = container_of(obj, typeof(*fifo), base);
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
     unsigned long flags = *pflags;

-    nv_mask(fifo, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
-    nv_wr32(fifo, NV03_PFIFO_CACHES, 0x00000001);
+    nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
+    nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);

     spin_unlock_irqrestore(&fifo->base.lock, flags);
 }

@@ -355,6 +359,7 @@ nv_dma_state_err(u32 state)
 static bool
 nv04_fifo_swmthd(struct nv04_fifo *fifo, u32 chid, u32 addr, u32 data)
 {
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
     struct nv04_fifo_chan *chan = NULL;
     struct nvkm_handle *bind;
     const int subc = (addr >> 13) & 0x7;

@@ -380,13 +385,13 @@ nv04_fifo_swmthd(struct nv04_fifo *fifo, u32 chid, u32 addr, u32 data)
             chan->subc[subc] = data;
             handled = true;

-            nv_mask(fifo, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
+            nvkm_mask(device, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
         }

         nvkm_namedb_put(bind);
         break;
     default:
-        engine = nv_rd32(fifo, NV04_PFIFO_CACHE1_ENGINE);
+        engine = nvkm_rd32(device, NV04_PFIFO_CACHE1_ENGINE);
         if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
             break;

@@ -419,11 +424,11 @@ nv04_fifo_cache_error(struct nvkm_device *device,
         ptr = (get & 0x7ff) >> 2;

         if (device->card_type < NV_40) {
-            mthd = nv_rd32(fifo, NV04_PFIFO_CACHE1_METHOD(ptr));
-            data = nv_rd32(fifo, NV04_PFIFO_CACHE1_DATA(ptr));
+            mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr));
+            data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr));
         } else {
-            mthd = nv_rd32(fifo, NV40_PFIFO_CACHE1_METHOD(ptr));
-            data = nv_rd32(fifo, NV40_PFIFO_CACHE1_DATA(ptr));
+            mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr));
+            data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr));
         }

         if (!nv04_fifo_swmthd(fifo, chid, mthd, data)) {

@@ -435,19 +440,19 @@ nv04_fifo_cache_error(struct nvkm_device *device,
             data);
     }

-    nv_wr32(fifo, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
-    nv_wr32(fifo, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
+    nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+    nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

-    nv_wr32(fifo, NV03_PFIFO_CACHE1_PUSH0,
-        nv_rd32(fifo, NV03_PFIFO_CACHE1_PUSH0) & ~1);
-    nv_wr32(fifo, NV03_PFIFO_CACHE1_GET, get + 4);
-    nv_wr32(fifo, NV03_PFIFO_CACHE1_PUSH0,
-        nv_rd32(fifo, NV03_PFIFO_CACHE1_PUSH0) | 1);
-    nv_wr32(fifo, NV04_PFIFO_CACHE1_HASH, 0);
+    nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
+        nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+    nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
+    nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
+        nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1);
+    nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0);

-    nv_wr32(fifo, NV04_PFIFO_CACHE1_DMA_PUSH,
-        nv_rd32(fifo, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
-    nv_wr32(fifo, NV04_PFIFO_CACHE1_PULL0, 1);
+    nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH,
+        nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+    nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
 }

 static void

@@ -455,18 +460,18 @@ nv04_fifo_dma_pusher(struct nvkm_device *device,
                      struct nv04_fifo *fifo, u32 chid)
 {
     const char *client_name;
-    u32 dma_get = nv_rd32(fifo, 0x003244);
-    u32 dma_put = nv_rd32(fifo, 0x003240);
-    u32 push = nv_rd32(fifo, 0x003220);
-    u32 state = nv_rd32(fifo, 0x003228);
+    u32 dma_get = nvkm_rd32(device, 0x003244);
+    u32 dma_put = nvkm_rd32(device, 0x003240);
+    u32 push = nvkm_rd32(device, 0x003220);
+    u32 state = nvkm_rd32(device, 0x003228);

     client_name = nvkm_client_name_for_fifo_chid(&fifo->base, chid);

     if (device->card_type == NV_50) {
-        u32 ho_get = nv_rd32(fifo, 0x003328);
-        u32 ho_put = nv_rd32(fifo, 0x003320);
-        u32 ib_get = nv_rd32(fifo, 0x003334);
-        u32 ib_put = nv_rd32(fifo, 0x003330);
+        u32 ho_get = nvkm_rd32(device, 0x003328);
+        u32 ho_put = nvkm_rd32(device, 0x003320);
+        u32 ib_get = nvkm_rd32(device, 0x003334);
+        u32 ib_put = nvkm_rd32(device, 0x003330);

         nv_error(fifo,
             "DMA_PUSHER - ch %d [%s] get 0x%02x%08x put 0x%02x%08x ib_get 0x%08x ib_put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",

@@ -474,13 +479,13 @@ nv04_fifo_dma_pusher(struct nvkm_device *device,
             ib_get, ib_put, state, nv_dma_state_err(state), push);

         /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
-        nv_wr32(fifo, 0x003364, 0x00000000);
+        nvkm_wr32(device, 0x003364, 0x00000000);
         if (dma_get != dma_put || ho_get != ho_put) {
-            nv_wr32(fifo, 0x003244, dma_put);
-            nv_wr32(fifo, 0x003328, ho_put);
+            nvkm_wr32(device, 0x003244, dma_put);
+            nvkm_wr32(device, 0x003328, ho_put);
         } else
         if (ib_get != ib_put)
-            nv_wr32(fifo, 0x003334, ib_put);
+            nvkm_wr32(device, 0x003334, ib_put);
     } else {
         nv_error(fifo,
             "DMA_PUSHER - ch %d [%s] get 0x%08x put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",

@@ -488,12 +493,12 @@ nv04_fifo_dma_pusher(struct nvkm_device *device,
             nv_dma_state_err(state), push);

         if (dma_get != dma_put)
-            nv_wr32(fifo, 0x003244, dma_put);
+            nvkm_wr32(device, 0x003244, dma_put);
     }

-    nv_wr32(fifo, 0x003228, 0x00000000);
-    nv_wr32(fifo, 0x003220, 0x00000001);
-    nv_wr32(fifo, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+    nvkm_wr32(device, 0x003228, 0x00000000);
+    nvkm_wr32(device, 0x003220, 0x00000001);
+    nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
 }

 void

@@ -501,15 +506,15 @@ nv04_fifo_intr(struct nvkm_subdev *subdev)
 {
     struct nvkm_device *device = nv_device(subdev);
     struct nv04_fifo *fifo = (void *)subdev;
-    u32 mask = nv_rd32(fifo, NV03_PFIFO_INTR_EN_0);
-    u32 stat = nv_rd32(fifo, NV03_PFIFO_INTR_0) & mask;
+    u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0);
+    u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask;
     u32 reassign, chid, get, sem;

-    reassign = nv_rd32(fifo, NV03_PFIFO_CACHES) & 1;
-    nv_wr32(fifo, NV03_PFIFO_CACHES, 0);
+    reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
+    nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

-    chid = nv_rd32(fifo, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max;
-    get  = nv_rd32(fifo, NV03_PFIFO_CACHE1_GET);
+    chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max;
+    get  = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);

     if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
         nv04_fifo_cache_error(device, fifo, chid, get);

@@ -523,23 +528,23 @@ nv04_fifo_intr(struct nvkm_subdev *subdev)
         if (stat & NV_PFIFO_INTR_SEMAPHORE) {
             stat &= ~NV_PFIFO_INTR_SEMAPHORE;
-            nv_wr32(fifo, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);
+            nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);

-            sem = nv_rd32(fifo, NV10_PFIFO_CACHE1_SEMAPHORE);
-            nv_wr32(fifo, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+            sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE);
+            nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

-            nv_wr32(fifo, NV03_PFIFO_CACHE1_GET, get + 4);
-            nv_wr32(fifo, NV04_PFIFO_CACHE1_PULL0, 1);
+            nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
+            nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
         }

         if (device->card_type == NV_50) {
             if (stat & 0x00000010) {
                 stat &= ~0x00000010;
-                nv_wr32(fifo, 0x002100, 0x00000010);
+                nvkm_wr32(device, 0x002100, 0x00000010);
             }

             if (stat & 0x40000000) {
-                nv_wr32(fifo, 0x002100, 0x40000000);
+                nvkm_wr32(device, 0x002100, 0x40000000);
                 nvkm_fifo_uevent(&fifo->base);
                 stat &= ~0x40000000;
             }

@@ -547,11 +552,11 @@ nv04_fifo_intr(struct nvkm_subdev *subdev)
     if (stat) {
         nv_warn(fifo, "unknown intr 0x%08x\n", stat);
-        nv_mask(fifo, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
-        nv_wr32(fifo, NV03_PFIFO_INTR_0, stat);
+        nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
+        nvkm_wr32(device, NV03_PFIFO_INTR_0, stat);
     }

-    nv_wr32(fifo, NV03_PFIFO_CACHES, reassign);
+    nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
 }

 static int

@@ -596,29 +601,30 @@ int
 nv04_fifo_init(struct nvkm_object *object)
 {
     struct nv04_fifo *fifo = (void *)object;
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
     int ret;

     ret = nvkm_fifo_init(&fifo->base);
     if (ret)
         return ret;

-    nv_wr32(fifo, NV04_PFIFO_DELAY_0, 0x000000ff);
-    nv_wr32(fifo, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+    nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
+    nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

-    nv_wr32(fifo, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+    nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
                                     ((fifo->ramht->bits - 9) << 16) |
                                      (fifo->ramht->gpuobj.addr >> 8));
-    nv_wr32(fifo, NV03_PFIFO_RAMRO, fifo->ramro->addr >> 8);
-    nv_wr32(fifo, NV03_PFIFO_RAMFC, fifo->ramfc->addr >> 8);
+    nvkm_wr32(device, NV03_PFIFO_RAMRO, fifo->ramro->addr >> 8);
+    nvkm_wr32(device, NV03_PFIFO_RAMFC, fifo->ramfc->addr >> 8);

-    nv_wr32(fifo, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);
+    nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);

-    nv_wr32(fifo, NV03_PFIFO_INTR_0, 0xffffffff);
-    nv_wr32(fifo, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+    nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
+    nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);

-    nv_wr32(fifo, NV03_PFIFO_CACHE1_PUSH0, 1);
-    nv_wr32(fifo, NV04_PFIFO_CACHE1_PULL0, 1);
-    nv_wr32(fifo, NV03_PFIFO_CACHES, 1);
+    nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+    nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+    nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
     return 0;
 }
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c

@@ -177,29 +177,30 @@ static int
 nv17_fifo_init(struct nvkm_object *object)
 {
     struct nv04_fifo *fifo = (void *)object;
+    struct nvkm_device *device = fifo->base.engine.subdev.device;
     int ret;

     ret = nvkm_fifo_init(&fifo->base);
     if (ret)
         return ret;

-    nv_wr32(fifo, NV04_PFIFO_DELAY_0, 0x000000ff);
-    nv_wr32(fifo, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+    nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
+    nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

-    nv_wr32(fifo, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+    nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
                                     ((fifo->ramht->bits - 9) << 16) |
                                      (fifo->ramht->gpuobj.addr >> 8));
-    nv_wr32(fifo, NV03_PFIFO_RAMRO, fifo->ramro->addr >> 8);
-    nv_wr32(fifo, NV03_PFIFO_RAMFC, fifo->ramfc->addr >> 8 | 0x00010000);
+    nvkm_wr32(device, NV03_PFIFO_RAMRO, fifo->ramro->addr >> 8);
+    nvkm_wr32(device, NV03_PFIFO_RAMFC, fifo->ramfc->addr >> 8 | 0x00010000);

-    nv_wr32(fifo, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);
+    nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);

-    nv_wr32(fifo, NV03_PFIFO_INTR_0, 0xffffffff);
-    nv_wr32(fifo, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+    nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
+    nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);

-    nv_wr32(fifo, NV03_PFIFO_CACHE1_PUSH0, 1);
-    nv_wr32(fifo, NV04_PFIFO_CACHE1_PULL0, 1);
-    nv_wr32(fifo, NV03_PFIFO_CACHES, 1);
+    nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+    nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+    nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
     return 0;
 }
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
View file @
87744403
...
...
@@ -105,6 +105,7 @@ nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx)
{
struct
nv04_fifo
*
fifo
=
(
void
*
)
parent
->
engine
;
struct
nv04_fifo_chan
*
chan
=
(
void
*
)
parent
;
struct
nvkm_device
*
device
=
fifo
->
base
.
engine
.
subdev
.
device
;
unsigned
long
flags
;
u32
reg
,
ctx
;
...
...
@@ -125,13 +126,13 @@ nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx)
spin_lock_irqsave
(
&
fifo
->
base
.
lock
,
flags
);
nv_engctx
(
engctx
)
->
addr
=
nv_gpuobj
(
engctx
)
->
addr
>>
4
;
nv
_mask
(
fifo
,
0x002500
,
0x00000001
,
0x00000000
);
nv
km_mask
(
device
,
0x002500
,
0x00000001
,
0x00000000
);
if
((
nv
_rd32
(
fifo
,
0x003204
)
&
fifo
->
base
.
max
)
==
chan
->
base
.
chid
)
nv
_wr32
(
fifo
,
reg
,
nv_engctx
(
engctx
)
->
addr
);
if
((
nv
km_rd32
(
device
,
0x003204
)
&
fifo
->
base
.
max
)
==
chan
->
base
.
chid
)
nv
km_wr32
(
device
,
reg
,
nv_engctx
(
engctx
)
->
addr
);
nv_wo32
(
fifo
->
ramfc
,
chan
->
ramfc
+
ctx
,
nv_engctx
(
engctx
)
->
addr
);
nv
_mask
(
fifo
,
0x002500
,
0x00000001
,
0x00000001
);
nv
km_mask
(
device
,
0x002500
,
0x00000001
,
0x00000001
);
spin_unlock_irqrestore
(
&
fifo
->
base
.
lock
,
flags
);
return
0
;
}
...
...
@@ -142,6 +143,7 @@ nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend,
{
struct
nv04_fifo
*
fifo
=
(
void
*
)
parent
->
engine
;
struct
nv04_fifo_chan
*
chan
=
(
void
*
)
parent
;
struct
nvkm_device
*
device
=
fifo
->
base
.
engine
.
subdev
.
device
;
unsigned
long
flags
;
u32
reg
,
ctx
;
...
...
@@ -161,13 +163,13 @@ nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend,
	}

	spin_lock_irqsave(&fifo->base.lock, flags);
	nv_mask(fifo, 0x002500, 0x00000001, 0x00000000);
	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
	if ((nv_rd32(fifo, 0x003204) & fifo->base.max) == chan->base.chid)
		nv_wr32(fifo, reg, 0x00000000);
	if ((nvkm_rd32(device, 0x003204) & fifo->base.max) == chan->base.chid)
		nvkm_wr32(device, reg, 0x00000000);
	nv_wo32(fifo->ramfc, chan->ramfc + ctx, 0x00000000);
	nv_mask(fifo, 0x002500, 0x00000001, 0x00000001);
	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return 0;
}
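The attach/detach paths above share one locked sequence: pause the puller by clearing bit 0 of 0x002500, check whether the channel being modified is the one currently resident on PFIFO (the channel-id bits of 0x003204), and only in that case poke the engine context register directly before re-enabling the puller. A compact sketch of that control flow follows; the regs[] array and the rd32/wr32/mask32 helpers are hypothetical stand-ins for the pri accessors, and the spinlock handling is omitted.

/* Sketch only: an in-memory register array models the pause/check/poke
 * sequence used by nv40_fifo_context_attach()/detach() above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t regs[0x4000];

static uint32_t rd32(uint32_t addr) { return regs[addr >> 2]; }
static void wr32(uint32_t addr, uint32_t data) { regs[addr >> 2] = data; }

/* read-modify-write that returns the previous value, like nvkm_mask() */
static uint32_t mask32(uint32_t addr, uint32_t mask, uint32_t data)
{
	uint32_t temp = rd32(addr);
	wr32(addr, (temp & ~mask) | data);
	return temp;
}

/* poke an engine context register only if this channel is active now */
static void context_attach(uint32_t chid, uint32_t chid_mask,
			   uint32_t reg, uint32_t ctx_addr)
{
	mask32(0x002500, 0x00000001, 0x00000000);	/* pause puller */
	if ((rd32(0x003204) & chid_mask) == chid)	/* channel resident? */
		wr32(reg, ctx_addr);			/* update live context */
	mask32(0x002500, 0x00000001, 0x00000001);	/* resume puller */
}

int main(void)
{
	wr32(0x003204, 0x00000003);		/* pretend channel 3 is active */
	context_attach(3, 0x1f, 0x0032e0, 0x1234);
	printf("0x0032e0 = %08x\n", (unsigned)rd32(0x0032e0));
	return 0;
}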
...
...
@@ -295,51 +297,52 @@ static int
nv40_fifo_init(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object;
	struct nvkm_fb *fb = nvkm_fb(object);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_fb *fb = device->fb;
	int ret;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	nv_wr32(fifo, 0x002040, 0x000000ff);
	nv_wr32(fifo, 0x002044, 0x2101ffff);
	nv_wr32(fifo, 0x002058, 0x00000001);
	nvkm_wr32(device, 0x002040, 0x000000ff);
	nvkm_wr32(device, 0x002044, 0x2101ffff);
	nvkm_wr32(device, 0x002058, 0x00000001);
	nv_wr32(fifo, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
				       ((fifo->ramht->bits - 9) << 16) |
					(fifo->ramht->gpuobj.addr >> 8));
	nv_wr32(fifo, NV03_PFIFO_RAMRO, fifo->ramro->addr >> 8);
	nvkm_wr32(device, NV03_PFIFO_RAMRO, fifo->ramro->addr >> 8);

	switch (nv_device(fifo)->chipset) {
	case 0x47:
	case 0x49:
	case 0x4b:
		nv_wr32(fifo, 0x002230, 0x00000001);
		nvkm_wr32(device, 0x002230, 0x00000001);
	case 0x40:
	case 0x41:
	case 0x42:
	case 0x43:
	case 0x45:
	case 0x48:
		nv_wr32(fifo, 0x002220, 0x00030002);
		nvkm_wr32(device, 0x002220, 0x00030002);
		break;
	default:
		nv_wr32(fifo, 0x002230, 0x00000000);
		nv_wr32(fifo, 0x002220, ((fb->ram->size - 512 * 1024 +
		nvkm_wr32(device, 0x002230, 0x00000000);
		nvkm_wr32(device, 0x002220, ((fb->ram->size - 512 * 1024 +
					     fifo->ramfc->addr) >> 16) |
					    0x00030000);
		break;
	}

	nv_wr32(fifo, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);
	nv_wr32(fifo, NV03_PFIFO_INTR_0, 0xffffffff);
	nv_wr32(fifo, NV03_PFIFO_INTR_EN_0, 0xffffffff);
	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
	nv_wr32(fifo, NV03_PFIFO_CACHE1_PUSH0, 1);
	nv_wr32(fifo, NV04_PFIFO_CACHE1_PULL0, 1);
	nv_wr32(fifo, NV03_PFIFO_CACHES, 1);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
	return 0;
}
...
...
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
View file @
87744403
...
...
@@ -41,7 +41,8 @@
static void
nv50_fifo_playlist_update_locked(struct nv50_fifo *fifo)
{
	struct nvkm_bar *bar = nvkm_bar(fifo);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_bar *bar = device->bar;
	struct nvkm_gpuobj *cur;
	int i, p;
...
...
@@ -49,15 +50,15 @@ nv50_fifo_playlist_update_locked(struct nv50_fifo *fifo)
	fifo->cur_playlist = !fifo->cur_playlist;

	for (i = fifo->base.min, p = 0; i < fifo->base.max; i++) {
		if (nv_rd32(fifo, 0x002600 + (i * 4)) & 0x80000000)
		if (nvkm_rd32(device, 0x002600 + (i * 4)) & 0x80000000)
			nv_wo32(cur, p++ * 4, i);
	}

	bar->flush(bar);

	nv_wr32(fifo, 0x0032f4, cur->addr >> 12);
	nv_wr32(fifo, 0x0032ec, p);
	nv_wr32(fifo, 0x002500, 0x00000101);
	nvkm_wr32(device, 0x0032f4, cur->addr >> 12);
	nvkm_wr32(device, 0x0032ec, p);
	nvkm_wr32(device, 0x002500, 0x00000101);
}
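The playlist rebuild above scans the per-channel context slots at 0x002600 + (chid * 4), collects every channel whose valid bit (0x80000000) is set into the current playlist buffer, and then hands the buffer address and entry count to the hardware. A rough standalone sketch of that selection loop is shown below; the plain arrays are hypothetical stand-ins for the pri registers and the playlist gpuobj, and the final hardware kick is omitted.

/* Sketch of the active-channel scan; chan_ctx[] and playlist[] are
 * hypothetical arrays modelling 0x002600 + (i * 4) and the playlist. */
#include <stdint.h>
#include <stdio.h>

#define NR_CHAN 128

static uint32_t chan_ctx[NR_CHAN];	/* mirrors 0x002600 + (i * 4) */
static uint32_t playlist[NR_CHAN];	/* mirrors the playlist gpuobj */

/* collect every channel with the valid bit set; return the entry count */
static int build_playlist(void)
{
	int i, p = 0;

	for (i = 0; i < NR_CHAN; i++) {
		if (chan_ctx[i] & 0x80000000)
			playlist[p++] = (uint32_t)i;
	}
	return p;
}

int main(void)
{
	chan_ctx[2] = 0x80000000 | 0x1000;	/* two "bound" channels */
	chan_ctx[7] = 0x80000000 | 0x2000;

	int p = build_playlist();
	printf("%d active channels, first is %u\n", p, (unsigned)playlist[0]);
	return 0;
}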
void
...
...
@@ -102,10 +103,11 @@ static int
nv50_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			 struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct nv50_fifo *fifo = (void *)parent->engine;
	struct nv50_fifo_base *base = (void *)parent->parent;
	struct nv50_fifo_chan *chan = (void *)parent;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_bar *bar = device->bar;
	u32 addr, me;
	int ret = 0;
...
...
@@ -129,17 +131,17 @@ nv50_fifo_context_detach(struct nvkm_object *parent, bool suspend,
* there's also a "ignore these engines" bitmask reg we can use
* if we hit the issue there..
*/
	me = nv_mask(fifo, 0x00b860, 0x00000001, 0x00000001);
	me = nvkm_mask(device, 0x00b860, 0x00000001, 0x00000001);

	/* do the kickoff... */
	nv_wr32(fifo, 0x0032fc, nv_gpuobj(base)->addr >> 12);
	nvkm_wr32(device, 0x0032fc, nv_gpuobj(base)->addr >> 12);
	if (!nv_wait_ne(fifo, 0x0032fc, 0xffffffff, 0xffffffff)) {
		nv_error(fifo, "channel %d [%s] unload timeout\n",
			 chan->base.chid, nvkm_client_name(chan));
		if (suspend)
			ret = -EBUSY;
	}
	nv_wr32(fifo, 0x00b860, me);
	nvkm_wr32(device, 0x00b860, me);

	if (ret == 0) {
		nv_wo32(base->eng, addr + 0x00, 0x00000000);
...
...
@@ -324,6 +326,7 @@ nv50_fifo_chan_init(struct nvkm_object *object)
	struct nv50_fifo_base *base = (void *)object->parent;
	struct nv50_fifo_chan *chan = (void *)object;
	struct nvkm_gpuobj *ramfc = base->ramfc;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;
	int ret;
...
...
@@ -331,7 +334,7 @@ nv50_fifo_chan_init(struct nvkm_object *object)
	if (ret)
		return ret;

	nv_wr32(fifo, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12);
	nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12);
	nv50_fifo_playlist_update(fifo);
	return 0;
}
...
...
@@ -341,12 +344,13 @@ nv50_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_fifo *fifo = (void *)object->engine;
	struct nv50_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;

	/* remove channel from playlist, fifo will unload context */
	nv_mask(fifo, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
	nvkm_mask(device, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
	nv50_fifo_playlist_update(fifo);
	nv_wr32(fifo, 0x002600 + (chid * 4), 0x00000000);
	nvkm_wr32(device, 0x002600 + (chid * 4), 0x00000000);

	return nvkm_fifo_channel_fini(&chan->base, suspend);
}
...
...
@@ -498,27 +502,28 @@ int
nv50_fifo_init(struct nvkm_object *object)
{
	struct nv50_fifo *fifo = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret, i;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	nv_mask(fifo, 0x000200, 0x00000100, 0x00000000);
	nv_mask(fifo, 0x000200, 0x00000100, 0x00000100);
	nv_wr32(fifo, 0x00250c, 0x6f3cfc34);
	nv_wr32(fifo, 0x002044, 0x01003fff);
	nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
	nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
	nvkm_wr32(device, 0x00250c, 0x6f3cfc34);
	nvkm_wr32(device, 0x002044, 0x01003fff);

	nv_wr32(fifo, 0x002100, 0xffffffff);
	nv_wr32(fifo, 0x002140, 0xbfffffff);
	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0xbfffffff);

	for (i = 0; i < 128; i++)
		nv_wr32(fifo, 0x002600 + (i * 4), 0x00000000);
		nvkm_wr32(device, 0x002600 + (i * 4), 0x00000000);
	nv50_fifo_playlist_update_locked(fifo);

	nv_wr32(fifo, 0x003200, 0x00000001);
	nv_wr32(fifo, 0x003250, 0x00000001);
	nv_wr32(fifo, 0x002500, 0x00000001);
	nvkm_wr32(device, 0x003200, 0x00000001);
	nvkm_wr32(device, 0x003250, 0x00000001);
	nvkm_wr32(device, 0x002500, 0x00000001);
	return 0;
}
...
...