nexedi / linux / Commits / 3430abd6

Commit 3430abd6 authored Jul 01, 2019 by Joerg Roedel

    Merge branch 'arm/renesas' into arm/smmu

Parents: 39debdc1, da38e9ec
Showing 1 changed file with 125 additions and 62 deletions.

drivers/iommu/ipmmu-vmsa.c (+125, -62)

--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -36,12 +36,16 @@
 #define arm_iommu_detach_device(...)	do {} while (0)
 #endif
 
-#define IPMMU_CTX_MAX 8
+#define IPMMU_CTX_MAX		8U
+#define IPMMU_CTX_INVALID	-1
+
+#define IPMMU_UTLB_MAX		48U
 
 struct ipmmu_features {
 	bool use_ns_alias_offset;
 	bool has_cache_leaf_nodes;
 	unsigned int number_of_contexts;
+	unsigned int num_utlbs;
 	bool setup_imbuscr;
 	bool twobit_imttbcr_sl0;
 	bool reserved_context;
@@ -53,11 +57,11 @@ struct ipmmu_vmsa_device {
 	struct iommu_device iommu;
 	struct ipmmu_vmsa_device *root;
 	const struct ipmmu_features *features;
-	unsigned int num_utlbs;
 	unsigned int num_ctx;
 	spinlock_t lock;			/* Protects ctx and domains[] */
 	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
 	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
+	s8 utlb_ctx[IPMMU_UTLB_MAX];
 
 	struct iommu_group *group;
 	struct dma_iommu_mapping *mapping;
@@ -186,7 +190,8 @@ static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
 #define IMMAIR_ATTR_IDX_WBRWA		1
 #define IMMAIR_ATTR_IDX_DEV		2
 
-#define IMEAR				0x0030
+#define IMELAR				0x0030	/* IMEAR on R-Car Gen2 */
+#define IMEUAR				0x0034	/* R-Car Gen3 only */
 
 #define IMPCTR				0x0200
 #define IMPSTR				0x0208
@@ -334,6 +339,7 @@ static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
 	ipmmu_write(mmu, IMUCTR(utlb),
 		    IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
 		    IMUCTR_MMUEN);
+	mmu->utlb_ctx[utlb] = domain->context_id;
 }
 
 /*
@@ -345,6 +351,7 @@ static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
 	struct ipmmu_vmsa_device *mmu = domain->mmu;
 
 	ipmmu_write(mmu, IMUCTR(utlb), 0);
+	mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
 }
 
 static void ipmmu_tlb_flush_all(void *cookie)
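Note: the two hunks above introduce the shadow-state bookkeeping that the suspend/resume support at the end of this diff relies on: every micro-TLB enable records the bound context in mmu->utlb_ctx[], and every disable resets the slot to IPMMU_CTX_INVALID. A reduced, stand-alone sketch of the pattern (hypothetical names, userspace code, not the driver's):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NSLOTS		48
#define SLOT_INVALID	(-1)

static int8_t shadow[NSLOTS];	/* mirrors mmu->utlb_ctx[] */

/* Stand-in for the IMUCTR register write done by ipmmu_utlb_enable(). */
static void hw_bind(unsigned int slot, int ctx)
{
	printf("slot %u -> ctx %d\n", slot, ctx);
}

static void bind(unsigned int slot, int8_t ctx)
{
	hw_bind(slot, ctx);
	shadow[slot] = ctx;	/* keep the software copy in sync */
}

/* Replay the software copy once the hardware has lost its state. */
static void restore_all(void)
{
	for (unsigned int i = 0; i < NSLOTS; i++)
		if (shadow[i] != SLOT_INVALID)
			hw_bind(i, shadow[i]);
}

int main(void)
{
	memset(shadow, SLOT_INVALID, sizeof(shadow));
	bind(3, 1);
	restore_all();	/* re-binds slot 3 to context 1 */
	return 0;
}

Because the replay works purely from the software copy, nothing has to be read back from hardware that may have lost power across a system sleep.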
@@ -403,53 +410,10 @@ static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
 	spin_unlock_irqrestore(&mmu->lock, flags);
 }
 
-static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
+static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
 {
 	u64 ttbr;
 	u32 tmp;
-	int ret;
-
-	/*
-	 * Allocate the page table operations.
-	 *
-	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
-	 * access, Long-descriptor format" that the NStable bit being set in a
-	 * table descriptor will result in the NStable and NS bits of all child
-	 * entries being ignored and considered as being set. The IPMMU seems
-	 * not to comply with this, as it generates a secure access page fault
-	 * if any of the NStable and NS bits isn't set when running in
-	 * non-secure mode.
-	 */
-	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
-	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
-	domain->cfg.ias = 32;
-	domain->cfg.oas = 40;
-	domain->cfg.tlb = &ipmmu_gather_ops;
-	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
-	domain->io_domain.geometry.force_aperture = true;
-	/*
-	 * TODO: Add support for coherent walk through CCI with DVM and remove
-	 * cache handling. For now, delegate it to the io-pgtable code.
-	 */
-	domain->cfg.coherent_walk = false;
-	domain->cfg.iommu_dev = domain->mmu->root->dev;
-
-	/*
-	 * Find an unused context.
-	 */
-	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
-	if (ret < 0)
-		return ret;
-
-	domain->context_id = ret;
-
-	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
-					   domain);
-	if (!domain->iop) {
-		ipmmu_domain_free_context(domain->mmu->root,
-					  domain->context_id);
-		return -EINVAL;
-	}
 
 	/* TTBR0 */
 	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
@@ -495,7 +459,55 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
 	 */
 	ipmmu_ctx_write_all(domain, IMCTR,
 			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
+}
+
+static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
+{
+	int ret;
+
+	/*
+	 * Allocate the page table operations.
+	 *
+	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
+	 * access, Long-descriptor format" that the NStable bit being set in a
+	 * table descriptor will result in the NStable and NS bits of all child
+	 * entries being ignored and considered as being set. The IPMMU seems
+	 * not to comply with this, as it generates a secure access page fault
+	 * if any of the NStable and NS bits isn't set when running in
+	 * non-secure mode.
+	 */
+	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
+	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
+	domain->cfg.ias = 32;
+	domain->cfg.oas = 40;
+	domain->cfg.tlb = &ipmmu_gather_ops;
+	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
+	domain->io_domain.geometry.force_aperture = true;
+	/*
+	 * TODO: Add support for coherent walk through CCI with DVM and remove
+	 * cache handling. For now, delegate it to the io-pgtable code.
+	 */
+	domain->cfg.coherent_walk = false;
+	domain->cfg.iommu_dev = domain->mmu->root->dev;
+
+	/*
+	 * Find an unused context.
+	 */
+	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
+	if (ret < 0)
+		return ret;
+
+	domain->context_id = ret;
+
+	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
+					   domain);
+	if (!domain->iop) {
+		ipmmu_domain_free_context(domain->mmu->root,
+					  domain->context_id);
+		return -EINVAL;
+	}
+
+	ipmmu_domain_setup_context(domain);
 
 	return 0;
 }
@@ -523,14 +535,16 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
 {
 	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
 	struct ipmmu_vmsa_device *mmu = domain->mmu;
+	unsigned long iova;
 	u32 status;
-	u32 iova;
 
 	status = ipmmu_ctx_read_root(domain, IMSTR);
 	if (!(status & err_mask))
 		return IRQ_NONE;
 
-	iova = ipmmu_ctx_read_root(domain, IMEAR);
+	iova = ipmmu_ctx_read_root(domain, IMELAR);
+	if (IS_ENABLED(CONFIG_64BIT))
+		iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;
 
 	/*
 	 * Clear the error status flags. Unlike traditional interrupt flag
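Note: on R-Car Gen3 the fault address is wider than 32 bits, so it is now read as a low/high pair (IMELAR, then IMEUAR shifted into the upper word) into an unsigned long, which is 32 bits on 32-bit ARM and 64 bits on arm64; this is also why the printk format specifiers change from %08x to %lx in the next two hunks. A minimal stand-alone sketch of the composition, with read_lo()/read_hi() as stand-ins for the two ipmmu_ctx_read_root() calls:

#include <inttypes.h>
#include <stdio.h>

/* Stand-ins for ipmmu_ctx_read_root(domain, IMELAR / IMEUAR). */
static uint32_t read_lo(void) { return 0x89abcdefu; }
static uint32_t read_hi(void) { return 0x00000042u; }

int main(void)
{
	uint64_t iova = read_lo();

	/* Only a 64-bit build reads the upper-word register. */
#if UINTPTR_MAX > 0xffffffffu
	iova |= (uint64_t)read_hi() << 32;
#endif
	printf("fault iova = 0x%" PRIx64 "\n", iova);
	return 0;
}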
@@ -542,10 +556,10 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
 
 	/* Log fatal errors. */
 	if (status & IMSTR_MHIT)
-		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n",
+		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
 				    iova);
 	if (status & IMSTR_ABORT)
-		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n",
+		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
 				    iova);
 
 	if (!(status & (IMSTR_PF | IMSTR_TF)))
@@ -561,7 +575,7 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
 		return IRQ_HANDLED;
 
 	dev_err_ratelimited(mmu->dev,
-			    "Unhandled fault: status 0x%08x iova 0x%08x\n",
+			    "Unhandled fault: status 0x%08x iova 0x%lx\n",
 			    status, iova);
 
 	return IRQ_HANDLED;
@@ -886,27 +900,37 @@ static int ipmmu_init_arm_mapping(struct device *dev)
 
 static int ipmmu_add_device(struct device *dev)
 {
+	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
 	struct iommu_group *group;
+	int ret;
 
 	/*
 	 * Only let through devices that have been verified in xlate()
 	 */
-	if (!to_ipmmu(dev))
+	if (!mmu)
 		return -ENODEV;
 
-	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
-		return ipmmu_init_arm_mapping(dev);
+	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)) {
+		ret = ipmmu_init_arm_mapping(dev);
+		if (ret)
+			return ret;
+	} else {
+		group = iommu_group_get_for_dev(dev);
+		if (IS_ERR(group))
+			return PTR_ERR(group);
 
-	group = iommu_group_get_for_dev(dev);
-	if (IS_ERR(group))
-		return PTR_ERR(group);
+		iommu_group_put(group);
+	}
 
-	iommu_group_put(group);
+	iommu_device_link(&mmu->iommu, dev);
 	return 0;
 }
 
 static void ipmmu_remove_device(struct device *dev)
 {
+	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
+
+	iommu_device_unlink(&mmu->iommu, dev);
 	arm_iommu_detach_device(dev);
 	iommu_group_remove_device(dev);
 }
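Note: the rework above replaces two early-return success paths with branches that fall through to a common tail, so iommu_device_link() runs on every successful path, and ipmmu_remove_device() gains the matching iommu_device_unlink(). A reduced control-flow sketch (hypothetical helpers, not the driver's code):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for ipmmu_init_arm_mapping() and the
 * iommu_group_get_for_dev()/iommu_group_put() sequence. */
static int init_arm_mapping(void) { return 0; }
static int join_group(void) { return 0; }
static void link_to_iommu(void) { puts("linked"); }

static int add_device(bool arm_mapping_path)
{
	int ret;

	if (arm_mapping_path) {
		ret = init_arm_mapping();
		if (ret)
			return ret;	/* early return only on error now */
	} else {
		ret = join_group();
		if (ret)
			return ret;
	}

	link_to_iommu();	/* common tail, reached from both branches */
	return 0;
}

int main(void)
{
	return add_device(true);
}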
@@ -960,6 +984,7 @@ static const struct ipmmu_features ipmmu_features_default = {
 	.use_ns_alias_offset = true,
 	.has_cache_leaf_nodes = false,
 	.number_of_contexts = 1, /* software only tested with one context */
+	.num_utlbs = 32,
 	.setup_imbuscr = true,
 	.twobit_imttbcr_sl0 = false,
 	.reserved_context = false,
@@ -969,6 +994,7 @@ static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
 	.use_ns_alias_offset = false,
 	.has_cache_leaf_nodes = true,
 	.number_of_contexts = 8,
+	.num_utlbs = 48,
 	.setup_imbuscr = false,
 	.twobit_imttbcr_sl0 = true,
 	.reserved_context = true,
@@ -1021,10 +1047,10 @@ static int ipmmu_probe(struct platform_device *pdev)
 	}
 
 	mmu->dev = &pdev->dev;
-	mmu->num_utlbs = 48;
 	spin_lock_init(&mmu->lock);
 	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
 	mmu->features = of_device_get_match_data(&pdev->dev);
+	memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
 	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
 
 	/* Map I/O memory and request IRQ. */
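Note: the memset() above works because utlb_ctx[] is an array of s8 and memset() stores its fill value byte by byte, so passing IPMMU_CTX_INVALID (-1, i.e. 0xff) marks every per-uTLB slot invalid in one call; the length is the per-SoC mmu->features->num_utlbs rather than the full IPMMU_UTLB_MAX. A stand-alone sketch of the same idea:

#include <stdio.h>
#include <string.h>

#define IPMMU_CTX_INVALID	-1
#define IPMMU_UTLB_MAX		48U

int main(void)
{
	signed char utlb_ctx[IPMMU_UTLB_MAX];	/* the kernel's s8 */

	/* memset() stores the low byte of its value into every byte;
	 * each s8 entry is one byte, so every slot reads back as -1. */
	memset(utlb_ctx, IPMMU_CTX_INVALID, sizeof(utlb_ctx));
	printf("utlb_ctx[0] = %d\n", utlb_ctx[0]);	/* prints -1 */
	return 0;
}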
@@ -1048,8 +1074,7 @@ static int ipmmu_probe(struct platform_device *pdev)
 	if (mmu->features->use_ns_alias_offset)
 		mmu->base += IM_NS_ALIAS_OFFSET;
 
-	mmu->num_ctx = min_t(unsigned int, IPMMU_CTX_MAX,
-			     mmu->features->number_of_contexts);
+	mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);
 
 	irq = platform_get_irq(pdev, 0);
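Note: dropping min_t() in favour of min() is enabled by the first hunk's change of IPMMU_CTX_MAX from 8 to 8U: the kernel's min() insists that both operands have the same type, and number_of_contexts is an unsigned int. A simplified stand-alone illustration of that type check (the real macro lives in include/linux/kernel.h):

#include <stdio.h>

/* Simplified version of the kernel's type-checked min(); comparing
 * &_x == &_y makes the compiler warn when x and y differ in type
 * (-Wcompare-distinct-pointer-types). */
#define min(x, y) ({			\
	typeof(x) _x = (x);		\
	typeof(y) _y = (y);		\
	(void)(&_x == &_y);		\
	_x < _y ? _x : _y; })

#define IPMMU_CTX_MAX	8U	/* the U suffix makes this unsigned int */

int main(void)
{
	unsigned int number_of_contexts = 8;

	/* With a plain "8", &_x would be int * and &_y unsigned int *,
	 * which is why the old code needed min_t(unsigned int, ...). */
	printf("num_ctx = %u\n", min(IPMMU_CTX_MAX, number_of_contexts));
	return 0;
}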
@@ -1141,10 +1166,48 @@ static int ipmmu_remove(struct platform_device *pdev)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int ipmmu_resume_noirq(struct device *dev)
+{
+	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
+	unsigned int i;
+
+	/* Reset root MMU and restore contexts */
+	if (ipmmu_is_root(mmu)) {
+		ipmmu_device_reset(mmu);
+
+		for (i = 0; i < mmu->num_ctx; i++) {
+			if (!mmu->domains[i])
+				continue;
+
+			ipmmu_domain_setup_context(mmu->domains[i]);
+		}
+	}
+
+	/* Re-enable active micro-TLBs */
+	for (i = 0; i < mmu->features->num_utlbs; i++) {
+		if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
+			continue;
+
+		ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
+	}
+
+	return 0;
+}
+
+static const struct dev_pm_ops ipmmu_pm = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
+};
+#define DEV_PM_OPS	&ipmmu_pm
+#else
+#define DEV_PM_OPS	NULL
+#endif /* CONFIG_PM_SLEEP */
+
 static struct platform_driver ipmmu_driver = {
 	.driver = {
 		.name = "ipmmu-vmsa",
 		.of_match_table = of_match_ptr(ipmmu_of_ids),
+		.pm = DEV_PM_OPS,
 	},
 	.probe = ipmmu_probe,
 	.remove	= ipmmu_remove,
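Note: the new PM hook is resume-only: SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq) installs no suspend callback, since the driver already tracks everything it needs at runtime (utlb_ctx[] and domains[]), and the noirq phase runs before device interrupts are re-enabled. Reduced pattern of a resume-only noirq hook, with my_resume_noirq as a hypothetical stand-in:

#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
/* Reprogram the hardware from state tracked at runtime; nothing is
 * saved at suspend time because the suspend hook is NULL. */
static int my_resume_noirq(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops my_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, my_resume_noirq)
};
#endif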