Commit a5934725 authored Oct 13, 2017 by Joerg Roedel
Merge branches 'iommu/fixes', 'arm/omap', 'arm/exynos', 'x86/amd', 'x86/vt-d' and 'core' into next
parents ce76353f 9d5018de 7a974b29 53b9ec3f b117e038 538d5b33
Showing 20 changed files with 518 additions and 321 deletions (+518 −321)
drivers/gpu/drm/tegra/drm.c        +1   −2
drivers/gpu/host1x/dev.c           +1   −2
drivers/iommu/amd_iommu.c          +21  −15
drivers/iommu/arm-smmu-v3.c        +10  −0
drivers/iommu/arm-smmu.c           +15  −5
drivers/iommu/dma-iommu.c          +5   −19
drivers/iommu/dmar.c               +5   −2
drivers/iommu/exynos-iommu.c       +16  −7
drivers/iommu/intel-iommu.c        +17  −11
drivers/iommu/io-pgtable-arm-v7s.c +1   −6
drivers/iommu/io-pgtable-arm.c     +1   −6
drivers/iommu/iova.c               +87  −129
drivers/iommu/ipmmu-vmsa.c         +10  −0
drivers/iommu/mtk_iommu.c          +7   −0
drivers/iommu/omap-iommu.c         +282 −93
drivers/iommu/omap-iommu.h         +14  −16
drivers/iommu/qcom_iommu.c         +15  −0
drivers/misc/mic/scif/scif_rma.c   +1   −2
include/linux/dmar.h               +1   −0
include/linux/iova.h               +8   −6
drivers/gpu/drm/tegra/drm.c
@@ -155,8 +155,7 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
 
 		order = __ffs(tegra->domain->pgsize_bitmap);
 		init_iova_domain(&tegra->carveout.domain, 1UL << order,
-				 carveout_start >> order,
-				 carveout_end >> order);
+				 carveout_start >> order);
 
 		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
 		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
drivers/gpu/host1x/dev.c
@@ -198,8 +198,7 @@ static int host1x_probe(struct platform_device *pdev)
 
 		order = __ffs(host->domain->pgsize_bitmap);
 		init_iova_domain(&host->iova, 1UL << order,
-				 geometry->aperture_start >> order,
-				 geometry->aperture_end >> order);
+				 geometry->aperture_start >> order);
 		host->iova_end = geometry->aperture_end;
 	}
drivers/iommu/amd_iommu.c
@@ -63,7 +63,6 @@
 /* IO virtual address start page frame number */
 #define IOVA_START_PFN		(1)
 #define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
-#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
 
 /* Reserved IOVA ranges */
 #define MSI_RANGE_START		(0xfee00000)
@@ -1547,10 +1546,11 @@ static unsigned long dma_ops_alloc_iova(struct device *dev,
 
 	if (dma_mask > DMA_BIT_MASK(32))
 		pfn = alloc_iova_fast(&dma_dom->iovad, pages,
-				      IOVA_PFN(DMA_BIT_MASK(32)));
+				      IOVA_PFN(DMA_BIT_MASK(32)), false);
 
 	if (!pfn)
-		pfn = alloc_iova_fast(&dma_dom->iovad, pages, IOVA_PFN(dma_mask));
+		pfn = alloc_iova_fast(&dma_dom->iovad, pages,
+				      IOVA_PFN(dma_mask), true);
 
 	return (pfn << PAGE_SHIFT);
 }
@@ -1788,8 +1788,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void)
 	if (!dma_dom->domain.pt_root)
 		goto free_dma_dom;
 
-	init_iova_domain(&dma_dom->iovad, PAGE_SIZE,
-			 IOVA_START_PFN, DMA_32BIT_PFN);
+	init_iova_domain(&dma_dom->iovad, PAGE_SIZE, IOVA_START_PFN);
 
 	if (init_iova_flush_queue(&dma_dom->iovad, iova_domain_flush_tlb, NULL))
 		goto free_dma_dom;
@@ -2696,8 +2695,7 @@ static int init_reserved_iova_ranges(void)
 	struct pci_dev *pdev = NULL;
 	struct iova *val;
 
-	init_iova_domain(&reserved_iova_ranges, PAGE_SIZE,
-			 IOVA_START_PFN, DMA_32BIT_PFN);
+	init_iova_domain(&reserved_iova_ranges, PAGE_SIZE, IOVA_START_PFN);
 
 	lockdep_set_class(&reserved_iova_ranges.iova_rbtree_lock,
 			  &reserved_rbtree_key);
@@ -3663,11 +3661,11 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
 	return table;
 }
 
-static int alloc_irq_index(u16 devid, int count)
+static int alloc_irq_index(u16 devid, int count, bool align)
 {
 	struct irq_remap_table *table;
+	int index, c, alignment = 1;
 	unsigned long flags;
-	int index, c;
 	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
 
 	if (!iommu)
@@ -3677,16 +3675,22 @@ static int alloc_irq_index(u16 devid, int count)
 	if (!table)
 		return -ENODEV;
 
+	if (align)
+		alignment = roundup_pow_of_two(count);
+
 	spin_lock_irqsave(&table->lock, flags);
 
 	/* Scan table for free entries */
-	for (c = 0, index = table->min_index;
+	for (index = ALIGN(table->min_index, alignment), c = 0;
 	     index < MAX_IRQS_PER_TABLE;
-	     ++index) {
-		if (!iommu->irte_ops->is_allocated(table, index))
+	     index++) {
+		if (!iommu->irte_ops->is_allocated(table, index)) {
 			c += 1;
-		else
-			c = 0;
+		} else {
+			c = 0;
+			index = ALIGN(index, alignment);
+			continue;
+		}
 
 		if (c == count)	{
 			for (; c != 0; --c)
@@ -4099,7 +4103,9 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
 		else
 			ret = -ENOMEM;
 	} else {
-		index = alloc_irq_index(devid, nr_irqs);
+		bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);
+
+		index = alloc_irq_index(devid, nr_irqs, align);
 	}
 
 	if (index < 0) {
 		pr_warn("Failed to allocate IRTE\n");
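Note: the reworked scan in alloc_irq_index() only lets a run of free IRTE slots begin on an index aligned to roundup_pow_of_two(count), which is what multi-vector MSI blocks require. A minimal standalone sketch of the same technique, using a plain bool array in place of the IRTE table; align_up() and find_free_run() are hypothetical helpers, not AMD IOMMU functions:

/* find_free_run.c - aligned free-run scan, sketch only */
#include <stdbool.h>
#include <stdio.h>

#define TABLE_SIZE 64

/* Round index up to the next multiple of align (align is a power of two). */
static unsigned int align_up(unsigned int index, unsigned int align)
{
        return (index + align - 1) & ~(align - 1);
}

/* Return the start of `count` consecutive free slots beginning on an
 * `align` boundary, or -1 if no such run exists. */
static int find_free_run(const bool *allocated, unsigned int count,
                         unsigned int align)
{
        unsigned int c = 0;

        for (unsigned int index = 0; index < TABLE_SIZE; index++) {
                if (!allocated[index]) {
                        if (++c == count)
                                return index - count + 1;
                } else {
                        /* Run broken: resume at the next aligned slot
                         * (minus one, because the loop increments). */
                        c = 0;
                        index = align_up(index + 1, align) - 1;
                }
        }
        return -1;
}

int main(void)
{
        bool table[TABLE_SIZE] = { [0] = true, [4] = true, [5] = true };

        /* Runs of 4 starting at multiples of 4: slots 8..11 are free. */
        printf("%d\n", find_free_run(table, 4, 4)); /* prints 8 */
        return 0;
}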
drivers/iommu/arm-smmu-v3.c
@@ -1743,6 +1743,14 @@ arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 	return ops->unmap(ops, iova, size);
 }
 
+static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
+{
+	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
+
+	if (smmu)
+		__arm_smmu_tlb_sync(smmu);
+}
+
 static phys_addr_t
 arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
 {
@@ -1963,6 +1971,8 @@ static struct iommu_ops arm_smmu_ops = {
 	.map			= arm_smmu_map,
 	.unmap			= arm_smmu_unmap,
 	.map_sg			= default_iommu_map_sg,
+	.flush_iotlb_all	= arm_smmu_iotlb_sync,
+	.iotlb_sync		= arm_smmu_iotlb_sync,
 	.iova_to_phys		= arm_smmu_iova_to_phys,
 	.add_device		= arm_smmu_add_device,
 	.remove_device		= arm_smmu_remove_device,
drivers/iommu/arm-smmu.c
@@ -250,6 +250,7 @@ enum arm_smmu_domain_stage {
 struct arm_smmu_domain {
 	struct arm_smmu_device		*smmu;
 	struct io_pgtable_ops		*pgtbl_ops;
+	const struct iommu_gather_ops	*tlb_ops;
 	struct arm_smmu_cfg		cfg;
 	enum arm_smmu_domain_stage	stage;
 	struct mutex			init_mutex; /* Protects smmu pointer */
@@ -735,7 +736,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	enum io_pgtable_fmt fmt;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
-	const struct iommu_gather_ops *tlb_ops;
 
 	mutex_lock(&smmu_domain->init_mutex);
 	if (smmu_domain->smmu)
@@ -813,7 +813,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 			ias = min(ias, 32UL);
 			oas = min(oas, 32UL);
 		}
-		tlb_ops = &arm_smmu_s1_tlb_ops;
+		smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
 		break;
 	case ARM_SMMU_DOMAIN_NESTED:
 		/*
@@ -833,9 +833,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 			oas = min(oas, 40UL);
 		}
 		if (smmu->version == ARM_SMMU_V2)
-			tlb_ops = &arm_smmu_s2_tlb_ops_v2;
+			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
 		else
-			tlb_ops = &arm_smmu_s2_tlb_ops_v1;
+			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
 		break;
 	default:
 		ret = -EINVAL;
@@ -863,7 +863,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 		.pgsize_bitmap	= smmu->pgsize_bitmap,
 		.ias		= ias,
 		.oas		= oas,
-		.tlb		= tlb_ops,
+		.tlb		= smmu_domain->tlb_ops,
 		.iommu_dev	= smmu->dev,
 	};
@@ -1259,6 +1259,14 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 	return ops->unmap(ops, iova, size);
 }
 
+static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+	if (smmu_domain->tlb_ops)
+		smmu_domain->tlb_ops->tlb_sync(smmu_domain);
+}
+
 static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 					      dma_addr_t iova)
 {
@@ -1562,6 +1570,8 @@ static struct iommu_ops arm_smmu_ops = {
 	.map			= arm_smmu_map,
 	.unmap			= arm_smmu_unmap,
 	.map_sg			= default_iommu_map_sg,
+	.flush_iotlb_all	= arm_smmu_iotlb_sync,
+	.iotlb_sync		= arm_smmu_iotlb_sync,
 	.iova_to_phys		= arm_smmu_iova_to_phys,
 	.add_device		= arm_smmu_add_device,
 	.remove_device		= arm_smmu_remove_device,
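Note: both SMMU drivers (and ipmmu, mtk and qcom below) point the new .flush_iotlb_all and .iotlb_sync ops at the same function: a full TLB sync is a correct, if conservative, implementation of "flush everything". A hypothetical driver skeleton showing the pattern (my_iotlb_sync and my_iommu_ops are illustrative names):

#include <linux/iommu.h>

static void my_iotlb_sync(struct iommu_domain *domain)
{
        /* Issue and/or wait for outstanding TLB maintenance here. */
}

static struct iommu_ops my_iommu_ops = {
        /* ... map/unmap etc. elided ... */
        .flush_iotlb_all = my_iotlb_sync,       /* same callback for both */
        .iotlb_sync      = my_iotlb_sync,
};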
drivers/iommu/dma-iommu.c
@@ -292,18 +292,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 		/* ...then finally give it a kicking to make sure it fits */
 		base_pfn = max_t(unsigned long, base_pfn,
 				domain->geometry.aperture_start >> order);
-		end_pfn = min_t(unsigned long, end_pfn,
-				domain->geometry.aperture_end >> order);
 	}
-	/*
-	 * PCI devices may have larger DMA masks, but still prefer allocating
-	 * within a 32-bit mask to avoid DAC addressing. Such limitations don't
-	 * apply to the typical platform device, so for those we may as well
-	 * leave the cache limit at the top of their range to save an rb_last()
-	 * traversal on every allocation.
-	 */
-	if (dev && dev_is_pci(dev))
-		end_pfn &= DMA_BIT_MASK(32) >> order;
 
 	/* start_pfn is always nonzero for an already-initialised domain */
 	if (iovad->start_pfn) {
@@ -312,16 +301,11 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 			pr_warn("Incompatible range for DMA domain\n");
 			return -EFAULT;
 		}
-		/*
-		 * If we have devices with different DMA masks, move the free
-		 * area cache limit down for the benefit of the smaller one.
-		 */
-		iovad->dma_32bit_pfn = min(end_pfn + 1, iovad->dma_32bit_pfn);
 
 		return 0;
 	}
 
-	init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
+	init_iova_domain(iovad, 1UL << order, base_pfn);
 	if (!dev)
 		return 0;
@@ -386,10 +370,12 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
 
 	/* Try to get PCI devices a SAC address */
 	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
-		iova = alloc_iova_fast(iovad, iova_len, DMA_BIT_MASK(32) >> shift);
+		iova = alloc_iova_fast(iovad, iova_len,
+				       DMA_BIT_MASK(32) >> shift, false);
 
 	if (!iova)
-		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift);
+		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
+				       true);
 
 	return (dma_addr_t)iova << shift;
 }
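Note: the call sites above (and the matching ones in amd_iommu.c and intel-iommu.c) all follow the two-step pattern that alloc_iova_fast()'s new flush_rcache argument enables: try the preferred 32-bit window without flushing the per-CPU rcaches, and only on the final full-mask attempt pay for the flush-and-retry. A sketch of that calling convention, assuming an already-initialised iova_domain and caller-supplied iova_len/shift/dma_limit; alloc_with_fallback() is a hypothetical wrapper, not kernel API:

#include <linux/iova.h>
#include <linux/dma-mapping.h>

static unsigned long alloc_with_fallback(struct iova_domain *iovad,
                                         unsigned long iova_len,
                                         unsigned long shift,
                                         u64 dma_limit, bool prefer_32bit)
{
        unsigned long iova = 0;

        if (prefer_32bit && dma_limit > DMA_BIT_MASK(32))
                /* Cheap attempt: don't flush the rcaches if this fails. */
                iova = alloc_iova_fast(iovad, iova_len,
                                       DMA_BIT_MASK(32) >> shift, false);

        if (!iova)
                /* Last resort: allow the rcache flush-and-retry. */
                iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
                                       true);

        return iova;
}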
drivers/iommu/dmar.c
@@ -801,13 +801,16 @@ int __init dmar_dev_scope_init(void)
 			dmar_free_pci_notify_info(info);
 		}
 	}
 
-	bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
-
 	return dmar_dev_scope_status;
 }
 
+void dmar_register_bus_notifier(void)
+{
+	bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
+}
+
 int __init dmar_table_init(void)
 {
drivers/iommu/exynos-iommu.c
@@ -263,6 +263,7 @@ struct exynos_iommu_domain {
 struct sysmmu_drvdata {
 	struct device *sysmmu;		/* SYSMMU controller device */
 	struct device *master;		/* master device (owner) */
+	struct device_link *link;	/* runtime PM link to master */
 	void __iomem *sfrbase;		/* our registers */
 	struct clk *clk;		/* SYSMMU's clock */
 	struct clk *aclk;		/* SYSMMU's aclk clock */
@@ -1250,6 +1251,8 @@ static struct iommu_group *get_device_iommu_group(struct device *dev)
 
 static int exynos_iommu_add_device(struct device *dev)
 {
+	struct exynos_iommu_owner *owner = dev->archdata.iommu;
+	struct sysmmu_drvdata *data;
 	struct iommu_group *group;
 
 	if (!has_sysmmu(dev))
@@ -1260,6 +1263,15 @@ static int exynos_iommu_add_device(struct device *dev)
 	if (IS_ERR(group))
 		return PTR_ERR(group);
 
+	list_for_each_entry(data, &owner->controllers, owner_node) {
+		/*
+		 * SYSMMU will be runtime activated via device link
+		 * (dependency) to its master device, so there are no
+		 * direct calls to pm_runtime_get/put in this driver.
+		 */
+		data->link = device_link_add(dev, data->sysmmu,
+					     DL_FLAG_PM_RUNTIME);
+	}
 	iommu_group_put(group);
 
 	return 0;
@@ -1268,6 +1280,7 @@ static int exynos_iommu_add_device(struct device *dev)
 
 static void exynos_iommu_remove_device(struct device *dev)
 {
 	struct exynos_iommu_owner *owner = dev->archdata.iommu;
+	struct sysmmu_drvdata *data;
 
 	if (!has_sysmmu(dev))
 		return;
@@ -1283,6 +1296,9 @@ static void exynos_iommu_remove_device(struct device *dev)
 		}
 	}
 	iommu_group_remove_device(dev);
+
+	list_for_each_entry(data, &owner->controllers, owner_node)
+		device_link_del(data->link);
 }
 
 static int exynos_iommu_of_xlate(struct device *dev,
@@ -1316,13 +1332,6 @@ static int exynos_iommu_of_xlate(struct device *dev,
 	list_add_tail(&data->owner_node, &owner->controllers);
 	data->master = dev;
-
-	/*
-	 * SYSMMU will be runtime activated via device link (dependency) to its
-	 * master device, so there are no direct calls to pm_runtime_get/put
-	 * in this driver.
-	 */
-	device_link_add(dev, data->sysmmu, DL_FLAG_PM_RUNTIME);
 
 	return 0;
 }
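Note: the exynos changes move device_link_add() from of_xlate to add_device and store the returned handle so remove_device can tear the link down symmetrically. A sketch of that link lifecycle in a generic driver; struct my_ctrl, my_add() and my_remove() are illustrative names, not exynos code:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pm_runtime.h>

struct my_ctrl {
        struct device *supplier;        /* e.g. the SYSMMU device */
        struct device_link *link;       /* stored so it can be torn down */
};

static int my_add(struct my_ctrl *ctrl, struct device *consumer)
{
        /* Consumer's runtime PM state now implies the supplier's. */
        ctrl->link = device_link_add(consumer, ctrl->supplier,
                                     DL_FLAG_PM_RUNTIME);
        return ctrl->link ? 0 : -ENODEV;
}

static void my_remove(struct my_ctrl *ctrl)
{
        device_link_del(ctrl->link);
        ctrl->link = NULL;
}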
drivers/iommu/intel-iommu.c
@@ -82,8 +82,6 @@
 #define IOVA_START_PFN		(1)
 
 #define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
-#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
-#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))
 
 /* page table handling */
 #define LEVEL_STRIDE		(9)
@@ -1878,8 +1876,7 @@ static int dmar_init_reserved_ranges(void)
 	struct iova *iova;
 	int i;
 
-	init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
-			DMA_32BIT_PFN);
+	init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);
 
 	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
 		&reserved_rbtree_key);
@@ -1938,8 +1935,7 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
 	unsigned long sagaw;
 	int err;
 
-	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
-			DMA_32BIT_PFN);
+	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
 
 	err = init_iova_flush_queue(&domain->iovad,
 				    iommu_flush_iova, iova_entry_free);
@@ -2058,7 +2054,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	if (context_copied(context)) {
 		u16 did_old = context_domain_id(context);
 
-		if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) {
+		if (did_old < cap_ndoms(iommu->cap)) {
 			iommu->flush.flush_context(iommu, did_old,
 						   (((u16)bus) << 8) | devfn,
 						   DMA_CCMD_MASK_NOBIT,
@@ -3473,11 +3469,12 @@ static unsigned long intel_alloc_iova(struct device *dev,
 		 * from higher range
 		 */
 		iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
-					   IOVA_PFN(DMA_BIT_MASK(32)));
+					   IOVA_PFN(DMA_BIT_MASK(32)), false);
 		if (iova_pfn)
 			return iova_pfn;
 	}
-	iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask));
+	iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
+				   IOVA_PFN(dma_mask), true);
 	if (unlikely(!iova_pfn)) {
 		pr_err("Allocating %ld-page iova for %s failed",
 		       nrpages, dev_name(dev));
@@ -4752,6 +4749,16 @@ int __init intel_iommu_init(void)
 		goto out_free_dmar;
 	}
 
+	up_write(&dmar_global_lock);
+
+	/*
+	 * The bus notifier takes the dmar_global_lock, so lockdep will
+	 * complain later when we register it under the lock.
+	 */
+	dmar_register_bus_notifier();
+
+	down_write(&dmar_global_lock);
+
 	if (no_iommu || dmar_disabled) {
 		/*
 		 * We exit the function here to ensure IOMMU's remapping and
@@ -4897,8 +4904,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 {
 	int adjust_width;
 
-	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
-			DMA_32BIT_PFN);
+	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
 	domain_reserve_special_ranges(domain);
 
 	/* calculate AGAW */
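Note: intel_iommu_init() now drops dmar_global_lock around the new dmar_register_bus_notifier() because the notifier callback itself takes dmar_global_lock; registering while holding the lock would make lockdep report an inversion. The generic shape of that fix, sketched with hypothetical names (my_global_lock, my_register):

#include <linux/rwsem.h>

static DECLARE_RWSEM(my_global_lock);

extern void my_register(void);  /* hypothetical: takes my_global_lock itself */

static void init_path(void)
{
        down_write(&my_global_lock);
        /* ... setup that genuinely needs the lock ... */
        up_write(&my_global_lock);

        my_register();          /* must not run under my_global_lock */

        down_write(&my_global_lock);
        /* ... remaining setup ... */
        up_write(&my_global_lock);
}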
drivers/iommu/io-pgtable-arm-v7s.c
@@ -660,16 +660,11 @@ static int arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
 			 size_t size)
 {
 	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
-	size_t unmapped;
 
 	if (WARN_ON(upper_32_bits(iova)))
 		return 0;
 
-	unmapped = __arm_v7s_unmap(data, iova, size, 1, data->pgd);
-	if (unmapped)
-		io_pgtable_tlb_sync(&data->iop);
-
-	return unmapped;
+	return __arm_v7s_unmap(data, iova, size, 1, data->pgd);
 }
 
 static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
drivers/iommu/io-pgtable-arm.c
@@ -609,7 +609,6 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
 			  size_t size)
 {
-	size_t unmapped;
 	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
 	arm_lpae_iopte *ptep = data->pgd;
 	int lvl = ARM_LPAE_START_LVL(data);
@@ -617,11 +616,7 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
 	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
 		return 0;
 
-	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
-	if (unmapped)
-		io_pgtable_tlb_sync(&data->iop);
-
-	return unmapped;
+	return __arm_lpae_unmap(data, iova, size, lvl, ptep);
 }
 
 static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
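Note: with the per-unmap io_pgtable_tlb_sync() calls removed from both io-pgtable flavours, syncing becomes the caller's responsibility via the iotlb_sync/flush_iotlb_all ops, so several unmaps can share one sync. A rough sketch of the batching this enables at the IOMMU-API level, assuming the iommu_unmap_fast()/iommu_tlb_sync() interface these drivers are implementing and caller-supplied domain/base/granule values:

#include <linux/iommu.h>

static void unmap_batch(struct iommu_domain *domain, unsigned long base,
                        size_t granule, int n)
{
        int i;

        for (i = 0; i < n; i++)
                /* No per-call TLB sync anymore... */
                iommu_unmap_fast(domain, base + i * granule, granule);

        /* ...one sync covers the whole batch. */
        iommu_tlb_sync(domain);
}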
drivers/iommu/iova.c
(This diff is collapsed.)
drivers/iommu/ipmmu-vmsa.c
@@ -619,6 +619,14 @@ static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
 	return domain->iop->unmap(domain->iop, iova, size);
 }
 
+static void ipmmu_iotlb_sync(struct iommu_domain *io_domain)
+{
+	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
+
+	if (domain->mmu)
+		ipmmu_tlb_flush_all(domain);
+}
+
 static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
 				      dma_addr_t iova)
 {
@@ -876,6 +884,8 @@ static const struct iommu_ops ipmmu_ops = {
 	.detach_dev = ipmmu_detach_device,
 	.map = ipmmu_map,
 	.unmap = ipmmu_unmap,
+	.flush_iotlb_all = ipmmu_iotlb_sync,
+	.iotlb_sync = ipmmu_iotlb_sync,
 	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = ipmmu_iova_to_phys,
 	.add_device = ipmmu_add_device_dma,
drivers/iommu/mtk_iommu.c
@@ -392,6 +392,11 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain,
 	return unmapsz;
 }
 
+static void mtk_iommu_iotlb_sync(struct iommu_domain *domain)
+{
+	mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
+}
+
 static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
 					  dma_addr_t iova)
 {
@@ -491,6 +496,8 @@ static struct iommu_ops mtk_iommu_ops = {
 	.map		= mtk_iommu_map,
 	.unmap		= mtk_iommu_unmap,
 	.map_sg		= default_iommu_map_sg,
+	.flush_iotlb_all = mtk_iommu_iotlb_sync,
+	.iotlb_sync	= mtk_iommu_iotlb_sync,
 	.iova_to_phys	= mtk_iommu_iova_to_phys,
 	.add_device	= mtk_iommu_add_device,
 	.remove_device	= mtk_iommu_remove_device,
drivers/iommu/omap-iommu.c
(This diff is collapsed.)
drivers/iommu/omap-iommu.h
@@ -28,18 +28,27 @@ struct iotlb_entry {
 	u32 endian, elsz, mixed;
 };
 
+/**
+ * struct omap_iommu_device - omap iommu device data
+ * @pgtable:	page table used by an omap iommu attached to a domain
+ * @iommu_dev:	pointer to store an omap iommu instance attached to a domain
+ */
+struct omap_iommu_device {
+	u32 *pgtable;
+	struct omap_iommu *iommu_dev;
+};
+
 /**
  * struct omap_iommu_domain - omap iommu domain
- * @pgtable:	the page table
- * @iommu_dev:	an omap iommu device attached to this domain. only a single
- *		iommu device can be attached for now.
+ * @num_iommus: number of iommus in this domain
+ * @iommus:	omap iommu device data for all iommus in this domain
  * @dev:	Device using this domain.
  * @lock:	domain lock, should be taken when attaching/detaching
 * @domain:	generic domain handle used by iommu core code
 */
 struct omap_iommu_domain {
-	u32 *pgtable;
-	struct omap_iommu *iommu_dev;
+	u32 num_iommus;
+	struct omap_iommu_device *iommus;
 	struct device *dev;
 	spinlock_t lock;
 	struct iommu_domain domain;
@@ -97,17 +106,6 @@ struct iotlb_lock {
 	short vict;
 };
 
-/**
- * dev_to_omap_iommu() - retrieves an omap iommu object from a user device
- * @dev: iommu client device
- */
-static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
-{
-	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
-
-	return arch_data->iommu_dev;
-}
-
 /*
  * MMU Register offsets
 */
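Note: struct omap_iommu_domain now tracks an array of per-IOMMU state (num_iommus/iommus) instead of a single device, so the attach/map paths in omap-iommu.c (collapsed above) can program every instance. A hypothetical iteration sketch over the new fields; do_program_entry() is an illustrative placeholder, not an OMAP function:

void do_program_entry(struct omap_iommu *dev, u32 *pgtable); /* hypothetical */

static void program_all_iommus(struct omap_iommu_domain *omap_domain)
{
        struct omap_iommu_device *iommu = omap_domain->iommus;
        int i;

        /* Apply the same operation to each IOMMU attached to the domain. */
        for (i = 0; i < omap_domain->num_iommus; i++, iommu++)
                do_program_entry(iommu->iommu_dev, iommu->pgtable);
}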
drivers/iommu/qcom_iommu.c
@@ -443,6 +443,19 @@ static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 	return ret;
 }
 
+static void qcom_iommu_iotlb_sync(struct iommu_domain *domain)
+{
+	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+	struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
+						  struct io_pgtable, ops);
+	if (!qcom_domain->pgtbl_ops)
+		return;
+
+	pm_runtime_get_sync(qcom_domain->iommu->dev);
+	qcom_iommu_tlb_sync(pgtable->cookie);
+	pm_runtime_put_sync(qcom_domain->iommu->dev);
+}
+
 static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
 					   dma_addr_t iova)
 {
@@ -570,6 +583,8 @@ static const struct iommu_ops qcom_iommu_ops = {
 	.map		= qcom_iommu_map,
 	.unmap		= qcom_iommu_unmap,
 	.map_sg		= default_iommu_map_sg,
+	.flush_iotlb_all = qcom_iommu_iotlb_sync,
+	.iotlb_sync	= qcom_iommu_iotlb_sync,
 	.iova_to_phys	= qcom_iommu_iova_to_phys,
 	.add_device	= qcom_iommu_add_device,
 	.remove_device	= qcom_iommu_remove_device,
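Note: qcom_iommu_iotlb_sync() brackets the TLB sync with pm_runtime_get_sync()/pm_runtime_put_sync() because the sync touches registers that may be power-gated. The general bracketing pattern, sketched with a hypothetical register access (touch_hw); `dev` is assumed to be the struct device owning the register block:

#include <linux/pm_runtime.h>

extern void touch_hw(void);     /* hypothetical register access */

static int touch_hw_powered(struct device *dev)
{
        int ret;

        ret = pm_runtime_get_sync(dev); /* resume (or bump usage count) */
        if (ret < 0) {
                pm_runtime_put_noidle(dev);
                return ret;
        }

        touch_hw();                     /* safe: device is active here */

        pm_runtime_put_sync(dev);       /* allow it to suspend again */
        return 0;
}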
drivers/misc/mic/scif/scif_rma.c
@@ -39,8 +39,7 @@ void scif_rma_ep_init(struct scif_endpt *ep)
 	struct scif_endpt_rma_info *rma = &ep->rma_info;
 
 	mutex_init(&rma->rma_lock);
-	init_iova_domain(&rma->iovad, PAGE_SIZE, SCIF_IOVA_START_PFN,
-			 SCIF_DMA_64BIT_PFN);
+	init_iova_domain(&rma->iovad, PAGE_SIZE, SCIF_IOVA_START_PFN);
 	spin_lock_init(&rma->tc_lock);
 	mutex_init(&rma->mmn_lock);
 	INIT_LIST_HEAD(&rma->reg_list);
include/linux/dmar.h
@@ -112,6 +112,7 @@ static inline bool dmar_rcu_check(void)
 
 extern int dmar_table_init(void);
 extern int dmar_dev_scope_init(void);
+extern void dmar_register_bus_notifier(void);
 extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
 				struct dmar_dev_scope **devices, u16 segment);
 extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt);
include/linux/iova.h
@@ -70,10 +70,12 @@ struct iova_fq {
 struct iova_domain {
 	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
 	struct rb_root	rbroot;		/* iova domain rbtree root */
-	struct rb_node	*cached32_node; /* Save last alloced node */
+	struct rb_node	*cached_node;	/* Save last alloced node */
+	struct rb_node	*cached32_node; /* Save last 32-bit alloced node */
 	unsigned long	granule;	/* pfn granularity for this domain */
 	unsigned long	start_pfn;	/* Lower limit for this domain */
 	unsigned long	dma_32bit_pfn;
+	struct iova	anchor;		/* rbtree lookup anchor */
 	struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */
 
 	iova_flush_cb	flush_cb;	/* Call-Back function to flush IOMMU
@@ -148,12 +150,12 @@ void queue_iova(struct iova_domain *iovad,
 		unsigned long pfn, unsigned long pages,
 		unsigned long data);
 unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
-			      unsigned long limit_pfn);
+			      unsigned long limit_pfn, bool flush_rcache);
 struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
 	unsigned long pfn_hi);
 void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
 void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
-	unsigned long start_pfn, unsigned long pfn_32bit);
+	unsigned long start_pfn);
 int init_iova_flush_queue(struct iova_domain *iovad,
 			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
@@ -210,7 +212,8 @@ static inline void queue_iova(struct iova_domain *iovad,
 static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
 					    unsigned long size,
-					    unsigned long limit_pfn)
+					    unsigned long limit_pfn,
+					    bool flush_rcache)
 {
 	return 0;
 }
@@ -229,8 +232,7 @@ static inline void copy_reserved_iova(struct iova_domain *from,
 static inline void init_iova_domain(struct iova_domain *iovad,
 				    unsigned long granule,
-				    unsigned long start_pfn,
-				    unsigned long pfn_32bit)
+				    unsigned long start_pfn)
 {
 }
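Note: the header changes capture the two API shifts that ripple through every driver in this merge: init_iova_domain() no longer takes a pfn_32bit ceiling (the 32-bit boundary is tracked internally via dma_32bit_pfn and the cached32_node/anchor nodes), and alloc_iova_fast() takes an explicit flush_rcache flag. A before/after usage sketch, assuming an already-allocated iova_domain and 4K granularity:

#include <linux/iova.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static void iova_api_example(struct iova_domain *iovad)
{
        unsigned long pfn;

        /* Before this merge:
         *   init_iova_domain(iovad, SZ_4K, 1, IOVA_PFN(DMA_BIT_MASK(32)));
         *   pfn = alloc_iova_fast(iovad, 1, IOVA_PFN(DMA_BIT_MASK(32)));
         */

        /* After: no 32-bit ceiling argument... */
        init_iova_domain(iovad, SZ_4K, 1);

        /* ...and the caller decides whether a failed allocation may flush
         * the per-CPU rcaches and retry (true) or fail fast (false). */
        pfn = alloc_iova_fast(iovad, 1, DMA_BIT_MASK(32) >> PAGE_SHIFT, false);
        if (!pfn)
                pfn = alloc_iova_fast(iovad, 1, DMA_BIT_MASK(64) >> PAGE_SHIFT,
                                      true);
}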