Commit 66645656 authored by Linus Torvalds

Merge tag 'iommu-updates-v3.11' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:
 "A few updates this time, most important and exiciting (to me) is:

   - The new ARM SMMU driver.  This is a common IOMMU driver that will
     hopefully be used in a lot of upcoming ARM chips.  So the mess of
     the past, where every SoC had its own IOMMU, will be over.

  Besides that:

   - Some important fixes in the IOMMU unmap path.  There are fixes in
     the common code and also in the AMD IOMMU driver.
   - Other random fixes"

* tag 'iommu-updates-v3.11' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  MAINTAINERS: add entry for ARM system MMU driver
  iommu/arm: Add support for ARM Ltd. System MMU architecture
  documentation/iommu: Add description of ARM System MMU binding
  iommu: Use %pa and %zx instead of casting
  iommu/amd: Only unmap large pages from the first pte
  iommu: Fix compiler warning on pr_debug
  iommu/amd: Fix memory leak in free_pagetable
  iommu: Split iommu_unmaps
  iommu/{vt-d,amd}: Remove multifunction assumption around grouping
  iommu/omap: fix checkpatch warnings in omap iommu code
  iommu/omap: fix printk formats for dma_addr_t
  iommu/vt-d: DMAR reporting table needs at least one DRHD
  iommu/vt-d: Downgrade the warning if enabling irq remapping fails
parents 496fd15b 01ce784a
Documentation/devicetree/bindings/iommu/arm,smmu.txt (new file):

* ARM System MMU Architecture Implementation

ARM SoCs may contain an implementation of the ARM System Memory
Management Unit Architecture, which can be used to provide 1 or 2 stages
of address translation to bus masters external to the CPU.

The SMMU may also raise interrupts in response to various fault
conditions.

** System MMU required properties:

- compatible        : Should be one of:

                            "arm,smmu-v1"
                            "arm,smmu-v2"
                            "arm,mmu-400"
                            "arm,mmu-500"

                      depending on the particular implementation and/or the
                      version of the architecture implemented.

- reg               : Base address and size of the SMMU.

- #global-interrupts : The number of global interrupts exposed by the
                      device.

- interrupts        : Interrupt list, with the first #global-interrupts
                      entries corresponding to the global interrupts and any
                      following entries corresponding to context interrupts,
                      specified in order of their indexing by the SMMU.

                      For SMMUv2 implementations, there must be exactly one
                      interrupt per context bank. In the case of a single,
                      combined interrupt, it must be listed multiple times.

- mmu-masters       : A list of phandles to device nodes representing bus
                      masters for which the SMMU can provide a translation
                      and their corresponding StreamIDs (see example below).
                      Each device node linked from this list must have a
                      "#stream-id-cells" property, indicating the number of
                      StreamIDs associated with it.

** System MMU optional properties:

- smmu-parent       : When multiple SMMUs are chained together, this
                      property can be used to provide a phandle to the
                      parent SMMU (that is the next SMMU on the path going
                      from the mmu-masters towards memory) node for this
                      SMMU.

Example:

        smmu {
                compatible = "arm,smmu-v1";
                reg = <0xba5e0000 0x10000>;
                #global-interrupts = <2>;
                interrupts = <0 32 4>,
                             <0 33 4>,
                             <0 34 4>, /* This is the first context interrupt */
                             <0 35 4>,
                             <0 36 4>,
                             <0 37 4>;

                /*
                 * Two DMA controllers, the first with two StreamIDs (0xd01d
                 * and 0xd01e) and the second with only one (0xd11c).
                 */
                mmu-masters = <&dma0 0xd01d 0xd01e>,
                              <&dma1 0xd11c>;
        };
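For completeness, a bus master referenced from mmu-masters must itself carry the "#stream-id-cells" property required above. A minimal sketch of the first DMA controller's node (the unit address and compatible string are illustrative, not part of this binding):

        dma0: dma-controller@10000000 {
                compatible = "vendor,dma-controller";   /* hypothetical */
                ...
                /* Two StreamIDs (0xd01d, 0xd01e) are listed for this
                 * master in the SMMU's mmu-masters property above. */
                #stream-id-cells = <2>;
        };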
MAINTAINERS:

@@ -1333,6 +1333,12 @@ S:	Supported
 F:	arch/arm/mach-zynq/
 F:	drivers/cpuidle/cpuidle-zynq.c

+ARM SMMU DRIVER
+M:	Will Deacon <will.deacon@arm.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+F:	drivers/iommu/arm-smmu.c
+
 ARM64 PORT (AARCH64 ARCHITECTURE)
 M:	Catalin Marinas <catalin.marinas@arm.com>
 M:	Will Deacon <will.deacon@arm.com>
drivers/iommu/Kconfig:

@@ -269,4 +269,17 @@ config SPAPR_TCE_IOMMU
 	  Enables bits of IOMMU API required by VFIO. The iommu_ops
 	  is not implemented as it is not necessary for VFIO.

+config ARM_SMMU
+	bool "ARM Ltd. System MMU (SMMU) Support"
+	depends on ARM64 || (ARM_LPAE && OF)
+	select IOMMU_API
+	select ARM_DMA_USE_IOMMU if ARM
+	help
+	  Support for implementations of the ARM System MMU architecture
+	  versions 1 and 2. The driver supports both v7l and v8l table
+	  formats with 4k and 64k page sizes.
+
+	  Say Y here if your SoC includes an IOMMU device implementing
+	  the ARM SMMU architecture.
+
 endif # IOMMU_SUPPORT
drivers/iommu/Makefile:

@@ -3,6 +3,7 @@ obj-$(CONFIG_OF_IOMMU) += of_iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
 obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
+obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
 obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
 obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
drivers/iommu/amd_iommu.c:

@@ -287,14 +287,27 @@ static struct pci_dev *get_isolation_root(struct pci_dev *pdev)

 	/*
 	 * If it's a multifunction device that does not support our
-	 * required ACS flags, add to the same group as function 0.
+	 * required ACS flags, add to the same group as lowest numbered
+	 * function that also does not support the required ACS flags.
 	 */
 	if (dma_pdev->multifunction &&
-	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
-		swap_pci_ref(&dma_pdev,
-			     pci_get_slot(dma_pdev->bus,
-					  PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
-					  0)));
+	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
+		u8 i, slot = PCI_SLOT(dma_pdev->devfn);
+
+		for (i = 0; i < 8; i++) {
+			struct pci_dev *tmp;
+
+			tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
+			if (!tmp)
+				continue;
+
+			if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
+				swap_pci_ref(&dma_pdev, tmp);
+				break;
+			}
+
+			pci_dev_put(tmp);
+		}
+	}

 	/*
 	 * Devices on the root bus go through the iommu.  If that's not us,
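As a quick aid to reading the loop above: a PCI devfn packs the slot number in the upper five bits and the function number in the lower three, so the helpers reduce to shifts and masks. A sketch, with an illustrative sample value:

	/* PCI_SLOT(devfn)     is roughly ((devfn) >> 3) & 0x1f
	 * PCI_DEVFN(slot, fn) is roughly ((slot) << 3) | (fn)  */
	u8 devfn = 0x1a;		/* example: slot 3, function 2 */
	u8 slot  = PCI_SLOT(devfn);	/* 0x1a >> 3 == 3              */
	/* The loop probes PCI_DEVFN(slot, 0) .. PCI_DEVFN(slot, 7) and
	 * groups the device with the lowest-numbered function that also
	 * lacks the required ACS flags. */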
@@ -1484,6 +1497,10 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
 			/* Large PTE found which maps this address */
 			unmap_size = PTE_PAGE_SIZE(*pte);

+			/* Only unmap from the first pte in the page */
+			if ((unmap_size - 1) & bus_addr)
+				break;
+
 			count = PAGE_SIZE_PTE_COUNT(unmap_size);
 			for (i = 0; i < count; i++)
 				pte[i] = 0ULL;
@@ -1493,7 +1510,7 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
 		unmapped += unmap_size;
 	}

-	BUG_ON(!is_power_of_2(unmapped));
+	BUG_ON(unmapped && !is_power_of_2(unmapped));

 	return unmapped;
 }
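The new check relies on large-page sizes being powers of two: unmap_size - 1 is a mask of the offset bits within the large page, so any set bit means bus_addr points into the middle of the mapping rather than at its first pte. A worked example (values are illustrative):

	/* 2 MiB large PTE: unmap_size = 0x200000, mask = 0x1fffff      */
	/* bus_addr = 0x400000: 0x400000 & 0x1fffff == 0, unmap proceeds */
	/* bus_addr = 0x401000: 0x401000 & 0x1fffff == 0x1000, loop breaks */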
@@ -1893,34 +1910,59 @@ static void domain_id_free(int id)
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 }

+#define DEFINE_FREE_PT_FN(LVL, FN)				\
+static void free_pt_##LVL (unsigned long __pt)			\
+{								\
+	unsigned long p;					\
+	u64 *pt;						\
+	int i;							\
+								\
+	pt = (u64 *)__pt;					\
+								\
+	for (i = 0; i < 512; ++i) {				\
+		if (!IOMMU_PTE_PRESENT(pt[i]))			\
+			continue;				\
+								\
+		p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);	\
+		FN(p);						\
+	}							\
+								\
+	free_page((unsigned long)pt);				\
+}
+
+DEFINE_FREE_PT_FN(l2, free_page)
+DEFINE_FREE_PT_FN(l3, free_pt_l2)
+DEFINE_FREE_PT_FN(l4, free_pt_l3)
+DEFINE_FREE_PT_FN(l5, free_pt_l4)
+DEFINE_FREE_PT_FN(l6, free_pt_l5)
+
 static void free_pagetable(struct protection_domain *domain)
 {
-	int i, j;
-	u64 *p1, *p2, *p3;
-
-	p1 = domain->pt_root;
-
-	if (!p1)
-		return;
-
-	for (i = 0; i < 512; ++i) {
-		if (!IOMMU_PTE_PRESENT(p1[i]))
-			continue;
-
-		p2 = IOMMU_PTE_PAGE(p1[i]);
-		for (j = 0; j < 512; ++j) {
-			if (!IOMMU_PTE_PRESENT(p2[j]))
-				continue;
-
-			p3 = IOMMU_PTE_PAGE(p2[j]);
-			free_page((unsigned long)p3);
-		}
-
-		free_page((unsigned long)p2);
-	}
-
-	free_page((unsigned long)p1);
+	unsigned long root = (unsigned long)domain->pt_root;
+
+	switch (domain->mode) {
+	case PAGE_MODE_NONE:
+		break;
+	case PAGE_MODE_1_LEVEL:
+		free_page(root);
+		break;
+	case PAGE_MODE_2_LEVEL:
+		free_pt_l2(root);
+		break;
+	case PAGE_MODE_3_LEVEL:
+		free_pt_l3(root);
+		break;
+	case PAGE_MODE_4_LEVEL:
+		free_pt_l4(root);
+		break;
+	case PAGE_MODE_5_LEVEL:
+		free_pt_l5(root);
+		break;
+	case PAGE_MODE_6_LEVEL:
+		free_pt_l6(root);
+		break;
+	default:
+		BUG();
+	}

 	domain->pt_root = NULL;
 }

 static void free_gcr3_tbl_level1(u64 *tbl)
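For illustration, DEFINE_FREE_PT_FN(l3, free_pt_l2) expands to roughly the following (a sketch of the preprocessor output, not extra code in the patch):

	static void free_pt_l3(unsigned long __pt)
	{
		unsigned long p;
		u64 *pt = (u64 *)__pt;
		int i;

		/* Walk all 512 entries of this level-3 table, recursing
		 * into each present entry via the level-2 helper. */
		for (i = 0; i < 512; ++i) {
			if (!IOMMU_PTE_PRESENT(pt[i]))
				continue;
			p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);
			free_pt_l2(p);
		}
		free_page((unsigned long)pt);
	}

Each level's helper frees its own table page after recursing, so page tables of any depth are torn down completely; the old code only ever walked two levels below the root and leaked everything deeper, which is the memory leak this patch fixes.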
drivers/iommu/arm-smmu.c (new driver): diff collapsed, not shown here.
drivers/iommu/dmar.c:

@@ -309,6 +309,7 @@ parse_dmar_table(void)
 	struct acpi_table_dmar *dmar;
 	struct acpi_dmar_header *entry_header;
 	int ret = 0;
+	int drhd_count = 0;

 	/*
 	 * Do it again, earlier dmar_tbl mapping could be mapped with
@@ -347,6 +348,7 @@ parse_dmar_table(void)

 		switch (entry_header->type) {
 		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
+			drhd_count++;
 			ret = dmar_parse_one_drhd(entry_header);
 			break;
 		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
@@ -371,6 +373,8 @@ parse_dmar_table(void)

 		entry_header = ((void *)entry_header + entry_header->length);
 	}
+	if (drhd_count == 0)
+		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
+
 	return ret;
 }
drivers/iommu/intel-iommu.c:

@@ -4182,14 +4182,27 @@ static int intel_iommu_add_device(struct device *dev)

 	/*
 	 * If it's a multifunction device that does not support our
-	 * required ACS flags, add to the same group as function 0.
+	 * required ACS flags, add to the same group as lowest numbered
+	 * function that also does not support the required ACS flags.
 	 */
 	if (dma_pdev->multifunction &&
-	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
-		swap_pci_ref(&dma_pdev,
-			     pci_get_slot(dma_pdev->bus,
-					  PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
-					  0)));
+	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
+		u8 i, slot = PCI_SLOT(dma_pdev->devfn);
+
+		for (i = 0; i < 8; i++) {
+			struct pci_dev *tmp;
+
+			tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
+			if (!tmp)
+				continue;
+
+			if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
+				swap_pci_ref(&dma_pdev, tmp);
+				break;
+			}
+
+			pci_dev_put(tmp);
+		}
+	}

 	/*
 	 * Devices on the root bus go through the iommu.  If that's not us,
drivers/iommu/intel_irq_remapping.c:

@@ -664,8 +664,7 @@ static int __init intel_enable_irq_remapping(void)
 	 */
 	if (x2apic_present)
-		WARN(1, KERN_WARNING
-			"Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
+		pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");

 	return -1;
 }
drivers/iommu/iommu.c:

@@ -754,6 +754,38 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_domain_has_cap);

+static size_t iommu_pgsize(struct iommu_domain *domain,
+			   unsigned long addr_merge, size_t size)
+{
+	unsigned int pgsize_idx;
+	size_t pgsize;
+
+	/* Max page size that still fits into 'size' */
+	pgsize_idx = __fls(size);
+
+	/* need to consider alignment requirements ? */
+	if (likely(addr_merge)) {
+		/* Max page size allowed by address */
+		unsigned int align_pgsize_idx = __ffs(addr_merge);
+
+		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+	}
+
+	/* build a mask of acceptable page sizes */
+	pgsize = (1UL << (pgsize_idx + 1)) - 1;
+
+	/* throw away page sizes not supported by the hardware */
+	pgsize &= domain->ops->pgsize_bitmap;
+
+	/* make sure we're still sane */
+	BUG_ON(!pgsize);
+
+	/* pick the biggest page */
+	pgsize_idx = __fls(pgsize);
+	pgsize = 1UL << pgsize_idx;
+
+	return pgsize;
+}
+
 int iommu_map(struct iommu_domain *domain, unsigned long iova,
 	      phys_addr_t paddr, size_t size, int prot)
 {
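To see how iommu_pgsize converges on a page size, a worked example (the bitmap below assumes hypothetical hardware supporting 4K, 2M and 1G pages, i.e. pgsize_bitmap == 0x40201000):

	/* iommu_pgsize(domain, addr_merge = 0x201000, size = 0x402000):
	 *   __fls(0x402000)          == 22  -> at most 4M fits in 'size'
	 *   __ffs(0x201000)          == 12  -> address only 4K-aligned
	 *   pgsize_idx = min(22, 12) == 12
	 *   mask = (1UL << 13) - 1   == 0x1fff
	 *   0x1fff & 0x40201000      == 0x1000
	 *   __fls(0x1000)            == 12  -> chosen pgsize = 0x1000 (4K)
	 */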
@@ -775,45 +807,18 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
 	 * size of the smallest page supported by the hardware
 	 */
 	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
-		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
-		       "0x%x\n", iova, (unsigned long)paddr,
-		       (unsigned long)size, min_pagesz);
+		pr_err("unaligned: iova 0x%lx pa 0x%pa size 0x%zx min_pagesz 0x%x\n",
+		       iova, &paddr, size, min_pagesz);
 		return -EINVAL;
 	}

-	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
-		 (unsigned long)paddr, (unsigned long)size);
+	pr_debug("map: iova 0x%lx pa 0x%pa size 0x%zx\n", iova, &paddr, size);

 	while (size) {
-		unsigned long pgsize, addr_merge = iova | paddr;
-		unsigned int pgsize_idx;
-
-		/* Max page size that still fits into 'size' */
-		pgsize_idx = __fls(size);
-
-		/* need to consider alignment requirements ? */
-		if (likely(addr_merge)) {
-			/* Max page size allowed by both iova and paddr */
-			unsigned int align_pgsize_idx = __ffs(addr_merge);
-
-			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
-		}
-
-		/* build a mask of acceptable page sizes */
-		pgsize = (1UL << (pgsize_idx + 1)) - 1;
-
-		/* throw away page sizes not supported by the hardware */
-		pgsize &= domain->ops->pgsize_bitmap;
-
-		/* make sure we're still sane */
-		BUG_ON(!pgsize);
-
-		/* pick the biggest page */
-		pgsize_idx = __fls(pgsize);
-		pgsize = 1UL << pgsize_idx;
+		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

-		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
-			 (unsigned long)paddr, pgsize);
+		pr_debug("mapping: iova 0x%lx pa 0x%pa pgsize 0x%zx\n",
+			 iova, &paddr, pgsize);

 		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
 		if (ret)
@@ -850,27 +855,26 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 	 * by the hardware
 	 */
 	if (!IS_ALIGNED(iova | size, min_pagesz)) {
-		pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
-		       iova, (unsigned long)size, min_pagesz);
+		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
+		       iova, size, min_pagesz);
 		return -EINVAL;
 	}

-	pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
-		 (unsigned long)size);
+	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

 	/*
 	 * Keep iterating until we either unmap 'size' bytes (or more)
 	 * or we hit an area that isn't mapped.
 	 */
 	while (unmapped < size) {
-		size_t left = size - unmapped;
+		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

-		unmapped_page = domain->ops->unmap(domain, iova, left);
+		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
 		if (!unmapped_page)
 			break;

-		pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
-			 (unsigned long)unmapped_page);
+		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
+			 iova, unmapped_page);

 		iova += unmapped_page;
 		unmapped += unmapped_page;
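A caller's-eye sketch of the two functions touched above (the domain, addresses and sizes are illustrative):

	/* Both calls require iova/paddr/size aligned to the smallest
	 * hardware page size, else they fail with -EINVAL. iommu_map()
	 * splits the range into hardware-supported pages via
	 * iommu_pgsize(); iommu_unmap() keeps iterating until 'size'
	 * bytes are unmapped or it hits an unmapped area. */
	ret = iommu_map(domain, 0x100000, paddr, 0x3000,
			IOMMU_READ | IOMMU_WRITE);
	if (!ret)
		unmapped = iommu_unmap(domain, 0x100000, 0x3000);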
drivers/iommu/omap-iommu.c:

@@ -833,16 +833,15 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
 	iopgd = iopgd_offset(obj, da);

 	if (!iopgd_is_table(*iopgd)) {
-		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p "
-			"*pgd:px%08x\n", obj->name, errs, da, iopgd, *iopgd);
+		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:px%08x\n",
+			obj->name, errs, da, iopgd, *iopgd);
 		return IRQ_NONE;
 	}

 	iopte = iopte_offset(iopgd, da);

-	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x "
-		"pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd,
-		iopte, *iopte);
+	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
+		obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);

 	return IRQ_NONE;
 }
@@ -1235,14 +1234,16 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
 		else if (iopte_is_large(*pte))
 			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
 		else
-			dev_err(dev, "bogus pte 0x%x, da 0x%lx", *pte, da);
+			dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
+				(unsigned long long)da);
 	} else {
 		if (iopgd_is_section(*pgd))
 			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
 		else if (iopgd_is_super(*pgd))
 			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
 		else
-			dev_err(dev, "bogus pgd 0x%x, da 0x%lx", *pgd, da);
+			dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
+				(unsigned long long)da);
 	}

 	return ret;
drivers/iommu/omap-iopgtable.h:

@@ -95,4 +95,4 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
 #define iopte_offset(iopgd, da)	(iopgd_page_vaddr(iopgd) + iopte_index(da))

 #define to_iommu(dev)							\
-	(struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))
+	((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
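The extra parentheses matter because a cast binds less tightly than the postfix member-access operators, so without them the macro cannot be used as a plain expression. A sketch of what breaks (the local variable is illustrative; struct omap_iommu does have a name field, as the fault handler above shows):

	/* Without the outer parens the use below would expand to
	 *   (struct omap_iommu *)platform_get_drvdata(...)->name
	 * i.e. '->name' is applied to the void * return value before the
	 * cast, which does not compile. With the parens it reads naturally: */
	const char *name = to_iommu(dev)->name;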
drivers/iommu/omap-iovmm.c:

@@ -102,8 +102,8 @@ static size_t sgtable_len(const struct sg_table *sgt)
 		}

 		if (i && sg->offset) {
-			pr_err("%s: sg[%d] offset not allowed in internal "
-			       "entries\n", __func__, i);
+			pr_err("%s: sg[%d] offset not allowed in internal entries\n",
+			       __func__, i);
 			return 0;
 		}