Commit c6f66347 authored by Michael Ellerman

Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/scottwood/linux into next

Freescale updates from Scott:

"Highlights include 8xx hugepage support, qbman fixes/cleanup, device
tree updates, and some misc cleanup."
parents ff45000f baae856e
@@ -158,4 +158,5 @@ ti,tsc2003 I2C Touch-Screen Controller
 ti,tmp102	Low Power Digital Temperature Sensor with SMBUS/Two Wire Serial Interface
 ti,tmp103	Low Power Digital Temperature Sensor with SMBUS/Two Wire Serial Interface
 ti,tmp275	Digital Temperature Sensor
+winbond,w83793	Winbond/Nuvoton H/W Monitor
 winbond,wpct301	i2c trusted platform module (TPM)
@@ -41,6 +41,27 @@ / {
	#size-cells = <2>;
	interrupt-parent = <&mpic>;

+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		bman_fbpr: bman-fbpr {
+			size = <0 0x1000000>;
+			alignment = <0 0x1000000>;
+		};
+
+		qman_fqd: qman-fqd {
+			size = <0 0x400000>;
+			alignment = <0 0x400000>;
+		};
+
+		qman_pfdr: qman-pfdr {
+			size = <0 0x2000000>;
+			alignment = <0 0x2000000>;
+		};
+	};
+
	ifc: localbus@ffe124000 {
		reg = <0xf 0xfe124000 0 0x2000>;
		ranges = <0 0 0xf 0xe8000000 0x08000000
@@ -72,6 +93,14 @@ dcsr: dcsr@f00000000 {
		ranges = <0x00000000 0xf 0x00000000 0x01072000>;
	};

+	bportals: bman-portals@ff4000000 {
+		ranges = <0x0 0xf 0xf4000000 0x2000000>;
+	};
+
+	qportals: qman-portals@ff6000000 {
+		ranges = <0x0 0xf 0xf6000000 0x2000000>;
+	};
+
	soc: soc@ffe000000 {
		ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
		reg = <0xf 0xfe000000 0 0x00001000>;
...
@@ -34,6 +34,21 @@

 #include <dt-bindings/thermal/thermal.h>

+&bman_fbpr {
+	compatible = "fsl,bman-fbpr";
+	alloc-ranges = <0 0 0x10000 0>;
+};
+
+&qman_fqd {
+	compatible = "fsl,qman-fqd";
+	alloc-ranges = <0 0 0x10000 0>;
+};
+
+&qman_pfdr {
+	compatible = "fsl,qman-pfdr";
+	alloc-ranges = <0 0 0x10000 0>;
+};
+
 &ifc {
	#address-cells = <2>;
	#size-cells = <1>;
@@ -180,6 +195,92 @@ dcsr-cpu-sb-proxy@108000 {
	};
 };

+&bportals {
+	#address-cells = <0x1>;
+	#size-cells = <0x1>;
+	compatible = "simple-bus";
+
+	bman-portal@0 {
+		cell-index = <0x0>;
+		compatible = "fsl,bman-portal";
+		reg = <0x0 0x4000>, <0x1000000 0x1000>;
+		interrupts = <105 2 0 0>;
+	};
+
+	bman-portal@4000 {
+		cell-index = <0x1>;
+		compatible = "fsl,bman-portal";
+		reg = <0x4000 0x4000>, <0x1001000 0x1000>;
+		interrupts = <107 2 0 0>;
+	};
+
+	bman-portal@8000 {
+		cell-index = <2>;
+		compatible = "fsl,bman-portal";
+		reg = <0x8000 0x4000>, <0x1002000 0x1000>;
+		interrupts = <109 2 0 0>;
+	};
+
+	bman-portal@c000 {
+		cell-index = <0x3>;
+		compatible = "fsl,bman-portal";
+		reg = <0xc000 0x4000>, <0x1003000 0x1000>;
+		interrupts = <111 2 0 0>;
+	};
+
+	bman-portal@10000 {
+		cell-index = <0x4>;
+		compatible = "fsl,bman-portal";
+		reg = <0x10000 0x4000>, <0x1004000 0x1000>;
+		interrupts = <113 2 0 0>;
+	};
+
+	bman-portal@14000 {
+		cell-index = <0x5>;
+		compatible = "fsl,bman-portal";
+		reg = <0x14000 0x4000>, <0x1005000 0x1000>;
+		interrupts = <115 2 0 0>;
+	};
+};
+
+&qportals {
+	#address-cells = <0x1>;
+	#size-cells = <0x1>;
+	compatible = "simple-bus";
+
+	qportal0: qman-portal@0 {
+		compatible = "fsl,qman-portal";
+		reg = <0x0 0x4000>, <0x1000000 0x1000>;
+		interrupts = <104 0x2 0 0>;
+		cell-index = <0x0>;
+	};
+
+	qportal1: qman-portal@4000 {
+		compatible = "fsl,qman-portal";
+		reg = <0x4000 0x4000>, <0x1001000 0x1000>;
+		interrupts = <106 0x2 0 0>;
+		cell-index = <0x1>;
+	};
+
+	qportal2: qman-portal@8000 {
+		compatible = "fsl,qman-portal";
+		reg = <0x8000 0x4000>, <0x1002000 0x1000>;
+		interrupts = <108 0x2 0 0>;
+		cell-index = <0x2>;
+	};
+
+	qportal3: qman-portal@c000 {
+		compatible = "fsl,qman-portal";
+		reg = <0xc000 0x4000>, <0x1003000 0x1000>;
+		interrupts = <110 0x2 0 0>;
+		cell-index = <0x3>;
+	};
+
+	qportal4: qman-portal@10000 {
+		compatible = "fsl,qman-portal";
+		reg = <0x10000 0x4000>, <0x1004000 0x1000>;
+		interrupts = <112 0x2 0 0>;
+		cell-index = <0x4>;
+	};
+
+	qportal5: qman-portal@14000 {
+		compatible = "fsl,qman-portal";
+		reg = <0x14000 0x4000>, <0x1005000 0x1000>;
+		interrupts = <114 0x2 0 0>;
+		cell-index = <0x5>;
+	};
+};
+
 &soc {
	#address-cells = <1>;
	#size-cells = <1>;
@@ -413,6 +514,8 @@ sata@220000 {
 };

 /include/ "qoriq-sec5.0-0.dtsi"
+/include/ "qoriq-qman3.dtsi"
+/include/ "qoriq-bman1.dtsi"
 /include/ "qoriq-fman3l-0.dtsi"
 /include/ "qoriq-fman3-0-10g-0-best-effort.dtsi"
...
@@ -41,6 +41,27 @@ / {
	#size-cells = <2>;
	interrupt-parent = <&mpic>;

+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		bman_fbpr: bman-fbpr {
+			size = <0 0x1000000>;
+			alignment = <0 0x1000000>;
+		};
+
+		qman_fqd: qman-fqd {
+			size = <0 0x400000>;
+			alignment = <0 0x400000>;
+		};
+
+		qman_pfdr: qman-pfdr {
+			size = <0 0x2000000>;
+			alignment = <0 0x2000000>;
+		};
+	};
+
	ifc: localbus@ffe124000 {
		reg = <0xf 0xfe124000 0 0x2000>;
		ranges = <0 0 0xf 0xe8000000 0x08000000
@@ -80,6 +101,14 @@ dcsr: dcsr@f00000000 {
		ranges = <0x00000000 0xf 0x00000000 0x01072000>;
	};

+	bportals: bman-portals@ff4000000 {
+		ranges = <0x0 0xf 0xf4000000 0x2000000>;
+	};
+
+	qportals: qman-portals@ff6000000 {
+		ranges = <0x0 0xf 0xf6000000 0x2000000>;
+	};
+
	soc: soc@ffe000000 {
		ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
		reg = <0xf 0xfe000000 0 0x00001000>;
...
@@ -41,6 +41,31 @@ / {
	#size-cells = <2>;
	interrupt-parent = <&mpic>;

+	aliases {
+		sg_2500_aqr105_phy4 = &sg_2500_aqr105_phy4;
+	};
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		bman_fbpr: bman-fbpr {
+			size = <0 0x1000000>;
+			alignment = <0 0x1000000>;
+		};
+
+		qman_fqd: qman-fqd {
+			size = <0 0x400000>;
+			alignment = <0 0x400000>;
+		};
+
+		qman_pfdr: qman-pfdr {
+			size = <0 0x2000000>;
+			alignment = <0 0x2000000>;
+		};
+	};
+
	ifc: localbus@ffe124000 {
		reg = <0xf 0xfe124000 0 0x2000>;
		ranges = <0 0 0xf 0xe8000000 0x08000000
@@ -82,6 +107,14 @@ dcsr: dcsr@f00000000 {
		ranges = <0x00000000 0xf 0x00000000 0x01072000>;
	};

+	bportals: bman-portals@ff4000000 {
+		ranges = <0x0 0xf 0xf4000000 0x2000000>;
+	};
+
+	qportals: qman-portals@ff6000000 {
+		ranges = <0x0 0xf 0xf6000000 0x2000000>;
+	};
+
	soc: soc@ffe000000 {
		ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
		reg = <0xf 0xfe000000 0 0x00001000>;
...
@@ -48,6 +48,58 @@ cpld@3,0 {
			    "fsl,deepsleep-cpld";
		};
	};
+
+	soc: soc@ffe000000 {
+		fman0: fman@400000 {
+			ethernet@e0000 {
+				phy-handle = <&phy_sgmii_0>;
+				phy-connection-type = "sgmii";
+			};
+
+			ethernet@e2000 {
+				phy-handle = <&phy_sgmii_1>;
+				phy-connection-type = "sgmii";
+			};
+
+			ethernet@e4000 {
+				phy-handle = <&phy_sgmii_2>;
+				phy-connection-type = "sgmii";
+			};
+
+			ethernet@e6000 {
+				phy-handle = <&phy_rgmii_0>;
+				phy-connection-type = "rgmii";
+			};
+
+			ethernet@e8000 {
+				phy-handle = <&phy_rgmii_1>;
+				phy-connection-type = "rgmii";
+			};
+
+			mdio0: mdio@fc000 {
+				phy_sgmii_0: ethernet-phy@02 {
+					reg = <0x02>;
+				};
+
+				phy_sgmii_1: ethernet-phy@03 {
+					reg = <0x03>;
+				};
+
+				phy_sgmii_2: ethernet-phy@01 {
+					reg = <0x01>;
+				};
+
+				phy_rgmii_0: ethernet-phy@04 {
+					reg = <0x04>;
+				};
+
+				phy_rgmii_1: ethernet-phy@05 {
+					reg = <0x05>;
+				};
+			};
+		};
+	};
 };

 #include "t1042si-post.dtsi"
@@ -125,6 +125,10 @@ flash@0 {
		};

		i2c@118000 {
+			hwmon@2f {
+				compatible = "winbond,w83793";
+				reg = <0x2f>;
+			};
			eeprom@52 {
				compatible = "at24,24c256";
				reg = <0x52>;
...
@@ -44,6 +44,7 @@ CONFIG_FORCE_MAX_ZONEORDER=13
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAME_WARN=1024
 CONFIG_FTL=y
+CONFIG_GPIO_GENERIC_PLATFORM=y
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_HIGH_RES_TIMERS=y
@@ -104,8 +105,13 @@ CONFIG_PACKET=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_PERF_EVENTS=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_GPIO=y
+CONFIG_POWER_RESET_GPIO_RESTART=y
 CONFIG_QNX4FS_FS=m
 CONFIG_RCU_TRACE=y
+CONFIG_RESET_CONTROLLER=y
 CONFIG_ROOT_NFS=y
 CONFIG_SYSV_FS=m
 CONFIG_SYSVIPC=y
...
@@ -2,14 +2,42 @@
 #define _ASM_POWERPC_BOOK3S_32_PGALLOC_H

 #include <linux/threads.h>
+#include <linux/slab.h>

-/* For 32-bit, all levels of page tables are just drawn from get_free_page() */
-#define MAX_PGTABLE_INDEX_SIZE 0
+/*
+ * Functions that deal with pagetables that could be at any level of
+ * the table need to be passed an "index_size" so they know how to
+ * handle allocation.  For PTE pages (which are linked to a struct
+ * page for now, and drawn from the main get_free_pages() pool), the
+ * allocation size will be (2^index_size * sizeof(pointer)) and
+ * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
+ *
+ * The maximum index size needs to be big enough to allow any
+ * pagetable sizes we need, but small enough to fit in the low bits of
+ * any page table pointer.  In other words all pagetables, even tiny
+ * ones, must be aligned to allow at least enough low 0 bits to
+ * contain this value.  This value is also used as a mask, so it must
+ * be one less than a power of two.
+ */
+#define MAX_PGTABLE_INDEX_SIZE	0xf

 extern void __bad_pte(pmd_t *pmd);

-extern pgd_t *pgd_alloc(struct mm_struct *mm);
-extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+extern struct kmem_cache *pgtable_cache[];
+#define PGT_CACHE(shift) ({				\
+			BUG_ON(!(shift));		\
+			pgtable_cache[(shift) - 1];	\
+		})
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
+}

 /*
  * We don't have any real pmd's, and this code never triggers because
@@ -68,8 +96,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)

 static inline void pgtable_free(void *table, unsigned index_size)
 {
-	BUG_ON(index_size); /* 32-bit doesn't use this */
-	free_page((unsigned long)table);
+	if (!index_size) {
+		free_page((unsigned long)table);
+	} else {
+		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
+		kmem_cache_free(PGT_CACHE(index_size), table);
+	}
 }

 #define check_pgt_cache()	do { } while (0)
...
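For reference, the index-in-low-bits scheme the new comment describes, as a standalone C program (not from this commit; the table address and index_size values are made up):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MAX_PGTABLE_INDEX_SIZE 0xf	/* same mask as above */

	int main(void)
	{
		uint64_t table = 0x10000;	/* hypothetical table address, 16-byte aligned */
		unsigned int index_size = 5;	/* table holds 2^5 pointers */

		/* Alignment guarantees the low bits are free to hold the index size. */
		assert((table & MAX_PGTABLE_INDEX_SIZE) == 0);
		uint64_t tagged = table | index_size;

		/* Recover both halves, the way pgtable_free_tlb()-style code does. */
		printf("table=%#llx index_size=%u alloc=%zu bytes\n",
		       (unsigned long long)(tagged & ~(uint64_t)MAX_PGTABLE_INDEX_SIZE),
		       (unsigned int)(tagged & MAX_PGTABLE_INDEX_SIZE),
		       sizeof(void *) << index_size);	/* 2^index_size * sizeof(pointer) */
		return 0;
	}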
@@ -8,6 +8,23 @@
 /* And here we include common definitions */
 #include <asm/pte-common.h>

+#define PTE_INDEX_SIZE	PTE_SHIFT
+#define PMD_INDEX_SIZE	0
+#define PUD_INDEX_SIZE	0
+#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)
+
+#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
+
+#ifndef __ASSEMBLY__
+#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE	0
+#define PUD_TABLE_SIZE	0
+#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
+#endif	/* __ASSEMBLY__ */
+
+#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
+
 /*
  * The normal case is that PTEs are 32-bits and we have a 1-page
  * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
@@ -19,14 +36,10 @@
  * -Matt
  */
 /* PGDIR_SHIFT determines what a top-level page table entry can map */
-#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
+#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))

-#define PTRS_PER_PTE	(1 << PTE_SHIFT)
-#define PTRS_PER_PMD	1
-#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))
-
 #define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

 /*
  * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
@@ -82,12 +95,8 @@

 extern unsigned long ioremap_bot;

-/*
- * entries per page directory level: our page-table tree is two-level, so
- * we don't really have any PMD directory.
- */
-#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_SHIFT)
-#define PGD_TABLE_SIZE	(sizeof(pgd_t) << (32 - PGDIR_SHIFT))
+/* Bits to mask out from a PGD to get to the PUD page */
+#define PGD_MASKED_BITS		0

 #define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
@@ -284,15 +293,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
 #define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })

-#ifndef CONFIG_PPC_4K_PAGES
-void pgtable_cache_init(void);
-#else
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()	do { } while (0)
-#endif
-
 extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
		      pmd_t **pmdp);
...
@@ -800,9 +800,6 @@ extern struct page *pgd_page(pgd_t pgd);
 #define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

-void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
-void pgtable_cache_init(void);
-
 static inline int map_kernel_page(unsigned long ea, unsigned long pa,
				  unsigned long flags)
 {
...
@@ -51,12 +51,20 @@ static inline void __local_flush_hugetlb_page(struct vm_area_struct *vma,
 static inline pte_t *hugepd_page(hugepd_t hpd)
 {
	BUG_ON(!hugepd_ok(hpd));
+#ifdef CONFIG_PPC_8xx
+	return (pte_t *)__va(hpd.pd & ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
+#else
	return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
+#endif
 }

 static inline unsigned int hugepd_shift(hugepd_t hpd)
 {
+#ifdef CONFIG_PPC_8xx
+	return ((hpd.pd & _PMD_PAGE_MASK) >> 1) + 17;
+#else
	return hpd.pd & HUGEPD_SHIFT_MASK;
+#endif
 }

 #endif /* CONFIG_PPC_BOOK3S_64 */
@@ -99,7 +107,15 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
 void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);

+#ifdef CONFIG_PPC_8xx
+static inline void flush_hugetlb_page(struct vm_area_struct *vma,
+				      unsigned long vmaddr)
+{
+	flush_tlb_page(vma, vmaddr);
+}
+#else
 void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+#endif

 void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
@@ -205,7 +221,8 @@ static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
  * are reserved early in the boot process by memblock instead of via
  * the .dts as on IBM platforms.
  */
-#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
+#if defined(CONFIG_HUGETLB_PAGE) && (defined(CONFIG_PPC_FSL_BOOK3E) || \
+    defined(CONFIG_PPC_8xx))
 extern void __init reserve_hugetlb_gpages(void);
 #else
 static inline void reserve_hugetlb_gpages(void)
...
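The 8xx branches above pack everything into the 32-bit level-1 entry: the masked-off low bits give the hugepte table address, and the two _PMD_PAGE_MASK bits encode the page size. A standalone C check that the ((x >> 1) + 17) arithmetic recovers the right shifts (constants copied from this diff; the | 0x1000 table address is hypothetical):

	#include <assert.h>
	#include <stdio.h>

	#define _PMD_PAGE_MASK	0x000c	/* from pte-8xx.h */
	#define _PMD_PAGE_8M	0x000c
	#define _PMD_PAGE_512K	0x0004

	/* Same arithmetic as the new CONFIG_PPC_8xx hugepd_shift() above. */
	static unsigned int hugepd_shift_8xx(unsigned long pd)
	{
		return ((pd & _PMD_PAGE_MASK) >> 1) + 17;
	}

	int main(void)
	{
		assert(hugepd_shift_8xx(_PMD_PAGE_8M | 0x1000) == 23);	/* 1 << 23 == 8M */
		assert(hugepd_shift_8xx(_PMD_PAGE_512K | 0x1000) == 19);	/* 1 << 19 == 512k */
		printf("8M -> shift %u, 512k -> shift %u\n",
		       hugepd_shift_8xx(_PMD_PAGE_8M),
		       hugepd_shift_8xx(_PMD_PAGE_512K));
		return 0;
	}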
@@ -172,6 +172,41 @@ typedef struct {
 #define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
 #define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))

+/* Page size definitions, common between 32 and 64-bit
+ *
+ *    shift : is the "PAGE_SHIFT" value for that page size
+ *    penc  : is the pte encoding mask
+ *
+ */
+struct mmu_psize_def {
+	unsigned int	shift;	/* number of bits */
+	unsigned int	enc;	/* PTE encoding */
+	unsigned int	ind;	/* Corresponding indirect page size shift */
+	unsigned int	flags;
+#define MMU_PAGE_SIZE_DIRECT	0x1	/* Supported as a direct size */
+#define MMU_PAGE_SIZE_INDIRECT	0x2	/* Supported as an indirect size */
+};
+
+extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+
+static inline int shift_to_mmu_psize(unsigned int shift)
+{
+	int psize;
+
+	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
+		if (mmu_psize_defs[psize].shift == shift)
+			return psize;
+	return -1;
+}
+
+static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
+{
+	if (mmu_psize_defs[mmu_psize].shift)
+		return mmu_psize_defs[mmu_psize].shift;
+	BUG();
+}
+
 #endif /* !__ASSEMBLY__ */

 #if defined(CONFIG_PPC_4K_PAGES)
...
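The two helpers are inverse lookups over mmu_psize_defs[]: shift_to_mmu_psize() scans for a matching shift, mmu_psize_to_shift() reads it back. A minimal standalone mirror of the forward lookup (the two-entry table is hypothetical, standing in for the real mmu_psize_defs[]):

	#include <stdio.h>

	#define MMU_PAGE_COUNT 2	/* just for this sketch */

	struct mmu_psize_def { unsigned int shift; };

	static struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
		{ .shift = 14 },	/* 16k base pages */
		{ .shift = 19 },	/* 512k huge pages */
	};

	static int shift_to_mmu_psize(unsigned int shift)
	{
		int psize;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
			if (mmu_psize_defs[psize].shift == shift)
				return psize;
		return -1;	/* size not supported by this MMU */
	}

	int main(void)
	{
		printf("shift 19 -> psize %d\n", shift_to_mmu_psize(19));	/* 1 */
		printf("shift 20 -> psize %d\n", shift_to_mmu_psize(20));	/* -1 */
		return 0;
	}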
@@ -269,19 +269,20 @@ static inline bool early_radix_enabled(void)
 #define MMU_PAGE_64K	2
 #define MMU_PAGE_64K_AP	3	/* "Admixed pages" (hash64 only) */
 #define MMU_PAGE_256K	4
-#define MMU_PAGE_1M	5
-#define MMU_PAGE_2M	6
-#define MMU_PAGE_4M	7
-#define MMU_PAGE_8M	8
-#define MMU_PAGE_16M	9
-#define MMU_PAGE_64M	10
-#define MMU_PAGE_256M	11
-#define MMU_PAGE_1G	12
-#define MMU_PAGE_16G	13
-#define MMU_PAGE_64G	14
+#define MMU_PAGE_512K	5
+#define MMU_PAGE_1M	6
+#define MMU_PAGE_2M	7
+#define MMU_PAGE_4M	8
+#define MMU_PAGE_8M	9
+#define MMU_PAGE_16M	10
+#define MMU_PAGE_64M	11
+#define MMU_PAGE_256M	12
+#define MMU_PAGE_1G	13
+#define MMU_PAGE_16G	14
+#define MMU_PAGE_64G	15

 /* N.B. we need to change the type of hpte_page_sizes if this gets to be > 16 */
-#define MMU_PAGE_COUNT	15
+#define MMU_PAGE_COUNT	16

 #ifdef CONFIG_PPC_BOOK3S_64
 #include <asm/book3s/64/mmu.h>
...
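The retained N.B. comment is the reason MMU_PAGE_COUNT stops at 16: if I read the hash64 code right, hpte_page_sizes[] packs a base and an actual page-size index into one byte, a nibble each, so every index must fit in 4 bits. A compile-time guard one could place next to the table (a sketch, not part of this commit):

	/* hpte_page_sizes[] stores two page-size indices per byte, one per
	 * nibble, hence the limit the comment above warns about. */
	#define MMU_PAGE_COUNT	16	/* value after this change */

	_Static_assert(MMU_PAGE_COUNT <= 16,
		       "page-size indices must fit in one nibble of hpte_page_sizes[]");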
@@ -2,14 +2,42 @@
 #define _ASM_POWERPC_PGALLOC_32_H

 #include <linux/threads.h>
+#include <linux/slab.h>

-/* For 32-bit, all levels of page tables are just drawn from get_free_page() */
-#define MAX_PGTABLE_INDEX_SIZE 0
+/*
+ * Functions that deal with pagetables that could be at any level of
+ * the table need to be passed an "index_size" so they know how to
+ * handle allocation.  For PTE pages (which are linked to a struct
+ * page for now, and drawn from the main get_free_pages() pool), the
+ * allocation size will be (2^index_size * sizeof(pointer)) and
+ * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
+ *
+ * The maximum index size needs to be big enough to allow any
+ * pagetable sizes we need, but small enough to fit in the low bits of
+ * any page table pointer.  In other words all pagetables, even tiny
+ * ones, must be aligned to allow at least enough low 0 bits to
+ * contain this value.  This value is also used as a mask, so it must
+ * be one less than a power of two.
+ */
+#define MAX_PGTABLE_INDEX_SIZE	0xf

 extern void __bad_pte(pmd_t *pmd);

-extern pgd_t *pgd_alloc(struct mm_struct *mm);
-extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+extern struct kmem_cache *pgtable_cache[];
+#define PGT_CACHE(shift) ({				\
+			BUG_ON(!(shift));		\
+			pgtable_cache[(shift) - 1];	\
+		})
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
+}

 /*
  * We don't have any real pmd's, and this code never triggers because
@@ -68,8 +96,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)

 static inline void pgtable_free(void *table, unsigned index_size)
 {
-	BUG_ON(index_size); /* 32-bit doesn't use this */
-	free_page((unsigned long)table);
+	if (!index_size) {
+		free_page((unsigned long)table);
+	} else {
+		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
+		kmem_cache_free(PGT_CACHE(index_size), table);
+	}
 }

 #define check_pgt_cache()	do { } while (0)
...
@@ -16,6 +16,23 @@ extern int icache_44x_need_flush;

 #endif /* __ASSEMBLY__ */

+#define PTE_INDEX_SIZE	PTE_SHIFT
+#define PMD_INDEX_SIZE	0
+#define PUD_INDEX_SIZE	0
+#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)
+
+#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
+
+#ifndef __ASSEMBLY__
+#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE	0
+#define PUD_TABLE_SIZE	0
+#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
+#endif	/* __ASSEMBLY__ */
+
+#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
+
 /*
  * The normal case is that PTEs are 32-bits and we have a 1-page
  * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
@@ -27,22 +44,12 @@ extern int icache_44x_need_flush;
  * -Matt
  */
 /* PGDIR_SHIFT determines what a top-level page table entry can map */
-#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
+#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))

-/*
- * entries per page directory level: our page-table tree is two-level, so
- * we don't really have any PMD directory.
- */
-#ifndef __ASSEMBLY__
-#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_SHIFT)
-#define PGD_TABLE_SIZE	(sizeof(pgd_t) << (32 - PGDIR_SHIFT))
-#endif	/* __ASSEMBLY__ */
-
-#define PTRS_PER_PTE	(1 << PTE_SHIFT)
-#define PTRS_PER_PMD	1
-#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))
+/* Bits to mask out from a PGD to get to the PUD page */
+#define PGD_MASKED_BITS		0

 #define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
 #define FIRST_USER_ADDRESS	0UL
@@ -329,15 +336,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
 #define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })

-#ifndef CONFIG_PPC_4K_PAGES
-void pgtable_cache_init(void);
-#else
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()	do { } while (0)
-#endif
-
 extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
		      pmd_t **pmdp);
...
@@ -49,6 +49,7 @@
 #define _PMD_BAD	0x0ff0
 #define _PMD_PAGE_MASK	0x000c
 #define _PMD_PAGE_8M	0x000c
+#define _PMD_PAGE_512K	0x0004

 /* Until my rework is finished, 8xx still needs atomic PTE updates */
 #define PTE_ATOMIC_UPDATES	1
...
@@ -347,8 +347,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
 #define __swp_entry_to_pte(x)		__pte((x).val)

-void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
-void pgtable_cache_init(void);
 extern int map_kernel_page(unsigned long ea, unsigned long pa,
			   unsigned long flags);
 extern int __meminit vmemmap_create_mapping(unsigned long start,
...
@@ -226,7 +226,11 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 #ifdef CONFIG_HUGETLB_PAGE
 static inline int hugepd_ok(hugepd_t hpd)
 {
+#ifdef CONFIG_PPC_8xx
+	return ((hpd.pd & 0x4) != 0);
+#else
	return (hpd.pd > 0);
+#endif
 }

 static inline int pmd_huge(pmd_t pmd)
...
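hugepd_ok() on 8xx now keys off bit 0x4 of the stored value: both large-page encodings (_PMD_PAGE_8M = 0xc, _PMD_PAGE_512K = 0x4) have it set, while a normal level-1 entry pointing at a page-aligned pte table does not. A standalone C check (the 0x1000 sample table address is hypothetical):

	#include <assert.h>

	#define _PMD_PAGE_8M	0x000c	/* from pte-8xx.h in this series */
	#define _PMD_PAGE_512K	0x0004

	static int hugepd_ok_8xx(unsigned long pd)
	{
		return (pd & 0x4) != 0;	/* same test as the CONFIG_PPC_8xx branch */
	}

	int main(void)
	{
		assert(hugepd_ok_8xx(_PMD_PAGE_8M | 0x1000));
		assert(hugepd_ok_8xx(_PMD_PAGE_512K | 0x1000));
		assert(!hugepd_ok_8xx(0x1000 | 0x1));	/* page-aligned pte table + present bit */
		return 0;
	}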
@@ -78,6 +78,8 @@ static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,

 unsigned long vmalloc_to_phys(void *vmalloc_addr);

+void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
+void pgtable_cache_init(void);
 #endif /* __ASSEMBLY__ */

 #endif /* _ASM_POWERPC_PGTABLE_H */
@@ -4,7 +4,7 @@
 #ifndef _ASM_POWERPC_REG_8xx_H
 #define _ASM_POWERPC_REG_8xx_H

-#include <asm/mmu-8xx.h>
+#include <asm/mmu.h>

 /* Cache control on the MPC8xx is provided through some additional
  * special purpose registers.
...
@@ -73,6 +73,9 @@
 #define RPN_PATTERN	0x00f0
 #endif

+#define PAGE_SHIFT_512K		19
+#define PAGE_SHIFT_8M		23
+
	__HEAD
 _ENTRY(_stext);
 _ENTRY(_start);
@@ -322,7 +325,7 @@ SystemCall:
 #endif

 InstructionTLBMiss:
-#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
+#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE)
	mtspr	SPRN_SPRG_SCRATCH2, r3
 #endif
	EXCEPTION_PROLOG_0
@@ -332,10 +335,12 @@ InstructionTLBMiss:
	 */
	mfspr	r10, SPRN_SRR0	/* Get effective address of fault */
	INVALIDATE_ADJACENT_PAGES_CPU15(r11, r10)
-#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
	/* Only modules will cause ITLB Misses as we always
	 * pin the first 8MB of kernel memory */
+#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE)
	mfcr	r3
+#endif
+#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
	IS_KERNEL(r11, r10)
 #endif
	mfspr	r11, SPRN_M_TW	/* Get level 1 table */
@@ -343,7 +348,6 @@ InstructionTLBMiss:
	BRANCH_UNLESS_KERNEL(3f)
	lis	r11, (swapper_pg_dir-PAGE_OFFSET)@ha
 3:
-	mtcr	r3
 #endif
	/* Insert level 1 index */
	rlwimi	r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
@@ -351,14 +355,25 @@ InstructionTLBMiss:

	/* Extract level 2 index */
	rlwinm	r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
+#ifdef CONFIG_HUGETLB_PAGE
+	mtcr	r11
+	bt-	28, 10f		/* bit 28 = Large page (8M) */
+	bt-	29, 20f		/* bit 29 = Large page (8M or 512k) */
+#endif
	rlwimi	r10, r11, 0, 0, 32 - PAGE_SHIFT - 1	/* Add level 2 base */
	lwz	r10, 0(r10)	/* Get the pte */
+4:
+#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE)
+	mtcr	r3
+#endif
	/* Insert the APG into the TWC from the Linux PTE. */
	rlwimi	r11, r10, 0, 25, 26
	/* Load the MI_TWC with the attributes for this "segment." */
	MTSPR_CPU6(SPRN_MI_TWC, r11, r3)	/* Set segment attributes */

+#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
+	rlwimi	r10, r11, 1, MI_SPS16K
+#endif
 #ifdef CONFIG_SWAP
	rlwinm	r11, r10, 32-5, _PAGE_PRESENT
	and	r11, r11, r10
@@ -371,16 +386,45 @@ InstructionTLBMiss:
	 * set.  All other Linux PTE bits control the behavior
	 * of the MMU.
	 */
+#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
+	rlwimi	r10, r11, 0, 0x0ff0	/* Set 24-27, clear 20-23 */
+#else
	rlwimi	r10, r11, 0, 0x0ff8	/* Set 24-27, clear 20-23,28 */
+#endif
	MTSPR_CPU6(SPRN_MI_RPN, r10, r3)	/* Update TLB entry */

	/* Restore registers */
-#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
+#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE)
	mfspr	r3, SPRN_SPRG_SCRATCH2
 #endif
	EXCEPTION_EPILOG_0
	rfi

+#ifdef CONFIG_HUGETLB_PAGE
+10:	/* 8M pages */
+#ifdef CONFIG_PPC_16K_PAGES
+	/* Extract level 2 index */
+	rlwinm	r10, r10, 32 - (PAGE_SHIFT_8M - PAGE_SHIFT), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29
+	/* Add level 2 base */
+	rlwimi	r10, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1
+#else
+	/* Level 2 base */
+	rlwinm	r10, r11, 0, ~HUGEPD_SHIFT_MASK
+#endif
+	lwz	r10, 0(r10)	/* Get the pte */
+	rlwinm	r11, r11, 0, 0xf
+	b	4b
+
+20:	/* 512k pages */
+	/* Extract level 2 index */
+	rlwinm	r10, r10, 32 - (PAGE_SHIFT_512K - PAGE_SHIFT), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29
+	/* Add level 2 base */
+	rlwimi	r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1
+	lwz	r10, 0(r10)	/* Get the pte */
+	rlwinm	r11, r11, 0, 0xf
+	b	4b
+#endif

	. = 0x1200
 DataStoreTLBMiss:
	mtspr	SPRN_SPRG_SCRATCH2, r3
@@ -407,7 +451,6 @@ _ENTRY(DTLBMiss_jmp)
 #endif
	blt	cr7, DTLBMissLinear
 3:
-	mtcr	r3
	mfspr	r10, SPRN_MD_EPN

	/* Insert level 1 index */
@@ -418,8 +461,15 @@ _ENTRY(DTLBMiss_jmp)
	 */
	/* Extract level 2 index */
	rlwinm	r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
+#ifdef CONFIG_HUGETLB_PAGE
+	mtcr	r11
+	bt-	28, 10f		/* bit 28 = Large page (8M) */
+	bt-	29, 20f		/* bit 29 = Large page (8M or 512k) */
+#endif
	rlwimi	r10, r11, 0, 0, 32 - PAGE_SHIFT - 1	/* Add level 2 base */
	lwz	r10, 0(r10)	/* Get the pte */
+4:
+	mtcr	r3

	/* Insert the Guarded flag and APG into the TWC from the Linux PTE.
	 * It is bit 26-27 of both the Linux PTE and the TWC (at least
@@ -434,6 +484,11 @@ _ENTRY(DTLBMiss_jmp)
	rlwimi	r11, r10, 32-5, 30, 30
	MTSPR_CPU6(SPRN_MD_TWC, r11, r3)

+	/* In 4k pages mode, SPS (bit 28) in RPN must match PS[1] (bit 29)
+	 * In 16k pages mode, SPS is always 1 */
+#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
+	rlwimi	r10, r11, 1, MD_SPS16K
+#endif
	/* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set.
	 * We also need to know if the insn is a load/store, so:
	 * Clear _PAGE_PRESENT and load that which will
@@ -455,7 +510,11 @@ _ENTRY(DTLBMiss_jmp)
	 * of the MMU.
	 */
	li	r11, RPN_PATTERN
+#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
+	rlwimi	r10, r11, 0, 24, 27	/* Set 24-27 */
+#else
	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
+#endif
	rlwimi	r10, r11, 0, 20, 20	/* clear 20 */
	MTSPR_CPU6(SPRN_MD_RPN, r10, r3)	/* Update TLB entry */

@@ -465,6 +524,30 @@ _ENTRY(DTLBMiss_jmp)
	EXCEPTION_EPILOG_0
	rfi

+#ifdef CONFIG_HUGETLB_PAGE
+10:	/* 8M pages */
+	/* Extract level 2 index */
+#ifdef CONFIG_PPC_16K_PAGES
+	rlwinm	r10, r10, 32 - (PAGE_SHIFT_8M - PAGE_SHIFT), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29
+	/* Add level 2 base */
+	rlwimi	r10, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1
+#else
+	/* Level 2 base */
+	rlwinm	r10, r11, 0, ~HUGEPD_SHIFT_MASK
+#endif
+	lwz	r10, 0(r10)	/* Get the pte */
+	rlwinm	r11, r11, 0, 0xf
+	b	4b
+
+20:	/* 512k pages */
+	/* Extract level 2 index */
+	rlwinm	r10, r10, 32 - (PAGE_SHIFT_512K - PAGE_SHIFT), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29
+	/* Add level 2 base */
+	rlwimi	r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1
+	lwz	r10, 0(r10)	/* Get the pte */
+	rlwinm	r11, r11, 0, 0xf
+	b	4b
+#endif

 /* This is an instruction TLB error on the MPC8xx.  This could be due
  * to many reasons, such as executing guarded memory or illegal instruction
@@ -586,6 +669,9 @@ _ENTRY(FixupDAR_cmp)
	/* Insert level 1 index */
 3:	rlwimi	r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
	lwz	r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11)	/* Get the level 1 entry */
+	mtcr	r11
+	bt	28,200f		/* bit 28 = Large page (8M) */
+	bt	29,202f		/* bit 29 = Large page (8M or 512K) */
	rlwinm	r11, r11,0,0,19	/* Extract page descriptor page address */
	/* Insert level 2 index */
	rlwimi	r11, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
@@ -611,6 +697,27 @@ _ENTRY(FixupDAR_cmp)
 141:	mfspr	r10,SPRN_SPRG_SCRATCH2
	b	DARFixed	/* Nope, go back to normal TLB processing */

+	/* concat physical page address(r11) and page offset(r10) */
+200:
+#ifdef CONFIG_PPC_16K_PAGES
+	rlwinm	r11, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1
+	rlwimi	r11, r10, 32 - (PAGE_SHIFT_8M - 2), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29
+#else
+	rlwinm	r11, r10, 0, ~HUGEPD_SHIFT_MASK
+#endif
+	lwz	r11, 0(r11)	/* Get the pte */
+	/* concat physical page address(r11) and page offset(r10) */
+	rlwimi	r11, r10, 0, 32 - PAGE_SHIFT_8M, 31
+	b	201b
+
+202:
+	rlwinm	r11, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1
+	rlwimi	r11, r10, 32 - (PAGE_SHIFT_512K - 2), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29
+	lwz	r11, 0(r11)	/* Get the pte */
+	/* concat physical page address(r11) and page offset(r10) */
+	rlwimi	r11, r10, 0, 32 - PAGE_SHIFT_512K, 31
+	b	201b
+
 144:	mfspr	r10, SPRN_DSISR
	rlwinm	r10, r10,0,7,5	/* Clear store bit for buggy dcbst insn */
	mtspr	SPRN_DSISR, r10
...
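The new 512k cases in the TLB-miss handlers compose two rotate-and-mask (rlwinm/rlwimi) steps. With 4k base pages (PAGE_SHIFT = 12, so one level-1 entry covers 4M), the net effect is to index an 8-entry hugepte table by EA bits 19..21, scaled by sizeof(pte_t). A standalone C check of that arithmetic (the sample address is arbitrary):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT		12	/* assumes the 4k configuration */
	#define PGDIR_SHIFT		22	/* one level-1 entry maps 4M */
	#define PAGE_SHIFT_512K		19

	int main(void)
	{
		uint32_t ea = 0x00380123;	/* hypothetical faulting address */

		/* 512k entries under one 4M level-1 entry: 2^(22-19) = 8 */
		uint32_t idx = (ea >> PAGE_SHIFT_512K) &
			       ((1u << (PGDIR_SHIFT - PAGE_SHIFT_512K)) - 1);
		uint32_t off = idx * 4;	/* sizeof(pte_t) on 8xx */

		/* Same value the composed rlwinm mask form yields in the 20: handler. */
		assert(off == ((ea >> 17) & 0x1c));

		printf("EA %#x -> entry %u, byte offset %u\n", ea, idx, off);
		return 0;
	}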
@@ -7,7 +7,8 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
 ccflags-$(CONFIG_PPC64)	:= $(NO_MINIMAL_TOC)

 obj-y				:= fault.o mem.o pgtable.o mmap.o \
-				   init_$(BITS).o pgtable_$(BITS).o
+				   init_$(BITS).o pgtable_$(BITS).o \
+				   init-common.o
 obj-$(CONFIG_PPC_MMU_NOHASH)	+= mmu_context_nohash.o tlb_nohash.o \
				   tlb_nohash_low.o
 obj-$(CONFIG_PPC_BOOK3E)	+= tlb_low_$(BITS)e.o
...
...@@ -26,6 +26,8 @@ ...@@ -26,6 +26,8 @@
#ifdef CONFIG_HUGETLB_PAGE #ifdef CONFIG_HUGETLB_PAGE
#define PAGE_SHIFT_64K 16 #define PAGE_SHIFT_64K 16
#define PAGE_SHIFT_512K 19
#define PAGE_SHIFT_8M 23
#define PAGE_SHIFT_16M 24 #define PAGE_SHIFT_16M 24
#define PAGE_SHIFT_16G 34 #define PAGE_SHIFT_16G 34
...@@ -38,7 +40,7 @@ unsigned int HPAGE_SHIFT; ...@@ -38,7 +40,7 @@ unsigned int HPAGE_SHIFT;
* implementations may have more than one gpage size, so we need multiple * implementations may have more than one gpage size, so we need multiple
* arrays * arrays
*/ */
#ifdef CONFIG_PPC_FSL_BOOK3E #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define MAX_NUMBER_GPAGES 128 #define MAX_NUMBER_GPAGES 128
struct psize_gpages { struct psize_gpages {
u64 gpage_list[MAX_NUMBER_GPAGES]; u64 gpage_list[MAX_NUMBER_GPAGES];
...@@ -64,14 +66,16 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, ...@@ -64,14 +66,16 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
{ {
struct kmem_cache *cachep; struct kmem_cache *cachep;
pte_t *new; pte_t *new;
#ifdef CONFIG_PPC_FSL_BOOK3E
int i; int i;
int num_hugepd = 1 << (pshift - pdshift); int num_hugepd;
cachep = hugepte_cache;
#else if (pshift >= pdshift) {
cachep = PGT_CACHE(pdshift - pshift); cachep = hugepte_cache;
#endif num_hugepd = 1 << (pshift - pdshift);
} else {
cachep = PGT_CACHE(pdshift - pshift);
num_hugepd = 1;
}
new = kmem_cache_zalloc(cachep, GFP_KERNEL); new = kmem_cache_zalloc(cachep, GFP_KERNEL);
...@@ -89,7 +93,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, ...@@ -89,7 +93,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
smp_wmb(); smp_wmb();
spin_lock(&mm->page_table_lock); spin_lock(&mm->page_table_lock);
#ifdef CONFIG_PPC_FSL_BOOK3E
/* /*
* We have multiple higher-level entries that point to the same * We have multiple higher-level entries that point to the same
* actual pte location. Fill in each as we go and backtrack on error. * actual pte location. Fill in each as we go and backtrack on error.
...@@ -100,8 +104,18 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, ...@@ -100,8 +104,18 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
if (unlikely(!hugepd_none(*hpdp))) if (unlikely(!hugepd_none(*hpdp)))
break; break;
else else
#ifdef CONFIG_PPC_BOOK3S_64
hpdp->pd = __pa(new) |
(shift_to_mmu_psize(pshift) << 2);
#elif defined(CONFIG_PPC_8xx)
hpdp->pd = __pa(new) |
(pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
_PMD_PAGE_512K) |
_PMD_PRESENT;
#else
/* We use the old format for PPC_FSL_BOOK3E */ /* We use the old format for PPC_FSL_BOOK3E */
hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift; hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
#endif
} }
/* If we bailed from the for loop early, an error occurred, clean up */ /* If we bailed from the for loop early, an error occurred, clean up */
if (i < num_hugepd) { if (i < num_hugepd) {
...@@ -109,17 +123,6 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, ...@@ -109,17 +123,6 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
hpdp->pd = 0; hpdp->pd = 0;
kmem_cache_free(cachep, new); kmem_cache_free(cachep, new);
} }
#else
if (!hugepd_none(*hpdp))
kmem_cache_free(cachep, new);
else {
#ifdef CONFIG_PPC_BOOK3S_64
hpdp->pd = __pa(new) | (shift_to_mmu_psize(pshift) << 2);
#else
hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
#endif
}
#endif
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
return 0; return 0;
} }
...@@ -128,7 +131,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, ...@@ -128,7 +131,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
* These macros define how to determine which level of the page table holds * These macros define how to determine which level of the page table holds
* the hpdp. * the hpdp.
*/ */
#ifdef CONFIG_PPC_FSL_BOOK3E #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT #define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT #define HUGEPD_PUD_SHIFT PUD_SHIFT
#else #else
...@@ -136,7 +139,6 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, ...@@ -136,7 +139,6 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
#define HUGEPD_PUD_SHIFT PMD_SHIFT #define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif #endif
#ifdef CONFIG_PPC_BOOK3S_64
/* /*
* At this point we do the placement change only for BOOK3S 64. This would * At this point we do the placement change only for BOOK3S 64. This would
* possibly work on other subarchs. * possibly work on other subarchs.
...@@ -153,6 +155,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz ...@@ -153,6 +155,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
addr &= ~(sz-1); addr &= ~(sz-1);
pg = pgd_offset(mm, addr); pg = pgd_offset(mm, addr);
#ifdef CONFIG_PPC_BOOK3S_64
if (pshift == PGDIR_SHIFT) if (pshift == PGDIR_SHIFT)
/* 16GB huge page */ /* 16GB huge page */
return (pte_t *) pg; return (pte_t *) pg;
...@@ -178,32 +181,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz ...@@ -178,32 +181,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
hpdp = (hugepd_t *)pm; hpdp = (hugepd_t *)pm;
} }
} }
if (!hpdp)
return NULL;
BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));
if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
return NULL;
return hugepte_offset(*hpdp, addr, pdshift);
}
#else #else
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
pgd_t *pg;
pud_t *pu;
pmd_t *pm;
hugepd_t *hpdp = NULL;
unsigned pshift = __ffs(sz);
unsigned pdshift = PGDIR_SHIFT;
addr &= ~(sz-1);
pg = pgd_offset(mm, addr);
if (pshift >= HUGEPD_PGD_SHIFT) { if (pshift >= HUGEPD_PGD_SHIFT) {
hpdp = (hugepd_t *)pg; hpdp = (hugepd_t *)pg;
} else { } else {
...@@ -217,7 +195,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz ...@@ -217,7 +195,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
hpdp = (hugepd_t *)pm; hpdp = (hugepd_t *)pm;
} }
} }
#endif
if (!hpdp) if (!hpdp)
return NULL; return NULL;
...@@ -228,9 +206,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz ...@@ -228,9 +206,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
return hugepte_offset(*hpdp, addr, pdshift); return hugepte_offset(*hpdp, addr, pdshift);
} }
#endif
#ifdef CONFIG_PPC_FSL_BOOK3E #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
/* Build list of addresses of gigantic pages. This function is used in early /* Build list of addresses of gigantic pages. This function is used in early
* boot before the buddy allocator is setup. * boot before the buddy allocator is setup.
*/ */
...@@ -310,7 +287,11 @@ static int __init do_gpage_early_setup(char *param, char *val, ...@@ -310,7 +287,11 @@ static int __init do_gpage_early_setup(char *param, char *val,
npages = 0; npages = 0;
if (npages > MAX_NUMBER_GPAGES) { if (npages > MAX_NUMBER_GPAGES) {
pr_warn("MMU: %lu pages requested for page " pr_warn("MMU: %lu pages requested for page "
#ifdef CONFIG_PHYS_ADDR_T_64BIT
"size %llu KB, limiting to " "size %llu KB, limiting to "
#else
"size %u KB, limiting to "
#endif
__stringify(MAX_NUMBER_GPAGES) "\n", __stringify(MAX_NUMBER_GPAGES) "\n",
npages, size / 1024); npages, size / 1024);
npages = MAX_NUMBER_GPAGES; npages = MAX_NUMBER_GPAGES;
...@@ -392,7 +373,7 @@ int alloc_bootmem_huge_page(struct hstate *hstate) ...@@ -392,7 +373,7 @@ int alloc_bootmem_huge_page(struct hstate *hstate)
} }
#endif #endif
#ifdef CONFIG_PPC_FSL_BOOK3E #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define HUGEPD_FREELIST_SIZE \ #define HUGEPD_FREELIST_SIZE \
((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t)) ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))
...@@ -442,6 +423,8 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte) ...@@ -442,6 +423,8 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
} }
put_cpu_var(hugepd_freelist_cur); put_cpu_var(hugepd_freelist_cur);
} }
#else
static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
#endif #endif
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift, static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
...@@ -453,13 +436,11 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif ...@@ -453,13 +436,11 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
unsigned long pdmask = ~((1UL << pdshift) - 1); unsigned long pdmask = ~((1UL << pdshift) - 1);
unsigned int num_hugepd = 1; unsigned int num_hugepd = 1;
unsigned int shift = hugepd_shift(*hpdp);
#ifdef CONFIG_PPC_FSL_BOOK3E
/* Note: On fsl the hpdp may be the first of several */ /* Note: On fsl the hpdp may be the first of several */
num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift)); if (shift > pdshift)
#else num_hugepd = 1 << (shift - pdshift);
unsigned int shift = hugepd_shift(*hpdp);
#endif
start &= pdmask; start &= pdmask;
if (start < floor) if (start < floor)
...@@ -475,11 +456,10 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif ...@@ -475,11 +456,10 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
for (i = 0; i < num_hugepd; i++, hpdp++) for (i = 0; i < num_hugepd; i++, hpdp++)
hpdp->pd = 0; hpdp->pd = 0;
#ifdef CONFIG_PPC_FSL_BOOK3E if (shift >= pdshift)
hugepd_free(tlb, hugepte); hugepd_free(tlb, hugepte);
#else else
pgtable_free_tlb(tlb, hugepte, pdshift - shift); pgtable_free_tlb(tlb, hugepte, pdshift - shift);
#endif
} }
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
...@@ -492,6 +472,8 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, ...@@ -492,6 +472,8 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
start = addr; start = addr;
do { do {
unsigned long more;
pmd = pmd_offset(pud, addr); pmd = pmd_offset(pud, addr);
next = pmd_addr_end(addr, end); next = pmd_addr_end(addr, end);
if (!is_hugepd(__hugepd(pmd_val(*pmd)))) { if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
...@@ -502,15 +484,16 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, ...@@ -502,15 +484,16 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
WARN_ON(!pmd_none_or_clear_bad(pmd)); WARN_ON(!pmd_none_or_clear_bad(pmd));
continue; continue;
} }
#ifdef CONFIG_PPC_FSL_BOOK3E
/* /*
* Increment next by the size of the huge mapping since * Increment next by the size of the huge mapping since
* there may be more than one entry at this level for a * there may be more than one entry at this level for a
* single hugepage, but all of them point to * single hugepage, but all of them point to
* the same kmem cache that holds the hugepte. * the same kmem cache that holds the hugepte.
*/ */
next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd)); more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
#endif if (more > next)
next = more;
free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT, free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
addr, next, floor, ceiling); addr, next, floor, ceiling);
} while (addr = next, addr != end); } while (addr = next, addr != end);
...@@ -550,15 +533,17 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, ...@@ -550,15 +533,17 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
hugetlb_free_pmd_range(tlb, pud, addr, next, floor, hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
ceiling); ceiling);
} else { } else {
#ifdef CONFIG_PPC_FSL_BOOK3E unsigned long more;
/* /*
* Increment next by the size of the huge mapping since * Increment next by the size of the huge mapping since
* there may be more than one entry at this level for a * there may be more than one entry at this level for a
* single hugepage, but all of them point to * single hugepage, but all of them point to
* the same kmem cache that holds the hugepte. * the same kmem cache that holds the hugepte.
*/ */
next = addr + (1 << hugepd_shift(*(hugepd_t *)pud)); more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
#endif if (more > next)
next = more;
free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT, free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
addr, next, floor, ceiling); addr, next, floor, ceiling);
} }
...@@ -615,15 +600,17 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, ...@@ -615,15 +600,17 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
continue; continue;
hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling); hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
} else { } else {
#ifdef CONFIG_PPC_FSL_BOOK3E unsigned long more;
/* /*
* Increment next by the size of the huge mapping since * Increment next by the size of the huge mapping since
* there may be more than one entry at the pgd level * there may be more than one entry at the pgd level
* for a single hugepage, but all of them point to the * for a single hugepage, but all of them point to the
* same kmem cache that holds the hugepte. * same kmem cache that holds the hugepte.
*/ */
next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd)); more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
#endif if (more > next)
next = more;
free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT, free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
addr, next, floor, ceiling); addr, next, floor, ceiling);
} }
...@@ -753,12 +740,13 @@ static int __init add_huge_page_size(unsigned long long size) ...@@ -753,12 +740,13 @@ static int __init add_huge_page_size(unsigned long long size)
/* Check that it is a page size supported by the hardware and /* Check that it is a page size supported by the hardware and
* that it fits within pagetable and slice limits. */ * that it fits within pagetable and slice limits. */
#ifdef CONFIG_PPC_FSL_BOOK3E if (size <= PAGE_SIZE)
if ((size < PAGE_SIZE) || !is_power_of_4(size))
return -EINVAL; return -EINVAL;
#else #if defined(CONFIG_PPC_FSL_BOOK3E)
if (!is_power_of_2(size) if (!is_power_of_4(size))
|| (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT)) return -EINVAL;
#elif !defined(CONFIG_PPC_8xx)
if (!is_power_of_2(size) || (shift > SLICE_HIGH_SHIFT))
return -EINVAL; return -EINVAL;
#endif #endif
...@@ -791,53 +779,15 @@ static int __init hugepage_setup_sz(char *str) ...@@ -791,53 +779,15 @@ static int __init hugepage_setup_sz(char *str)
} }
__setup("hugepagesz=", hugepage_setup_sz); __setup("hugepagesz=", hugepage_setup_sz);
#ifdef CONFIG_PPC_FSL_BOOK3E
struct kmem_cache *hugepte_cache; struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void) static int __init hugetlbpage_init(void)
{ {
int psize; int psize;
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { #if !defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_PPC_8xx)
unsigned shift;
if (!mmu_psize_defs[psize].shift)
continue;
shift = mmu_psize_to_shift(psize);
/* Don't treat normal page sizes as huge... */
if (shift != PAGE_SHIFT)
if (add_huge_page_size(1ULL << shift) < 0)
continue;
}
/*
* Create a kmem cache for hugeptes. The bottom bits in the pte have
* size information encoded in them, so align them to allow this
*/
hugepte_cache = kmem_cache_create("hugepte-cache", sizeof(pte_t),
HUGEPD_SHIFT_MASK + 1, 0, NULL);
if (hugepte_cache == NULL)
panic("%s: Unable to create kmem cache for hugeptes\n",
__func__);
/* Default hpage size = 4M */
if (mmu_psize_defs[MMU_PAGE_4M].shift)
HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
else
panic("%s: Unable to set default huge page size\n", __func__);
return 0;
}
#else
static int __init hugetlbpage_init(void)
{
int psize;
if (!radix_enabled() && !mmu_has_feature(MMU_FTR_16M_PAGE)) if (!radix_enabled() && !mmu_has_feature(MMU_FTR_16M_PAGE))
return -ENODEV; return -ENODEV;
#endif
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
unsigned shift; unsigned shift;
unsigned pdshift; unsigned pdshift;
...@@ -850,9 +800,9 @@ static int __init hugetlbpage_init(void) ...@@ -850,9 +800,9 @@ static int __init hugetlbpage_init(void)
if (add_huge_page_size(1ULL << shift) < 0) if (add_huge_page_size(1ULL << shift) < 0)
continue; continue;
if (shift < PMD_SHIFT) if (shift < HUGEPD_PUD_SHIFT)
pdshift = PMD_SHIFT; pdshift = PMD_SHIFT;
else if (shift < PUD_SHIFT) else if (shift < HUGEPD_PGD_SHIFT)
pdshift = PUD_SHIFT; pdshift = PUD_SHIFT;
else else
pdshift = PGDIR_SHIFT; pdshift = PGDIR_SHIFT;
...@@ -860,14 +810,38 @@ static int __init hugetlbpage_init(void) ...@@ -860,14 +810,38 @@ static int __init hugetlbpage_init(void)
* if pdshift and shift are the same, we don't * if pdshift and shift are the same, we don't
* use the pgt cache for the hugepd. * use the pgt cache for the hugepd.
*/ */
if (pdshift != shift) { if (pdshift > shift) {
pgtable_cache_add(pdshift - shift, NULL); pgtable_cache_add(pdshift - shift, NULL);
if (!PGT_CACHE(pdshift - shift)) if (!PGT_CACHE(pdshift - shift))
panic("hugetlbpage_init(): could not create " panic("hugetlbpage_init(): could not create "
"pgtable cache for %d bit pagesize\n", shift); "pgtable cache for %d bit pagesize\n", shift);
} }
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
else if (!hugepte_cache) {
/*
* Create a kmem cache for hugeptes. The bottom bits in
* the pte have size information encoded in them, so
* align them to allow this
*/
hugepte_cache = kmem_cache_create("hugepte-cache",
sizeof(pte_t),
HUGEPD_SHIFT_MASK + 1,
0, NULL);
if (hugepte_cache == NULL)
panic("%s: Unable to create kmem cache "
"for hugeptes\n", __func__);
}
#endif
} }
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
/* Default hpage size = 4M on FSL_BOOK3E and 512k on 8xx */
if (mmu_psize_defs[MMU_PAGE_4M].shift)
HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
else if (mmu_psize_defs[MMU_PAGE_512K].shift)
HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_512K].shift;
#else
/* Set default large page size. Currently, we pick 16M or 1M /* Set default large page size. Currently, we pick 16M or 1M
* depending on what is available * depending on what is available
*/ */
...@@ -877,11 +851,13 @@ static int __init hugetlbpage_init(void) ...@@ -877,11 +851,13 @@ static int __init hugetlbpage_init(void)
HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift; HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
else if (mmu_psize_defs[MMU_PAGE_2M].shift) else if (mmu_psize_defs[MMU_PAGE_2M].shift)
HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift; HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
#endif
else
panic("%s: Unable to set default huge page size\n", __func__);
return 0; return 0;
} }
#endif
arch_initcall(hugetlbpage_init); arch_initcall(hugetlbpage_init);
void flush_dcache_icache_hugepage(struct page *page) void flush_dcache_icache_hugepage(struct page *page)
......
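The HUGEPD_SHIFT_MASK + 1 alignment requested for hugepte-cache exists so the low bits of a stored hugepd pointer stay zero and can carry the page-size shift instead. Hypothetical accessors under that assumption (illustrative, not the exact in-tree helpers):

	/* decode a hugepd value whose table came from a suitably aligned cache */
	static inline unsigned int sketch_hugepd_shift(unsigned long hpd)
	{
		return hpd & HUGEPD_SHIFT_MASK;		/* size info in the low bits */
	}

	static inline pte_t *sketch_hugepte_table(unsigned long hpd)
	{
		return (pte_t *)(hpd & ~HUGEPD_SHIFT_MASK);
	}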
/*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* Dave Engebretsen <engebret@us.ibm.com>
* Rework for PPC64 port.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#undef DEBUG
#include <linux/string.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
static void pgd_ctor(void *addr)
{
memset(addr, 0, PGD_TABLE_SIZE);
}
static void pud_ctor(void *addr)
{
memset(addr, 0, PUD_TABLE_SIZE);
}
static void pmd_ctor(void *addr)
{
memset(addr, 0, PMD_TABLE_SIZE);
}
struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
/*
* Create a kmem_cache() for pagetables. This is not used for PTE
* pages - they're linked to struct page, come from the normal free
* pages pool and have a different entry size (see real_pte_t) to
* everything else. Caches created by this function are used for all
* the higher level pagetables, and for hugepage pagetables.
*/
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
char *name;
unsigned long table_size = sizeof(void *) << shift;
unsigned long align = table_size;
/* When batching pgtable pointers for RCU freeing, we store
* the index size in the low bits. Table alignment must be
* big enough to fit it.
*
* Likewise, hugepage pagetable pointers contain a (different)
* shift value in the low bits. All tables must be aligned so
* as to leave enough 0 bits in the address to contain it. */
unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
HUGEPD_SHIFT_MASK + 1);
struct kmem_cache *new;
/* It would be nice if this was a BUILD_BUG_ON(), but at the
* moment, gcc doesn't seem to recognize is_power_of_2 as a
* constant expression; so much for that. */
BUG_ON(!is_power_of_2(minalign));
BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));
if (PGT_CACHE(shift))
return; /* Already have a cache of this size */
align = max_t(unsigned long, align, minalign);
name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
new = kmem_cache_create(name, table_size, align, 0, ctor);
kfree(name);
pgtable_cache[shift - 1] = new;
pr_debug("Allocated pgtable cache for order %d\n", shift);
}
void pgtable_cache_init(void)
{
pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
/*
* In all current configs, when the PUD index exists it's the
* same size as either the pgd or pmd index except with THP enabled
* on book3s 64
*/
if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
if (!PGT_CACHE(PGD_INDEX_SIZE))
panic("Couldn't allocate pgd cache");
if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
panic("Couldn't allocate pmd pgtable caches");
if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
panic("Couldn't allocate pud pgtable caches");
}
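PGT_CACHE(shift) presumably resolves to pgtable_cache[shift - 1], so once pgtable_cache_init() has run, callers allocate higher-level tables through the ordinary slab API, and the constructor keeps fresh objects zeroed. A hypothetical caller:

	/* grab and release a PUD-sized table from the shift-indexed cache */
	pud_t *pud = kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);

	if (pud)
		kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);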
...@@ -80,83 +80,6 @@ EXPORT_SYMBOL_GPL(memstart_addr); ...@@ -80,83 +80,6 @@ EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr; phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr); EXPORT_SYMBOL_GPL(kernstart_addr);
static void pgd_ctor(void *addr)
{
memset(addr, 0, PGD_TABLE_SIZE);
}
static void pud_ctor(void *addr)
{
memset(addr, 0, PUD_TABLE_SIZE);
}
static void pmd_ctor(void *addr)
{
memset(addr, 0, PMD_TABLE_SIZE);
}
struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
/*
* Create a kmem_cache() for pagetables. This is not used for PTE
* pages - they're linked to struct page, come from the normal free
* pages pool and have a different entry size (see real_pte_t) to
* everything else. Caches created by this function are used for all
* the higher level pagetables, and for hugepage pagetables.
*/
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
char *name;
unsigned long table_size = sizeof(void *) << shift;
unsigned long align = table_size;
/* When batching pgtable pointers for RCU freeing, we store
* the index size in the low bits. Table alignment must be
* big enough to fit it.
*
* Likewise, hugepage pagetable pointers contain a (different)
* shift value in the low bits. All tables must be aligned so
* as to leave enough 0 bits in the address to contain it. */
unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
HUGEPD_SHIFT_MASK + 1);
struct kmem_cache *new;
/* It would be nice if this was a BUILD_BUG_ON(), but at the
* moment, gcc doesn't seem to recognize is_power_of_2 as a
* constant expression; so much for that. */
BUG_ON(!is_power_of_2(minalign));
BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));
if (PGT_CACHE(shift))
return; /* Already have a cache of this size */
align = max_t(unsigned long, align, minalign);
name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
new = kmem_cache_create(name, table_size, align, 0, ctor);
kfree(name);
pgtable_cache[shift - 1] = new;
pr_debug("Allocated pgtable cache for order %d\n", shift);
}
void pgtable_cache_init(void)
{
pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
/*
* In all current configs, when the PUD index exists it's the
* same size as either the pgd or pmd index except with THP enabled
* on book3s 64
*/
if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX))
panic("Couldn't allocate pgtable caches");
if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
panic("Couldn't allocate pud pgtable caches");
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP #ifdef CONFIG_SPARSEMEM_VMEMMAP
/* /*
* Given an address within the vmemmap, determine the pfn of the page that * Given an address within the vmemmap, determine the pfn of the page that
......
...@@ -42,43 +42,6 @@ EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */ ...@@ -42,43 +42,6 @@ EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */
extern char etext[], _stext[], _sinittext[], _einittext[]; extern char etext[], _stext[], _sinittext[], _einittext[];
#define PGDIR_ORDER (32 + PGD_T_LOG2 - PGDIR_SHIFT)
#ifndef CONFIG_PPC_4K_PAGES
static struct kmem_cache *pgtable_cache;
void pgtable_cache_init(void)
{
pgtable_cache = kmem_cache_create("PGDIR cache", 1 << PGDIR_ORDER,
1 << PGDIR_ORDER, 0, NULL);
if (pgtable_cache == NULL)
panic("Couldn't allocate pgtable caches");
}
#endif
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *ret;
/* pgdir takes a page or two with 4K pages, and a page fraction otherwise */
#ifndef CONFIG_PPC_4K_PAGES
ret = kmem_cache_alloc(pgtable_cache, GFP_KERNEL | __GFP_ZERO);
#else
ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
PGDIR_ORDER - PAGE_SHIFT);
#endif
return ret;
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifndef CONFIG_PPC_4K_PAGES
kmem_cache_free(pgtable_cache, (void *)pgd);
#else
free_pages((unsigned long)pgd, PGDIR_ORDER - PAGE_SHIFT);
#endif
}
__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) __ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{ {
pte_t *pte; pte_t *pte;
......
...@@ -53,7 +53,7 @@ ...@@ -53,7 +53,7 @@
* other sizes not listed here. The .ind field is only used on MMUs that have * other sizes not listed here. The .ind field is only used on MMUs that have
* indirect page table entries. * indirect page table entries.
*/ */
#ifdef CONFIG_PPC_BOOK3E_MMU #if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx)
#ifdef CONFIG_PPC_FSL_BOOK3E #ifdef CONFIG_PPC_FSL_BOOK3E
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
[MMU_PAGE_4K] = { [MMU_PAGE_4K] = {
...@@ -85,6 +85,25 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { ...@@ -85,6 +85,25 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
.enc = BOOK3E_PAGESZ_1GB, .enc = BOOK3E_PAGESZ_1GB,
}, },
}; };
#elif defined(CONFIG_PPC_8xx)
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
/* we only manage 4k and 16k pages as normal pages */
#ifdef CONFIG_PPC_4K_PAGES
[MMU_PAGE_4K] = {
.shift = 12,
},
#else
[MMU_PAGE_16K] = {
.shift = 14,
},
#endif
[MMU_PAGE_512K] = {
.shift = 19,
},
[MMU_PAGE_8M] = {
.shift = 23,
},
};
#else #else
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
[MMU_PAGE_4K] = { [MMU_PAGE_4K] = {
......
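With 8xx now populating mmu_psize_defs[], the usual reverse lookup from a page-size shift to an MMU_PAGE_* index works unchanged; it is a linear scan along the lines of the generic helper:

	/* shift -> MMU_PAGE_* index, or -1 if the size is not supported */
	static int shift_to_mmu_psize(unsigned int shift)
	{
		int psize;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
			if (mmu_psize_defs[psize].shift == shift)
				return psize;
		return -1;
	}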
...@@ -441,8 +441,4 @@ static struct platform_driver pmc_driver = { ...@@ -441,8 +441,4 @@ static struct platform_driver pmc_driver = {
.remove = pmc_remove .remove = pmc_remove
}; };
static int pmc_init(void) builtin_platform_driver(pmc_driver);
{
return platform_driver_register(&pmc_driver);
}
device_initcall(pmc_init);
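builtin_platform_driver() generates exactly the boilerplate being deleted; per include/linux/platform_device.h it expands, roughly, to:

	/* what builtin_platform_driver(pmc_driver) produces */
	static int __init pmc_driver_init(void)
	{
		return platform_driver_register(&pmc_driver);
	}
	device_initcall(pmc_driver_init);

The same conversion is applied to the second pmc driver further down.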
...@@ -253,6 +253,8 @@ endif # PPC32 ...@@ -253,6 +253,8 @@ endif # PPC32
config PPC_QEMU_E500 config PPC_QEMU_E500
bool "QEMU generic e500 platform" bool "QEMU generic e500 platform"
select DEFAULT_UIMAGE select DEFAULT_UIMAGE
select E500
select PPC_E500MC if PPC64
help help
This option enables support for running as a QEMU guest using This option enables support for running as a QEMU guest using
QEMU's generic e500 machine. This is not required if you're QEMU's generic e500 machine. This is not required if you're
......
...@@ -130,6 +130,7 @@ config 8xx_CPU6 ...@@ -130,6 +130,7 @@ config 8xx_CPU6
config 8xx_CPU15 config 8xx_CPU15
bool "CPU15 Silicon Errata" bool "CPU15 Silicon Errata"
depends on !HUGETLB_PAGE
default y default y
help help
This enables a workaround for erratum CPU15 on MPC8xx chips. This enables a workaround for erratum CPU15 on MPC8xx chips.
......
...@@ -34,6 +34,7 @@ config PPC_8xx ...@@ -34,6 +34,7 @@ config PPC_8xx
select FSL_SOC select FSL_SOC
select 8xx select 8xx
select PPC_LIB_RHEAP select PPC_LIB_RHEAP
select SYS_SUPPORTS_HUGETLBFS
config 40x config 40x
bool "AMCC 40x" bool "AMCC 40x"
......
...@@ -31,7 +31,7 @@ ...@@ -31,7 +31,7 @@
#include <asm/prom.h> #include <asm/prom.h>
#include <asm/fsl_lbc.h> #include <asm/fsl_lbc.h>
static spinlock_t fsl_lbc_lock = __SPIN_LOCK_UNLOCKED(fsl_lbc_lock); static DEFINE_SPINLOCK(fsl_lbc_lock);
struct fsl_lbc_ctrl *fsl_lbc_ctrl_dev; struct fsl_lbc_ctrl *fsl_lbc_ctrl_dev;
EXPORT_SYMBOL(fsl_lbc_ctrl_dev); EXPORT_SYMBOL(fsl_lbc_ctrl_dev);
......
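DEFINE_SPINLOCK() is the checkpatch-preferred spelling of the same static initializer; include/linux/spinlock_types.h defines it as:

	#define DEFINE_SPINLOCK(x)	spinlock_t x = __SPIN_LOCK_UNLOCKED(x)

so the converted line produces an identical object. The same cleanup is applied to hp_lock in qman_test_stash.c below.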
...@@ -85,8 +85,4 @@ static struct platform_driver pmc_driver = { ...@@ -85,8 +85,4 @@ static struct platform_driver pmc_driver = {
.probe = pmc_probe, .probe = pmc_probe,
}; };
static int __init pmc_init(void) builtin_platform_driver(pmc_driver);
{
return platform_driver_register(&pmc_driver);
}
device_initcall(pmc_init);
...@@ -77,13 +77,10 @@ phys_addr_t get_immrbase(void) ...@@ -77,13 +77,10 @@ phys_addr_t get_immrbase(void)
EXPORT_SYMBOL(get_immrbase); EXPORT_SYMBOL(get_immrbase);
static u32 sysfreq = -1;
u32 fsl_get_sys_freq(void) u32 fsl_get_sys_freq(void)
{ {
static u32 sysfreq = -1;
struct device_node *soc; struct device_node *soc;
const u32 *prop;
int size;
if (sysfreq != -1) if (sysfreq != -1)
return sysfreq; return sysfreq;
...@@ -92,12 +89,9 @@ u32 fsl_get_sys_freq(void) ...@@ -92,12 +89,9 @@ u32 fsl_get_sys_freq(void)
if (!soc) if (!soc)
return -1; return -1;
prop = of_get_property(soc, "clock-frequency", &size); of_property_read_u32(soc, "clock-frequency", &sysfreq);
if (!prop || size != sizeof(*prop) || *prop == 0) if (sysfreq == -1 || !sysfreq)
prop = of_get_property(soc, "bus-frequency", &size); of_property_read_u32(soc, "bus-frequency", &sysfreq);
if (prop && size == sizeof(*prop))
sysfreq = *prop;
of_node_put(soc); of_node_put(soc);
return sysfreq; return sysfreq;
...@@ -106,23 +100,17 @@ EXPORT_SYMBOL(fsl_get_sys_freq); ...@@ -106,23 +100,17 @@ EXPORT_SYMBOL(fsl_get_sys_freq);
#if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx) #if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx)
static u32 brgfreq = -1;
u32 get_brgfreq(void) u32 get_brgfreq(void)
{ {
static u32 brgfreq = -1;
struct device_node *node; struct device_node *node;
const unsigned int *prop;
int size;
if (brgfreq != -1) if (brgfreq != -1)
return brgfreq; return brgfreq;
node = of_find_compatible_node(NULL, NULL, "fsl,cpm-brg"); node = of_find_compatible_node(NULL, NULL, "fsl,cpm-brg");
if (node) { if (node) {
prop = of_get_property(node, "clock-frequency", &size); of_property_read_u32(node, "clock-frequency", &brgfreq);
if (prop && size == 4)
brgfreq = *prop;
of_node_put(node); of_node_put(node);
return brgfreq; return brgfreq;
} }
...@@ -135,15 +123,11 @@ u32 get_brgfreq(void) ...@@ -135,15 +123,11 @@ u32 get_brgfreq(void)
node = of_find_node_by_type(NULL, "qe"); node = of_find_node_by_type(NULL, "qe");
if (node) { if (node) {
prop = of_get_property(node, "brg-frequency", &size); of_property_read_u32(node, "brg-frequency", &brgfreq);
if (prop && size == 4) if (brgfreq == -1 || !brgfreq)
brgfreq = *prop; if (!of_property_read_u32(node, "bus-frequency",
&brgfreq))
if (brgfreq == -1 || brgfreq == 0) { brgfreq /= 2;
prop = of_get_property(node, "bus-frequency", &size);
if (prop && size == 4)
brgfreq = *prop / 2;
}
of_node_put(node); of_node_put(node);
} }
...@@ -152,10 +136,9 @@ u32 get_brgfreq(void) ...@@ -152,10 +136,9 @@ u32 get_brgfreq(void)
EXPORT_SYMBOL(get_brgfreq); EXPORT_SYMBOL(get_brgfreq);
static u32 fs_baudrate = -1;
u32 get_baudrate(void) u32 get_baudrate(void)
{ {
static u32 fs_baudrate = -1;
struct device_node *node; struct device_node *node;
if (fs_baudrate != -1) if (fs_baudrate != -1)
...@@ -163,12 +146,7 @@ u32 get_baudrate(void) ...@@ -163,12 +146,7 @@ u32 get_baudrate(void)
node = of_find_node_by_type(NULL, "serial"); node = of_find_node_by_type(NULL, "serial");
if (node) { if (node) {
int size; of_property_read_u32(node, "current-speed", &fs_baudrate);
const unsigned int *prop = of_get_property(node,
"current-speed", &size);
if (prop)
fs_baudrate = *prop;
of_node_put(node); of_node_put(node);
} }
......
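All three conversions lean on of_property_read_u32() leaving the output variable untouched on failure, which is why the -1 "not yet probed" sentinel survives a missing or malformed property and the fallback chain can simply re-test it:

	static u32 sysfreq = -1;	/* sentinel: not yet probed */

	/* on error, sysfreq keeps its previous value */
	of_property_read_u32(soc, "clock-frequency", &sysfreq);
	if (sysfreq == -1 || !sysfreq)
		of_property_read_u32(soc, "bus-frequency", &sysfreq);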
...@@ -167,12 +167,12 @@ struct bm_portal { ...@@ -167,12 +167,12 @@ struct bm_portal {
/* Cache-inhibited register access. */ /* Cache-inhibited register access. */
static inline u32 bm_in(struct bm_portal *p, u32 offset) static inline u32 bm_in(struct bm_portal *p, u32 offset)
{ {
return __raw_readl(p->addr.ci + offset); return be32_to_cpu(__raw_readl(p->addr.ci + offset));
} }
static inline void bm_out(struct bm_portal *p, u32 offset, u32 val) static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
{ {
__raw_writel(val, p->addr.ci + offset); __raw_writel(cpu_to_be32(val), p->addr.ci + offset);
} }
/* Cache Enabled Portal Access */ /* Cache Enabled Portal Access */
...@@ -188,7 +188,7 @@ static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset) ...@@ -188,7 +188,7 @@ static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
static inline u32 bm_ce_in(struct bm_portal *p, u32 offset) static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
{ {
return __raw_readl(p->addr.ce + offset); return be32_to_cpu(__raw_readl(p->addr.ce + offset));
} }
struct bman_portal { struct bman_portal {
...@@ -391,7 +391,7 @@ static void bm_rcr_finish(struct bm_portal *portal) ...@@ -391,7 +391,7 @@ static void bm_rcr_finish(struct bm_portal *portal)
i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1); i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
if (i != rcr_ptr2idx(rcr->cursor)) if (i != rcr_ptr2idx(rcr->cursor))
pr_crit("losing uncommited RCR entries\n"); pr_crit("losing uncommitted RCR entries\n");
i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1); i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
if (i != rcr->ci) if (i != rcr->ci)
......
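__raw_readl()/__raw_writel() perform no byte swapping, so on a little-endian core the big-endian portal registers would read back byte-reversed without the explicit conversions. The wrapped pattern, with hypothetical names, is simply:

	/* portal registers are big-endian regardless of core endianness */
	static inline u32 portal_read(void __iomem *base, u32 offset)
	{
		return be32_to_cpu(__raw_readl(base + offset));
	}

	static inline void portal_write(void __iomem *base, u32 offset, u32 val)
	{
		__raw_writel(cpu_to_be32(val), base + offset);
	}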
...@@ -181,8 +181,7 @@ static int fsl_bman_probe(struct platform_device *pdev) ...@@ -181,8 +181,7 @@ static int fsl_bman_probe(struct platform_device *pdev)
node->full_name); node->full_name);
return -ENXIO; return -ENXIO;
} }
bm_ccsr_start = devm_ioremap(dev, res->start, bm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
res->end - res->start + 1);
if (!bm_ccsr_start) if (!bm_ccsr_start)
return -ENXIO; return -ENXIO;
......
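resource_size() from linux/ioport.h encapsulates the inclusive-bounds arithmetic the old line open-coded, avoiding the easy off-by-one:

	/* resource ranges are inclusive on both ends */
	static inline resource_size_t resource_size(const struct resource *res)
	{
		return res->end - res->start + 1;
	}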
...@@ -146,15 +146,19 @@ static int bman_portal_probe(struct platform_device *pdev) ...@@ -146,15 +146,19 @@ static int bman_portal_probe(struct platform_device *pdev)
pcfg->irq = irq; pcfg->irq = irq;
va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0); va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
if (!va) if (!va) {
dev_err(dev, "ioremap::CE failed\n");
goto err_ioremap1; goto err_ioremap1;
}
pcfg->addr_virt[DPAA_PORTAL_CE] = va; pcfg->addr_virt[DPAA_PORTAL_CE] = va;
va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]), va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
_PAGE_GUARDED | _PAGE_NO_CACHE); _PAGE_GUARDED | _PAGE_NO_CACHE);
if (!va) if (!va) {
dev_err(dev, "ioremap::CI failed\n");
goto err_ioremap2; goto err_ioremap2;
}
pcfg->addr_virt[DPAA_PORTAL_CI] = va; pcfg->addr_virt[DPAA_PORTAL_CI] = va;
...@@ -170,8 +174,10 @@ static int bman_portal_probe(struct platform_device *pdev) ...@@ -170,8 +174,10 @@ static int bman_portal_probe(struct platform_device *pdev)
spin_unlock(&bman_lock); spin_unlock(&bman_lock);
pcfg->cpu = cpu; pcfg->cpu = cpu;
if (!init_pcfg(pcfg)) if (!init_pcfg(pcfg)) {
goto err_ioremap2; dev_err(dev, "portal init failed\n");
goto err_portal_init;
}
/* clear irq affinity if assigned cpu is offline */ /* clear irq affinity if assigned cpu is offline */
if (!cpu_online(cpu)) if (!cpu_online(cpu))
...@@ -179,10 +185,11 @@ static int bman_portal_probe(struct platform_device *pdev) ...@@ -179,10 +185,11 @@ static int bman_portal_probe(struct platform_device *pdev)
return 0; return 0;
err_portal_init:
iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
err_ioremap2: err_ioremap2:
iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]); iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
err_ioremap1: err_ioremap1:
dev_err(dev, "ioremap failed\n");
return -ENXIO; return -ENXIO;
} }
......
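The added err_portal_init label restores the invariant that each label undoes exactly the steps that succeeded before the failing one, in reverse order of acquisition. The general shape of the idiom, with hypothetical helpers:

	a = acquire_a();
	if (!a)
		goto err_a;
	b = acquire_b();
	if (!b)
		goto err_b;
	if (setup_c() < 0)
		goto err_c;	/* must release b, then a */
	return 0;

err_c:
	release_b(b);
err_b:
	release_a(a);
err_a:
	return -ENXIO;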
...@@ -38,6 +38,7 @@ ...@@ -38,6 +38,7 @@
#include <linux/kthread.h> #include <linux/kthread.h>
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h> #include <linux/of_reserved_mem.h>
#include <linux/prefetch.h> #include <linux/prefetch.h>
#include <linux/genalloc.h> #include <linux/genalloc.h>
......
...@@ -140,10 +140,10 @@ enum qm_mr_cmode { /* matches QCSP_CFG::MM */ ...@@ -140,10 +140,10 @@ enum qm_mr_cmode { /* matches QCSP_CFG::MM */
struct qm_eqcr_entry { struct qm_eqcr_entry {
u8 _ncw_verb; /* writes to this are non-coherent */ u8 _ncw_verb; /* writes to this are non-coherent */
u8 dca; u8 dca;
u16 seqnum; __be16 seqnum;
u32 orp; /* 24-bit */ u8 __reserved[4];
u32 fqid; /* 24-bit */ __be32 fqid; /* 24-bit */
u32 tag; __be32 tag;
struct qm_fd fd; struct qm_fd fd;
u8 __reserved3[32]; u8 __reserved3[32];
} __packed; } __packed;
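Retyping seqnum, fqid and tag as __be16/__be32 lets sparse flag any access that skips byte-order conversion; host code then goes through small helpers. The assumed shape of the qm_fqid_set()/qm_fqid_get() pair used throughout the rest of this patch:

	/* assumption: fqid is a 24-bit big-endian field */
	#define QM_FQID_MASK	GENMASK(23, 0)
	#define qm_fqid_set(p, v) ((p)->fqid = cpu_to_be32((v) & QM_FQID_MASK))
	#define qm_fqid_get(p)    (be32_to_cpu((p)->fqid) & QM_FQID_MASK)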
...@@ -183,41 +183,22 @@ struct qm_mr { ...@@ -183,41 +183,22 @@ struct qm_mr {
}; };
/* MC (Management Command) command */ /* MC (Management Command) command */
/* "Query FQ" */ /* "FQ" command layout */
struct qm_mcc_queryfq { struct qm_mcc_fq {
u8 _ncw_verb; u8 _ncw_verb;
u8 __reserved1[3]; u8 __reserved1[3];
u32 fqid; /* 24-bit */ __be32 fqid; /* 24-bit */
u8 __reserved2[56]; u8 __reserved2[56];
} __packed; } __packed;
/* "Alter FQ State Commands " */
struct qm_mcc_alterfq {
u8 _ncw_verb;
u8 __reserved1[3];
u32 fqid; /* 24-bit */
u8 __reserved2;
u8 count; /* number of consecutive FQID */
u8 __reserved3[10];
u32 context_b; /* frame queue context b */
u8 __reserved4[40];
} __packed;
/* "Query CGR" */ /* "CGR" command layout */
struct qm_mcc_querycgr { struct qm_mcc_cgr {
u8 _ncw_verb; u8 _ncw_verb;
u8 __reserved1[30]; u8 __reserved1[30];
u8 cgid; u8 cgid;
u8 __reserved2[32]; u8 __reserved2[32];
}; };
struct qm_mcc_querywq {
u8 _ncw_verb;
u8 __reserved;
/* select channel if verb != QUERYWQ_DEDICATED */
u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
u8 __reserved2[60];
} __packed;
#define QM_MCC_VERB_VBIT 0x80 #define QM_MCC_VERB_VBIT 0x80
#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */ #define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */
#define QM_MCC_VERB_INITFQ_PARKED 0x40 #define QM_MCC_VERB_INITFQ_PARKED 0x40
...@@ -243,12 +224,9 @@ union qm_mc_command { ...@@ -243,12 +224,9 @@ union qm_mc_command {
u8 __reserved[63]; u8 __reserved[63];
}; };
struct qm_mcc_initfq initfq; struct qm_mcc_initfq initfq;
struct qm_mcc_queryfq queryfq;
struct qm_mcc_alterfq alterfq;
struct qm_mcc_initcgr initcgr; struct qm_mcc_initcgr initcgr;
struct qm_mcc_querycgr querycgr; struct qm_mcc_fq fq;
struct qm_mcc_querywq querywq; struct qm_mcc_cgr cgr;
struct qm_mcc_queryfq_np queryfq_np;
}; };
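Collapsing queryfq, alterfq and queryfq_np into a single qm_mcc_fq works because every MC command occupies the same 64-byte slot with the verb in byte 0; call sites differ only in the verb they commit, e.g.:

	/* one layout, many verbs: QUERYFQ, QUERYFQ_NP, ALTER_RETIRE, ... */
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);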
/* MC (Management Command) result */ /* MC (Management Command) result */
...@@ -343,12 +321,12 @@ struct qm_portal { ...@@ -343,12 +321,12 @@ struct qm_portal {
/* Cache-inhibited register access. */ /* Cache-inhibited register access. */
static inline u32 qm_in(struct qm_portal *p, u32 offset) static inline u32 qm_in(struct qm_portal *p, u32 offset)
{ {
return __raw_readl(p->addr.ci + offset); return be32_to_cpu(__raw_readl(p->addr.ci + offset));
} }
static inline void qm_out(struct qm_portal *p, u32 offset, u32 val) static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
{ {
__raw_writel(val, p->addr.ci + offset); __raw_writel(cpu_to_be32(val), p->addr.ci + offset);
} }
/* Cache Enabled Portal Access */ /* Cache Enabled Portal Access */
...@@ -364,7 +342,7 @@ static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset) ...@@ -364,7 +342,7 @@ static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
static inline u32 qm_ce_in(struct qm_portal *p, u32 offset) static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
{ {
return __raw_readl(p->addr.ce + offset); return be32_to_cpu(__raw_readl(p->addr.ce + offset));
} }
/* --- EQCR API --- */ /* --- EQCR API --- */
...@@ -443,7 +421,7 @@ static inline void qm_eqcr_finish(struct qm_portal *portal) ...@@ -443,7 +421,7 @@ static inline void qm_eqcr_finish(struct qm_portal *portal)
DPAA_ASSERT(!eqcr->busy); DPAA_ASSERT(!eqcr->busy);
if (pi != eqcr_ptr2idx(eqcr->cursor)) if (pi != eqcr_ptr2idx(eqcr->cursor))
pr_crit("losing uncommited EQCR entries\n"); pr_crit("losing uncommitted EQCR entries\n");
if (ci != eqcr->ci) if (ci != eqcr->ci)
pr_crit("missing existing EQCR completions\n"); pr_crit("missing existing EQCR completions\n");
if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor)) if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
...@@ -492,8 +470,7 @@ static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal ...@@ -492,8 +470,7 @@ static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
static inline void eqcr_commit_checks(struct qm_eqcr *eqcr) static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
{ {
DPAA_ASSERT(eqcr->busy); DPAA_ASSERT(eqcr->busy);
DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff)); DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff));
DPAA_ASSERT(eqcr->available >= 1); DPAA_ASSERT(eqcr->available >= 1);
} }
...@@ -962,8 +939,6 @@ struct qman_portal { ...@@ -962,8 +939,6 @@ struct qman_portal {
u32 sdqcr; u32 sdqcr;
/* probing time config params for cpu-affine portals */ /* probing time config params for cpu-affine portals */
const struct qm_portal_config *config; const struct qm_portal_config *config;
/* needed for providing a non-NULL device to dma_map_***() */
struct platform_device *pdev;
/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */ /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
struct qman_cgrs *cgrs; struct qman_cgrs *cgrs;
/* linked-list of CSCN handlers. */ /* linked-list of CSCN handlers. */
...@@ -1133,7 +1108,6 @@ static int qman_create_portal(struct qman_portal *portal, ...@@ -1133,7 +1108,6 @@ static int qman_create_portal(struct qman_portal *portal,
const struct qman_cgrs *cgrs) const struct qman_cgrs *cgrs)
{ {
struct qm_portal *p; struct qm_portal *p;
char buf[16];
int ret; int ret;
u32 isdr; u32 isdr;
...@@ -1196,15 +1170,6 @@ static int qman_create_portal(struct qman_portal *portal, ...@@ -1196,15 +1170,6 @@ static int qman_create_portal(struct qman_portal *portal,
portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 | portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS | QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED; QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
sprintf(buf, "qportal-%d", c->channel);
portal->pdev = platform_device_alloc(buf, -1);
if (!portal->pdev)
goto fail_devalloc;
if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40)))
goto fail_devadd;
ret = platform_device_add(portal->pdev);
if (ret)
goto fail_devadd;
isdr = 0xffffffff; isdr = 0xffffffff;
qm_out(p, QM_REG_ISDR, isdr); qm_out(p, QM_REG_ISDR, isdr);
portal->irq_sources = 0; portal->irq_sources = 0;
...@@ -1239,8 +1204,8 @@ static int qman_create_portal(struct qman_portal *portal, ...@@ -1239,8 +1204,8 @@ static int qman_create_portal(struct qman_portal *portal,
/* special handling, drain just in case it's a few FQRNIs */ /* special handling, drain just in case it's a few FQRNIs */
const union qm_mr_entry *e = qm_mr_current(p); const union qm_mr_entry *e = qm_mr_current(p);
dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x\n, addr 0x%x", dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
e->verb, e->ern.rc, e->ern.fd.addr_lo); e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
goto fail_dqrr_mr_empty; goto fail_dqrr_mr_empty;
} }
/* Success */ /* Success */
...@@ -1256,10 +1221,6 @@ static int qman_create_portal(struct qman_portal *portal, ...@@ -1256,10 +1221,6 @@ static int qman_create_portal(struct qman_portal *portal,
fail_affinity: fail_affinity:
free_irq(c->irq, portal); free_irq(c->irq, portal);
fail_irq: fail_irq:
platform_device_del(portal->pdev);
fail_devadd:
platform_device_put(portal->pdev);
fail_devalloc:
kfree(portal->cgrs); kfree(portal->cgrs);
fail_cgrs: fail_cgrs:
qm_mc_finish(p); qm_mc_finish(p);
...@@ -1321,9 +1282,6 @@ static void qman_destroy_portal(struct qman_portal *qm) ...@@ -1321,9 +1282,6 @@ static void qman_destroy_portal(struct qman_portal *qm)
qm_dqrr_finish(&qm->p); qm_dqrr_finish(&qm->p);
qm_eqcr_finish(&qm->p); qm_eqcr_finish(&qm->p);
platform_device_del(qm->pdev);
platform_device_put(qm->pdev);
qm->config = NULL; qm->config = NULL;
} }
...@@ -1428,7 +1386,7 @@ static void qm_mr_process_task(struct work_struct *work) ...@@ -1428,7 +1386,7 @@ static void qm_mr_process_task(struct work_struct *work)
case QM_MR_VERB_FQRN: case QM_MR_VERB_FQRN:
case QM_MR_VERB_FQRL: case QM_MR_VERB_FQRL:
/* Lookup in the retirement table */ /* Lookup in the retirement table */
fq = fqid_to_fq(msg->fq.fqid); fq = fqid_to_fq(qm_fqid_get(&msg->fq));
if (WARN_ON(!fq)) if (WARN_ON(!fq))
break; break;
fq_state_change(p, fq, msg, verb); fq_state_change(p, fq, msg, verb);
...@@ -1437,7 +1395,7 @@ static void qm_mr_process_task(struct work_struct *work) ...@@ -1437,7 +1395,7 @@ static void qm_mr_process_task(struct work_struct *work)
break; break;
case QM_MR_VERB_FQPN: case QM_MR_VERB_FQPN:
/* Parked */ /* Parked */
fq = tag_to_fq(msg->fq.contextB); fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
fq_state_change(p, fq, msg, verb); fq_state_change(p, fq, msg, verb);
if (fq->cb.fqs) if (fq->cb.fqs)
fq->cb.fqs(p, fq, msg); fq->cb.fqs(p, fq, msg);
...@@ -1451,7 +1409,7 @@ static void qm_mr_process_task(struct work_struct *work) ...@@ -1451,7 +1409,7 @@ static void qm_mr_process_task(struct work_struct *work)
} }
} else { } else {
/* It's a software ERN */ /* It's a software ERN */
fq = tag_to_fq(msg->ern.tag); fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
fq->cb.ern(p, fq, msg); fq->cb.ern(p, fq, msg);
} }
num++; num++;
...@@ -1536,7 +1494,7 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p, ...@@ -1536,7 +1494,7 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) { if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
/* /*
* VDQCR: don't trust contextB as the FQ may have * VDQCR: don't trust context_b as the FQ may have
* been configured for h/w consumption and we're * been configured for h/w consumption and we're
* draining it post-retirement. * draining it post-retirement.
*/ */
...@@ -1562,8 +1520,8 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p, ...@@ -1562,8 +1520,8 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED) if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
clear_vdqcr(p, fq); clear_vdqcr(p, fq);
} else { } else {
/* SDQCR: contextB points to the FQ */ /* SDQCR: context_b points to the FQ */
fq = tag_to_fq(dq->contextB); fq = tag_to_fq(be32_to_cpu(dq->context_b));
/* Now let the callback do its stuff */ /* Now let the callback do its stuff */
res = fq->cb.dqrr(p, fq, dq); res = fq->cb.dqrr(p, fq, dq);
/* /*
...@@ -1780,9 +1738,9 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) ...@@ -1780,9 +1738,9 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
return -EINVAL; return -EINVAL;
#endif #endif
if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) { if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
/* And can't be set at the same time as TDTHRESH */ /* And can't be set at the same time as TDTHRESH */
if (opts->we_mask & QM_INITFQ_WE_TDTHRESH) if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
return -EINVAL; return -EINVAL;
} }
/* Issue an INITFQ_[PARKED|SCHED] management command */ /* Issue an INITFQ_[PARKED|SCHED] management command */
...@@ -1796,37 +1754,49 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) ...@@ -1796,37 +1754,49 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
mcc = qm_mc_start(&p->p); mcc = qm_mc_start(&p->p);
if (opts) if (opts)
mcc->initfq = *opts; mcc->initfq = *opts;
mcc->initfq.fqid = fq->fqid; qm_fqid_set(&mcc->fq, fq->fqid);
mcc->initfq.count = 0; mcc->initfq.count = 0;
/* /*
* If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
* demux pointer. Otherwise, the caller-provided value is allowed to * demux pointer. Otherwise, the caller-provided value is allowed to
* stand, don't overwrite it. * stand, don't overwrite it.
*/ */
if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) { if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
dma_addr_t phys_fq; dma_addr_t phys_fq;
mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB; mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
mcc->initfq.fqd.context_b = fq_to_tag(fq); mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
/* /*
* and the physical address - NB, if the user wasn't trying to * and the physical address - NB, if the user wasn't trying to
* set CONTEXTA, clear the stashing settings. * set CONTEXTA, clear the stashing settings.
*/ */
if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) { if (!(be16_to_cpu(mcc->initfq.we_mask) &
mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; QM_INITFQ_WE_CONTEXTA)) {
mcc->initfq.we_mask |=
cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
memset(&mcc->initfq.fqd.context_a, 0, memset(&mcc->initfq.fqd.context_a, 0,
sizeof(mcc->initfq.fqd.context_a)); sizeof(mcc->initfq.fqd.context_a));
} else { } else {
phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq), struct qman_portal *p = qman_dma_portal;
DMA_TO_DEVICE);
phys_fq = dma_map_single(p->config->dev, fq,
sizeof(*fq), DMA_TO_DEVICE);
if (dma_mapping_error(p->config->dev, phys_fq)) {
dev_err(p->config->dev, "dma_mapping failed\n");
ret = -EIO;
goto out;
}
qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq); qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
} }
} }
if (flags & QMAN_INITFQ_FLAG_LOCAL) { if (flags & QMAN_INITFQ_FLAG_LOCAL) {
int wq = 0; int wq = 0;
if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) { if (!(be16_to_cpu(mcc->initfq.we_mask) &
mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ; QM_INITFQ_WE_DESTWQ)) {
mcc->initfq.we_mask |=
cpu_to_be16(QM_INITFQ_WE_DESTWQ);
wq = 4; wq = 4;
} }
qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq); qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
...@@ -1845,13 +1815,13 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) ...@@ -1845,13 +1815,13 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
goto out; goto out;
} }
if (opts) { if (opts) {
if (opts->we_mask & QM_INITFQ_WE_FQCTRL) { if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE) if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
fq_set(fq, QMAN_FQ_STATE_CGR_EN); fq_set(fq, QMAN_FQ_STATE_CGR_EN);
else else
fq_clear(fq, QMAN_FQ_STATE_CGR_EN); fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
} }
if (opts->we_mask & QM_INITFQ_WE_CGID) if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
fq->cgr_groupid = opts->fqd.cgid; fq->cgr_groupid = opts->fqd.cgid;
} }
fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ? fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
...@@ -1884,7 +1854,7 @@ int qman_schedule_fq(struct qman_fq *fq) ...@@ -1884,7 +1854,7 @@ int qman_schedule_fq(struct qman_fq *fq)
goto out; goto out;
} }
mcc = qm_mc_start(&p->p); mcc = qm_mc_start(&p->p);
mcc->alterfq.fqid = fq->fqid; qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED); qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
if (!qm_mc_result_timeout(&p->p, &mcr)) { if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_err(p->config->dev, "ALTER_SCHED timeout\n"); dev_err(p->config->dev, "ALTER_SCHED timeout\n");
...@@ -1927,7 +1897,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags) ...@@ -1927,7 +1897,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags)
goto out; goto out;
} }
mcc = qm_mc_start(&p->p); mcc = qm_mc_start(&p->p);
mcc->alterfq.fqid = fq->fqid; qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
if (!qm_mc_result_timeout(&p->p, &mcr)) { if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_crit(p->config->dev, "ALTER_RETIRE timeout\n"); dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
...@@ -1970,8 +1940,8 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags) ...@@ -1970,8 +1940,8 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags)
msg.verb = QM_MR_VERB_FQRNI; msg.verb = QM_MR_VERB_FQRNI;
msg.fq.fqs = mcr->alterfq.fqs; msg.fq.fqs = mcr->alterfq.fqs;
msg.fq.fqid = fq->fqid; qm_fqid_set(&msg.fq, fq->fqid);
msg.fq.contextB = fq_to_tag(fq); msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
fq->cb.fqs(p, fq, &msg); fq->cb.fqs(p, fq, &msg);
} }
} else if (res == QM_MCR_RESULT_PENDING) { } else if (res == QM_MCR_RESULT_PENDING) {
...@@ -2006,7 +1976,7 @@ int qman_oos_fq(struct qman_fq *fq) ...@@ -2006,7 +1976,7 @@ int qman_oos_fq(struct qman_fq *fq)
goto out; goto out;
} }
mcc = qm_mc_start(&p->p); mcc = qm_mc_start(&p->p);
mcc->alterfq.fqid = fq->fqid; qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
if (!qm_mc_result_timeout(&p->p, &mcr)) { if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT; ret = -ETIMEDOUT;
...@@ -2032,7 +2002,7 @@ int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd) ...@@ -2032,7 +2002,7 @@ int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
int ret = 0; int ret = 0;
mcc = qm_mc_start(&p->p); mcc = qm_mc_start(&p->p);
mcc->queryfq.fqid = fq->fqid; qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
if (!qm_mc_result_timeout(&p->p, &mcr)) { if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT; ret = -ETIMEDOUT;
...@@ -2058,7 +2028,7 @@ static int qman_query_fq_np(struct qman_fq *fq, ...@@ -2058,7 +2028,7 @@ static int qman_query_fq_np(struct qman_fq *fq,
int ret = 0; int ret = 0;
mcc = qm_mc_start(&p->p); mcc = qm_mc_start(&p->p);
mcc->queryfq.fqid = fq->fqid; qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
if (!qm_mc_result_timeout(&p->p, &mcr)) { if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT; ret = -ETIMEDOUT;
...@@ -2086,7 +2056,7 @@ static int qman_query_cgr(struct qman_cgr *cgr, ...@@ -2086,7 +2056,7 @@ static int qman_query_cgr(struct qman_cgr *cgr,
int ret = 0; int ret = 0;
mcc = qm_mc_start(&p->p); mcc = qm_mc_start(&p->p);
mcc->querycgr.cgid = cgr->cgrid; mcc->cgr.cgid = cgr->cgrid;
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR); qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
if (!qm_mc_result_timeout(&p->p, &mcr)) { if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT; ret = -ETIMEDOUT;
...@@ -2239,8 +2209,8 @@ int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd) ...@@ -2239,8 +2209,8 @@ int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
if (unlikely(!eq)) if (unlikely(!eq))
goto out; goto out;
eq->fqid = fq->fqid; qm_fqid_set(eq, fq->fqid);
eq->tag = fq_to_tag(fq); eq->tag = cpu_to_be32(fq_to_tag(fq));
eq->fd = *fd; eq->fd = *fd;
qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE); qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
...@@ -2282,7 +2252,24 @@ static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags, ...@@ -2282,7 +2252,24 @@ static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
} }
#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0) #define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
#define TARG_MASK(n) (BIT(31) >> PORTAL_IDX(n))
/* congestion state change notification target update control */
static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
{
if (qman_ip_rev >= QMAN_REV30)
cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
else
cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
}
static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
{
if (qman_ip_rev >= QMAN_REV30)
cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
else
cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
}
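QM_CGR_TARG_PORTAL() is presumably the per-portal bit that the retired TARG_MASK() computed, counting portals down from the most significant bit:

	/* assumption: mirrors TARG_MASK(n) = BIT(31) >> PORTAL_IDX(n) */
	#define QM_CGR_TARG_PORTAL(n)	(BIT(31) >> (n))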
static u8 qman_cgr_cpus[CGR_NUM]; static u8 qman_cgr_cpus[CGR_NUM];
...@@ -2305,7 +2292,6 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags, ...@@ -2305,7 +2292,6 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
struct qm_mcc_initcgr *opts) struct qm_mcc_initcgr *opts)
{ {
struct qm_mcr_querycgr cgr_state; struct qm_mcr_querycgr cgr_state;
struct qm_mcc_initcgr local_opts = {};
int ret; int ret;
struct qman_portal *p; struct qman_portal *p;
...@@ -2327,22 +2313,18 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags, ...@@ -2327,22 +2313,18 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
spin_lock(&p->cgr_lock); spin_lock(&p->cgr_lock);
if (opts) { if (opts) {
struct qm_mcc_initcgr local_opts = *opts;
ret = qman_query_cgr(cgr, &cgr_state); ret = qman_query_cgr(cgr, &cgr_state);
if (ret) if (ret)
goto out; goto out;
if (opts)
local_opts = *opts; qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) be32_to_cpu(cgr_state.cgr.cscn_targ));
local_opts.cgr.cscn_targ_upd_ctrl = local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);
QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
else
/* Overwrite TARG */
local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
TARG_MASK(p);
local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
/* send init if flags indicate so */ /* send init if flags indicate so */
if (opts && (flags & QMAN_CGR_FLAG_USE_INIT)) if (flags & QMAN_CGR_FLAG_USE_INIT)
ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
&local_opts); &local_opts);
else else
...@@ -2405,13 +2387,11 @@ int qman_delete_cgr(struct qman_cgr *cgr) ...@@ -2405,13 +2387,11 @@ int qman_delete_cgr(struct qman_cgr *cgr)
list_add(&cgr->node, &p->cgr_cbs); list_add(&cgr->node, &p->cgr_cbs);
goto release_lock; goto release_lock;
} }
/* Overwrite TARG */
local_opts.we_mask = QM_CGR_WE_CSCN_TARG; local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p); be32_to_cpu(cgr_state.cgr.cscn_targ));
else
local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
~(TARG_MASK(p));
ret = qm_modify_cgr(cgr, 0, &local_opts); ret = qm_modify_cgr(cgr, 0, &local_opts);
if (ret) if (ret)
/* add back to the list */ /* add back to the list */
...@@ -2501,7 +2481,7 @@ static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s, ...@@ -2501,7 +2481,7 @@ static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
} while (wait && !dqrr); } while (wait && !dqrr);
while (dqrr) { while (dqrr) {
if (dqrr->fqid == fqid && (dqrr->stat & s)) if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))
found = 1; found = 1;
qm_dqrr_cdc_consume_1ptr(p, dqrr, 0); qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
qm_dqrr_pvb_update(p); qm_dqrr_pvb_update(p);
...@@ -2537,7 +2517,7 @@ static int qman_shutdown_fq(u32 fqid) ...@@ -2537,7 +2517,7 @@ static int qman_shutdown_fq(u32 fqid)
dev = p->config->dev; dev = p->config->dev;
/* Determine the state of the FQID */ /* Determine the state of the FQID */
mcc = qm_mc_start(&p->p); mcc = qm_mc_start(&p->p);
mcc->queryfq_np.fqid = fqid; qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
if (!qm_mc_result_timeout(&p->p, &mcr)) { if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_err(dev, "QUERYFQ_NP timeout\n"); dev_err(dev, "QUERYFQ_NP timeout\n");
...@@ -2552,7 +2532,7 @@ static int qman_shutdown_fq(u32 fqid) ...@@ -2552,7 +2532,7 @@ static int qman_shutdown_fq(u32 fqid)
/* Query which channel the FQ is using */ /* Query which channel the FQ is using */
mcc = qm_mc_start(&p->p); mcc = qm_mc_start(&p->p);
mcc->queryfq.fqid = fqid; qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
if (!qm_mc_result_timeout(&p->p, &mcr)) { if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_err(dev, "QUERYFQ timeout\n"); dev_err(dev, "QUERYFQ timeout\n");
...@@ -2572,7 +2552,7 @@ static int qman_shutdown_fq(u32 fqid) ...@@ -2572,7 +2552,7 @@ static int qman_shutdown_fq(u32 fqid)
case QM_MCR_NP_STATE_PARKED: case QM_MCR_NP_STATE_PARKED:
orl_empty = 0; orl_empty = 0;
mcc = qm_mc_start(&p->p); mcc = qm_mc_start(&p->p);
mcc->alterfq.fqid = fqid; qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
if (!qm_mc_result_timeout(&p->p, &mcr)) { if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_err(dev, "QUERYFQ_NP timeout\n"); dev_err(dev, "QUERYFQ_NP timeout\n");
...@@ -2667,7 +2647,7 @@ static int qman_shutdown_fq(u32 fqid) ...@@ -2667,7 +2647,7 @@ static int qman_shutdown_fq(u32 fqid)
cpu_relax(); cpu_relax();
} }
mcc = qm_mc_start(&p->p); mcc = qm_mc_start(&p->p);
mcc->alterfq.fqid = fqid; qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
if (!qm_mc_result_timeout(&p->p, &mcr)) { if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT; ret = -ETIMEDOUT;
...@@ -2687,7 +2667,7 @@ static int qman_shutdown_fq(u32 fqid) ...@@ -2687,7 +2667,7 @@ static int qman_shutdown_fq(u32 fqid)
case QM_MCR_NP_STATE_RETIRED: case QM_MCR_NP_STATE_RETIRED:
/* Send OOS Command */ /* Send OOS Command */
mcc = qm_mc_start(&p->p); mcc = qm_mc_start(&p->p);
mcc->alterfq.fqid = fqid; qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
if (!qm_mc_result_timeout(&p->p, &mcr)) { if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT; ret = -ETIMEDOUT;
...@@ -2722,6 +2702,7 @@ const struct qm_portal_config *qman_get_qm_portal_config( ...@@ -2722,6 +2702,7 @@ const struct qm_portal_config *qman_get_qm_portal_config(
{ {
return portal->config; return portal->config;
} }
EXPORT_SYMBOL(qman_get_qm_portal_config);
struct gen_pool *qm_fqalloc; /* FQID allocator */ struct gen_pool *qm_fqalloc; /* FQID allocator */
struct gen_pool *qm_qpalloc; /* pool-channel allocator */ struct gen_pool *qm_qpalloc; /* pool-channel allocator */
...@@ -2789,15 +2770,18 @@ static int qpool_cleanup(u32 qp) ...@@ -2789,15 +2770,18 @@ static int qpool_cleanup(u32 qp)
struct qm_mcr_queryfq_np np; struct qm_mcr_queryfq_np np;
err = qman_query_fq_np(&fq, &np); err = qman_query_fq_np(&fq, &np);
if (err) if (err == -ERANGE)
/* FQID range exceeded, found no problems */ /* FQID range exceeded, found no problems */
return 0; return 0;
else if (WARN_ON(err))
return err;
if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
struct qm_fqd fqd; struct qm_fqd fqd;
err = qman_query_fq(&fq, &fqd); err = qman_query_fq(&fq, &fqd);
if (WARN_ON(err)) if (WARN_ON(err))
return 0; return err;
if (qm_fqd_get_chan(&fqd) == qp) { if (qm_fqd_get_chan(&fqd) == qp) {
/* The channel is the FQ's target, clean it */ /* The channel is the FQ's target, clean it */
err = qman_shutdown_fq(fq.fqid); err = qman_shutdown_fq(fq.fqid);
...@@ -2836,7 +2820,7 @@ static int cgr_cleanup(u32 cgrid) ...@@ -2836,7 +2820,7 @@ static int cgr_cleanup(u32 cgrid)
* error, looking for non-OOS FQDs whose CGR is the CGR being released * error, looking for non-OOS FQDs whose CGR is the CGR being released
*/ */
struct qman_fq fq = { struct qman_fq fq = {
.fqid = 1 .fqid = QM_FQID_RANGE_START
}; };
int err; int err;
...@@ -2844,16 +2828,19 @@ static int cgr_cleanup(u32 cgrid) ...@@ -2844,16 +2828,19 @@ static int cgr_cleanup(u32 cgrid)
struct qm_mcr_queryfq_np np; struct qm_mcr_queryfq_np np;
err = qman_query_fq_np(&fq, &np); err = qman_query_fq_np(&fq, &np);
if (err) if (err == -ERANGE)
/* FQID range exceeded, found no problems */ /* FQID range exceeded, found no problems */
return 0; return 0;
else if (WARN_ON(err))
return err;
if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
struct qm_fqd fqd; struct qm_fqd fqd;
err = qman_query_fq(&fq, &fqd); err = qman_query_fq(&fq, &fqd);
if (WARN_ON(err)) if (WARN_ON(err))
return 0; return err;
if ((fqd.fq_ctrl & QM_FQCTRL_CGE) && if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE &&
fqd.cgid == cgrid) { fqd.cgid == cgrid) {
pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n", pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
cgrid, fq.fqid); cgrid, fq.fqid);
......
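qpool_cleanup() and cgr_cleanup() now share the same scan idiom and error discipline: -ERANGE from QUERYFQ_NP marks the end of the FQID space and is the clean exit, while any other error propagates. In outline (a sketch, not the verbatim loop):

	struct qman_fq fq = { .fqid = QM_FQID_RANGE_START };
	struct qm_mcr_queryfq_np np;
	int err;

	for (;; fq.fqid++) {
		err = qman_query_fq_np(&fq, &np);
		if (err == -ERANGE)
			return 0;	/* past the last FQID: nothing found */
		else if (WARN_ON(err))
			return err;
		/* ... inspect np.state and, when needed, the full FQD ... */
	}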
...@@ -444,6 +444,9 @@ static int zero_priv_mem(struct device *dev, struct device_node *node, ...@@ -444,6 +444,9 @@ static int zero_priv_mem(struct device *dev, struct device_node *node,
/* map as cacheable, non-guarded */ /* map as cacheable, non-guarded */
void __iomem *tmpp = ioremap_prot(addr, sz, 0); void __iomem *tmpp = ioremap_prot(addr, sz, 0);
if (!tmpp)
return -ENOMEM;
memset_io(tmpp, 0, sz); memset_io(tmpp, 0, sz);
flush_dcache_range((unsigned long)tmpp, flush_dcache_range((unsigned long)tmpp,
(unsigned long)tmpp + sz); (unsigned long)tmpp + sz);
......
...@@ -30,6 +30,9 @@ ...@@ -30,6 +30,9 @@
#include "qman_priv.h" #include "qman_priv.h"
struct qman_portal *qman_dma_portal;
EXPORT_SYMBOL(qman_dma_portal);
/* Enable portal interrupts (as opposed to polling mode) */ /* Enable portal interrupts (as opposed to polling mode) */
#define CONFIG_FSL_DPA_PIRQ_SLOW 1 #define CONFIG_FSL_DPA_PIRQ_SLOW 1
#define CONFIG_FSL_DPA_PIRQ_FAST 1 #define CONFIG_FSL_DPA_PIRQ_FAST 1
...@@ -150,6 +153,10 @@ static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg) ...@@ -150,6 +153,10 @@ static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
/* all assigned portals are initialized now */ /* all assigned portals are initialized now */
qman_init_cgr_all(); qman_init_cgr_all();
} }
if (!qman_dma_portal)
qman_dma_portal = p;
spin_unlock(&qman_lock); spin_unlock(&qman_lock);
dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu); dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
...@@ -238,9 +245,9 @@ static int qman_portal_probe(struct platform_device *pdev) ...@@ -238,9 +245,9 @@ static int qman_portal_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node; struct device_node *node = dev->of_node;
struct qm_portal_config *pcfg; struct qm_portal_config *pcfg;
struct resource *addr_phys[2]; struct resource *addr_phys[2];
const u32 *channel;
void __iomem *va; void __iomem *va;
int irq, len, cpu; int irq, cpu, err;
u32 val;
pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
if (!pcfg) if (!pcfg)
...@@ -264,13 +271,13 @@ static int qman_portal_probe(struct platform_device *pdev) ...@@ -264,13 +271,13 @@ static int qman_portal_probe(struct platform_device *pdev)
return -ENXIO; return -ENXIO;
} }
channel = of_get_property(node, "cell-index", &len); err = of_property_read_u32(node, "cell-index", &val);
if (!channel || (len != 4)) { if (err) {
dev_err(dev, "Can't get %s property 'cell-index'\n", dev_err(dev, "Can't get %s property 'cell-index'\n",
node->full_name); node->full_name);
return -ENXIO; return err;
} }
pcfg->channel = *channel; pcfg->channel = val;
pcfg->cpu = -1; pcfg->cpu = -1;
irq = platform_get_irq(pdev, 0); irq = platform_get_irq(pdev, 0);
if (irq <= 0) { if (irq <= 0) {
...@@ -280,15 +287,19 @@ static int qman_portal_probe(struct platform_device *pdev) ...@@ -280,15 +287,19 @@ static int qman_portal_probe(struct platform_device *pdev)
pcfg->irq = irq; pcfg->irq = irq;
va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0); va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
if (!va) if (!va) {
dev_err(dev, "ioremap::CE failed\n");
goto err_ioremap1; goto err_ioremap1;
}
pcfg->addr_virt[DPAA_PORTAL_CE] = va; pcfg->addr_virt[DPAA_PORTAL_CE] = va;
va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]), va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
_PAGE_GUARDED | _PAGE_NO_CACHE); _PAGE_GUARDED | _PAGE_NO_CACHE);
if (!va) if (!va) {
dev_err(dev, "ioremap::CI failed\n");
goto err_ioremap2; goto err_ioremap2;
}
pcfg->addr_virt[DPAA_PORTAL_CI] = va; pcfg->addr_virt[DPAA_PORTAL_CI] = va;
...@@ -306,8 +317,15 @@ static int qman_portal_probe(struct platform_device *pdev) ...@@ -306,8 +317,15 @@ static int qman_portal_probe(struct platform_device *pdev)
spin_unlock(&qman_lock); spin_unlock(&qman_lock);
pcfg->cpu = cpu; pcfg->cpu = cpu;
if (!init_pcfg(pcfg)) if (dma_set_mask(dev, DMA_BIT_MASK(40))) {
goto err_ioremap2; dev_err(dev, "dma_set_mask() failed\n");
goto err_portal_init;
}
if (!init_pcfg(pcfg)) {
dev_err(dev, "portal init failed\n");
goto err_portal_init;
}
/* clear irq affinity if assigned cpu is offline */ /* clear irq affinity if assigned cpu is offline */
if (!cpu_online(cpu)) if (!cpu_online(cpu))
...@@ -315,10 +333,11 @@ static int qman_portal_probe(struct platform_device *pdev) ...@@ -315,10 +333,11 @@ static int qman_portal_probe(struct platform_device *pdev)
return 0; return 0;
err_portal_init:
iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
err_ioremap2: err_ioremap2:
iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]); iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
err_ioremap1: err_ioremap1:
dev_err(dev, "ioremap failed\n");
return -ENXIO; return -ENXIO;
} }
......
...@@ -73,29 +73,23 @@ struct qm_mcr_querycgr { ...@@ -73,29 +73,23 @@ struct qm_mcr_querycgr {
struct __qm_mc_cgr cgr; /* CGR fields */ struct __qm_mc_cgr cgr; /* CGR fields */
u8 __reserved2[6]; u8 __reserved2[6];
u8 i_bcnt_hi; /* high 8-bits of 40-bit "Instant" */ u8 i_bcnt_hi; /* high 8-bits of 40-bit "Instant" */
u32 i_bcnt_lo; /* low 32-bits of 40-bit */ __be32 i_bcnt_lo; /* low 32-bits of 40-bit */
u8 __reserved3[3]; u8 __reserved3[3];
u8 a_bcnt_hi; /* high 8-bits of 40-bit "Average" */ u8 a_bcnt_hi; /* high 8-bits of 40-bit "Average" */
u32 a_bcnt_lo; /* low 32-bits of 40-bit */ __be32 a_bcnt_lo; /* low 32-bits of 40-bit */
u32 cscn_targ_swp[4]; __be32 cscn_targ_swp[4];
} __packed; } __packed;
static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q) static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
{ {
return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo; return ((u64)q->i_bcnt_hi << 32) | be32_to_cpu(q->i_bcnt_lo);
} }
static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q) static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
{ {
return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo; return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo);
} }
/* "Query FQ Non-Programmable Fields" */ /* "Query FQ Non-Programmable Fields" */
struct qm_mcc_queryfq_np {
u8 _ncw_verb;
u8 __reserved1[3];
u32 fqid; /* 24-bit */
u8 __reserved2[56];
} __packed;
struct qm_mcr_queryfq_np { struct qm_mcr_queryfq_np {
u8 verb; u8 verb;
...@@ -367,5 +361,6 @@ int qman_alloc_fq_table(u32 num_fqids); ...@@ -367,5 +361,6 @@ int qman_alloc_fq_table(u32 num_fqids);
#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI) #define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
extern struct qman_portal *affine_portals[NR_CPUS]; extern struct qman_portal *affine_portals[NR_CPUS];
extern struct qman_portal *qman_dma_portal;
const struct qm_portal_config *qman_get_qm_portal_config( const struct qm_portal_config *qman_get_qm_portal_config(
struct qman_portal *portal); struct qman_portal *portal);
@@ -65,7 +65,7 @@ static void fd_init(struct qm_fd *fd)
 {
 	qm_fd_addr_set64(fd, 0xabdeadbeefLLU);
 	qm_fd_set_contig_big(fd, 0x0000ffff);
-	fd->cmd = 0xfeedf00d;
+	fd->cmd = cpu_to_be32(0xfeedf00d);
 }
 static void fd_inc(struct qm_fd *fd)
@@ -86,26 +86,19 @@ static void fd_inc(struct qm_fd *fd)
 	len--;
 	qm_fd_set_param(fd, fmt, off, len);
-	fd->cmd++;
+	fd->cmd = cpu_to_be32(be32_to_cpu(fd->cmd) + 1);
 }
 
 /* The only part of the 'fd' we can't memcmp() is the ppid */
-static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b)
+static bool fd_neq(const struct qm_fd *a, const struct qm_fd *b)
 {
-	int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1;
-
-	if (!r) {
-		enum qm_fd_format fmt_a, fmt_b;
-
-		fmt_a = qm_fd_get_format(a);
-		fmt_b = qm_fd_get_format(b);
-		r = fmt_a - fmt_b;
-	}
-	if (!r)
-		r = a->cfg - b->cfg;
-	if (!r)
-		r = a->cmd - b->cmd;
-	return r;
+	bool neq = qm_fd_addr_get64(a) != qm_fd_addr_get64(b);
+
+	neq |= qm_fd_get_format(a) != qm_fd_get_format(b);
+	neq |= a->cfg != b->cfg;
+	neq |= a->cmd != b->cmd;
+
+	return neq;
 }
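Note that fd_neq() compares cfg and cmd in raw big-endian form without converting either side. That is sound for an equality test because byte-swapping is a bijection: two words are equal in CPU order exactly when they are equal in big-endian order. A hedged illustration:

	__be32 x = cpu_to_be32(0xfeedf00d);
	__be32 y = cpu_to_be32(0xfeedf00d);
	/* inequality of the raw words mirrors inequality in CPU order */
	WARN_ON((x != y) != (be32_to_cpu(x) != be32_to_cpu(y)));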
 /* test */
@@ -217,12 +210,12 @@ static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
 					struct qman_fq *fq,
 					const struct qm_dqrr_entry *dq)
 {
-	if (WARN_ON(fd_cmp(&fd_dq, &dq->fd))) {
+	if (WARN_ON(fd_neq(&fd_dq, &dq->fd))) {
 		pr_err("BADNESS: dequeued frame doesn't match;\n");
 		return qman_cb_dqrr_consume;
 	}
 	fd_inc(&fd_dq);
-	if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) {
+	if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_neq(&fd_dq, &fd)) {
 		sdqcr_complete = 1;
 		wake_up(&waitqueue);
 	}
@@ -175,7 +175,7 @@ static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
 /* links together the hp_cpu structs, in first-come first-serve order. */
 static LIST_HEAD(hp_cpu_list);
-static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
+static DEFINE_SPINLOCK(hp_lock);
 static unsigned int hp_cpu_list_length;
@@ -191,6 +191,9 @@ static void *__frame_ptr;
 static u32 *frame_ptr;
 static dma_addr_t frame_dma;
 
+/* needed for dma_map*() */
+static const struct qm_portal_config *pcfg;
+
 /* the main function waits on this */
 static DECLARE_WAIT_QUEUE_HEAD(queue);
@@ -210,16 +213,14 @@ static int allocate_frame_data(void)
 {
 	u32 lfsr = HP_FIRST_WORD;
 	int loop;
-	struct platform_device *pdev = platform_device_alloc("foobar", -1);
 
-	if (!pdev) {
-		pr_crit("platform_device_alloc() failed");
-		return -EIO;
-	}
-	if (platform_device_add(pdev)) {
-		pr_crit("platform_device_add() failed");
+	if (!qman_dma_portal) {
+		pr_crit("portal not available\n");
 		return -EIO;
 	}
+
+	pcfg = qman_get_qm_portal_config(qman_dma_portal);
+
 	__frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
 	if (!__frame_ptr)
 		return -ENOMEM;
@@ -229,15 +230,22 @@ static int allocate_frame_data(void)
 		frame_ptr[loop] = lfsr;
 		lfsr = do_lfsr(lfsr);
 	}
-	frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
+
+	frame_dma = dma_map_single(pcfg->dev, frame_ptr, 4 * HP_NUM_WORDS,
 				   DMA_BIDIRECTIONAL);
-	platform_device_del(pdev);
-	platform_device_put(pdev);
+	if (dma_mapping_error(pcfg->dev, frame_dma)) {
+		pr_crit("dma mapping failure\n");
+		kfree(__frame_ptr);
+		return -EIO;
+	}
+
 	return 0;
 }
 static void deallocate_frame_data(void)
 {
+	dma_unmap_single(pcfg->dev, frame_dma, 4 * HP_NUM_WORDS,
+			 DMA_BIDIRECTIONAL);
 	kfree(__frame_ptr);
 }
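dma_map_single() can fail (for instance when an IOMMU runs out of space or the buffer falls outside the device's mask), and its return value must be checked with dma_mapping_error() rather than compared against zero; the matching dma_unmap_single() added above completes the lifecycle. A minimal sketch of the map/check/unmap pattern, assuming a valid dev, buf, and len:

	dma_addr_t d = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, d))
		return -EIO;	/* 'd' is not a usable bus address */
	/* ... device reads/writes the buffer ... */
	dma_unmap_single(dev, d, len, DMA_BIDIRECTIONAL);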
@@ -249,7 +257,8 @@ static inline int process_frame_data(struct hp_handler *handler,
 	int loop;
 
 	if (qm_fd_addr_get64(fd) != handler->addr) {
-		pr_crit("bad frame address");
+		pr_crit("bad frame address, [%llX != %llX]\n",
+			qm_fd_addr_get64(fd), handler->addr);
 		return -EIO;
 	}
 	for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
@@ -397,8 +406,9 @@ static int init_handler(void *h)
 		goto failed;
 	}
 	memset(&opts, 0, sizeof(opts));
-	opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
-	opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
+	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL |
+				   QM_INITFQ_WE_CONTEXTA);
+	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING);
 	qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
 	err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
 			   QMAN_INITFQ_FLAG_LOCAL, &opts);
@@ -717,9 +717,5 @@ static struct platform_driver qe_driver = {
 	.resume = qe_resume,
 };
 
-static int __init qe_drv_init(void)
-{
-	return platform_driver_register(&qe_driver);
-}
-device_initcall(qe_drv_init);
+builtin_platform_driver(qe_driver);
 #endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */
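builtin_platform_driver() is shorthand for the removed boilerplate; behaviour is unchanged, the initcall is simply generated. A sketch of roughly what the macro expands to (the generated function name is derived from the driver variable):

	static int __init qe_driver_init(void)
	{
		return platform_driver_register(&qe_driver);
	}
	device_initcall(qe_driver_init);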
@@ -244,11 +244,11 @@ static inline int qm_sg_entry_get_off(const struct qm_sg_entry *sg)
 struct qm_dqrr_entry {
 	u8 verb;
 	u8 stat;
-	u16 seqnum;	/* 15-bit */
+	__be16 seqnum;	/* 15-bit */
 	u8 tok;
 	u8 __reserved2[3];
-	u32 fqid;	/* 24-bit */
-	u32 contextB;
+	__be32 fqid;	/* 24-bit */
+	__be32 context_b;
 	struct qm_fd fd;
 	u8 __reserved4[32];
 } __packed;
@@ -262,6 +262,11 @@ struct qm_dqrr_entry {
 #define QM_DQRR_STAT_UNSCHEDULED	0x02	/* Unscheduled dequeue */
 #define QM_DQRR_STAT_DQCR_EXPIRED	0x01	/* VDQCR or PDQCR expired*/
 
+/* 'fqid' is a 24-bit field in every h/w descriptor */
+#define QM_FQID_MASK	GENMASK(23, 0)
+#define qm_fqid_set(p, v)	((p)->fqid = cpu_to_be32((v) & QM_FQID_MASK))
+#define qm_fqid_get(p)		(be32_to_cpu((p)->fqid) & QM_FQID_MASK)
+
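The new accessors centralize both the byte order and the 24-bit masking, so callers never touch the raw __be32 word. A hedged usage sketch ('e' is a hypothetical descriptor, not code from this commit):

	struct qm_dqrr_entry e;

	qm_fqid_set(&e, 0x123456);	/* stores cpu_to_be32(0x123456 & QM_FQID_MASK) */
	WARN_ON(qm_fqid_get(&e) != 0x123456);	/* bits 31:24 masked off on read */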
/* "ERN Message Response" */ /* "ERN Message Response" */
/* "FQ State Change Notification" */ /* "FQ State Change Notification" */
union qm_mr_entry { union qm_mr_entry {
@@ -272,12 +277,11 @@ union qm_mr_entry {
 	struct {
 		u8 verb;
 		u8 dca;
-		u16 seqnum;
+		__be16 seqnum;
 		u8 rc;		/* Rej Code: 8-bit */
-		u8 orp_hi;	/* ORP: 24-bit */
-		u16 orp_lo;
-		u32 fqid;	/* 24-bit */
-		u32 tag;
+		u8 __reserved[3];
+		__be32 fqid;	/* 24-bit */
+		__be32 tag;
 		struct qm_fd fd;
 		u8 __reserved1[32];
 	} __packed ern;
@@ -285,8 +289,8 @@ union qm_mr_entry {
 		u8 verb;
 		u8 fqs;		/* Frame Queue Status */
 		u8 __reserved1[6];
-		u32 fqid;	/* 24-bit */
-		u32 contextB;
+		__be32 fqid;	/* 24-bit */
+		__be32 context_b;
 		u8 __reserved2[48];
 	} __packed fq;		/* FQRN/FQRNI/FQRL/FQPN */
 };
@@ -405,13 +409,13 @@ static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
 static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
 {
-	fqd->context_a.context_hi = upper_32_bits(addr);
-	fqd->context_a.context_lo = lower_32_bits(addr);
+	fqd->context_a.context_hi = cpu_to_be16(upper_32_bits(addr));
+	fqd->context_a.context_lo = cpu_to_be32(lower_32_bits(addr));
 }
 
 static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
 {
-	fqd->context_a.hi = cpu_to_be16(upper_32_bits(addr));
+	fqd->context_a.hi = cpu_to_be32(upper_32_bits(addr));
 	fqd->context_a.lo = cpu_to_be32(lower_32_bits(addr));
 }
@@ -521,7 +525,7 @@ static inline int qm_fqd_get_wq(const struct qm_fqd *fqd)
  */
 struct qm_cgr_wr_parm {
 	/* MA[24-31], Mn[19-23], SA[12-18], Sn[6-11], Pn[0-5] */
-	u32 word;
+	__be32 word;
 };
 
 /*
  * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
@@ -532,7 +536,7 @@ struct qm_cgr_wr_parm {
  */
 struct qm_cgr_cs_thres {
 	/* _res[13-15], TA[5-12], Tn[0-4] */
-	u16 word;
+	__be16 word;
 };
 
 /*
  * This identical structure of CGR fields is present in the "Init/Modify CGR"
@@ -549,10 +553,10 @@ struct __qm_mc_cgr {
 	u8 cscn_en;	/* boolean, use QM_CGR_EN */
 	union {
 		struct {
-			u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
-			u16 cscn_targ_dcp_low;	/* CSCN_TARG_DCP low-16bits */
+			__be16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_* */
+			__be16 cscn_targ_dcp_low;
 		};
-		u32 cscn_targ;	/* use QM_CGR_TARG_* */
+		__be32 cscn_targ;	/* use QM_CGR_TARG_* */
 	};
 	u8 cstd_en;	/* boolean, use QM_CGR_EN */
 	u8 cs;		/* boolean, only used in query response */
@@ -568,7 +572,9 @@ struct __qm_mc_cgr {
 /* Convert CGR thresholds to/from "cs_thres" format */
 static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
 {
-	return ((th->word >> 5) & 0xff) << (th->word & 0x1f);
+	int thres = be16_to_cpu(th->word);
+
+	return ((thres >> 5) & 0xff) << (thres & 0x1f);
 }
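The CS_THRES word packs an 8-bit mantissa TA (bits 5-12) and a 5-bit exponent Tn (bits 0-4), and the helper above decodes the threshold as TA << Tn after byte-swapping. A hedged worked example: TA = 0x64 and Tn = 3 encode a threshold of 100 << 3 = 800:

	struct qm_cgr_cs_thres th = {
		.word = cpu_to_be16((0x64 << 5) | 3)	/* 0x0c83 */
	};

	WARN_ON(qm_cgr_cs_thres_get64(&th) != 800);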
 static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
@@ -584,23 +590,23 @@ static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
 		if (roundup && oddbit)
 			val++;
 	}
-	th->word = ((val & 0xff) << 5) | (e & 0x1f);
+	th->word = cpu_to_be16(((val & 0xff) << 5) | (e & 0x1f));
 	return 0;
 }
 
 /* "Initialize FQ" */
 struct qm_mcc_initfq {
 	u8 __reserved1[2];
-	u16 we_mask;	/* Write Enable Mask */
-	u32 fqid;	/* 24-bit */
-	u16 count;	/* Initialises 'count+1' FQDs */
+	__be16 we_mask;	/* Write Enable Mask */
+	__be32 fqid;	/* 24-bit */
+	__be16 count;	/* Initialises 'count+1' FQDs */
 	struct qm_fqd fqd; /* the FQD fields go here */
 	u8 __reserved2[30];
 } __packed;
 
 /* "Initialize/Modify CGR" */
 struct qm_mcc_initcgr {
 	u8 __reserve1[2];
-	u16 we_mask;	/* Write Enable Mask */
+	__be16 we_mask;	/* Write Enable Mask */
 	struct __qm_mc_cgr cgr;	/* CGR fields */
 	u8 __reserved2[2];
 	u8 cgid;
@@ -654,7 +660,7 @@ struct qman_cgr;
 /*
  * This enum, and the callback type that returns it, are used when handling
  * dequeued frames via DQRR. Note that for "null" callbacks registered with the
- * portal object (for handling dequeues that do not demux because contextB is
+ * portal object (for handling dequeues that do not demux because context_b is
  * NULL), the return value *MUST* be qman_cb_dqrr_consume.
  */
 enum qman_cb_dqrr_result {
@@ -859,11 +865,11 @@ void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);
  * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
  * pre-existing frame-queues that aren't to be otherwise interfered with, it
  * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
- * causes the driver to honour any contextB modifications requested in the
+ * causes the driver to honour any context_b modifications requested in the
  * qm_init_fq() API, as this indicates the frame queue will be consumed by a
  * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
- * software portals, the contextB field is controlled by the driver and can't be
- * modified by the caller.
+ * software portals, the context_b field is controlled by the driver and can't
+ * be modified by the caller.
  */
 int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);