Commit 2da37761 authored by Christophe Leroy's avatar Christophe Leroy Committed by Michael Ellerman

powerpc/32: Fix objtool unannotated intra-function call warnings

Fix several annotations in assembly files on PPC32.

[Sathvika Vasireddy: Changed subject line and removed Kconfig change to
 enable objtool, as it is a part of "objtool/powerpc: Enable objtool to
 be built on ppc" patch in this series.]
Tested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Reviewed-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Sathvika Vasireddy <sv@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20221114175754.1131267-7-sv@linux.ibm.com
parent 1c137323
...@@ -4,6 +4,8 @@ ...@@ -4,6 +4,8 @@
* Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org) * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
*/ */
#include <linux/linkage.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/cputable.h> #include <asm/cputable.h>
...@@ -81,7 +83,7 @@ _GLOBAL(__setup_cpu_745x) ...@@ -81,7 +83,7 @@ _GLOBAL(__setup_cpu_745x)
blr blr
/* Enable caches for 603's, 604, 750 & 7400 */ /* Enable caches for 603's, 604, 750 & 7400 */
setup_common_caches: SYM_FUNC_START_LOCAL(setup_common_caches)
mfspr r11,SPRN_HID0 mfspr r11,SPRN_HID0
andi. r0,r11,HID0_DCE andi. r0,r11,HID0_DCE
ori r11,r11,HID0_ICE|HID0_DCE ori r11,r11,HID0_ICE|HID0_DCE
...@@ -95,11 +97,12 @@ setup_common_caches: ...@@ -95,11 +97,12 @@ setup_common_caches:
sync sync
isync isync
blr blr
SYM_FUNC_END(setup_common_caches)
/* 604, 604e, 604ev, ... /* 604, 604e, 604ev, ...
* Enable superscalar execution & branch history table * Enable superscalar execution & branch history table
*/ */
setup_604_hid0: SYM_FUNC_START_LOCAL(setup_604_hid0)
mfspr r11,SPRN_HID0 mfspr r11,SPRN_HID0
ori r11,r11,HID0_SIED|HID0_BHTE ori r11,r11,HID0_SIED|HID0_BHTE
ori r8,r11,HID0_BTCD ori r8,r11,HID0_BTCD
...@@ -110,6 +113,7 @@ setup_604_hid0: ...@@ -110,6 +113,7 @@ setup_604_hid0:
sync sync
isync isync
blr blr
SYM_FUNC_END(setup_604_hid0)
/* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some /* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some
* erratas we work around here. * erratas we work around here.
...@@ -125,13 +129,14 @@ setup_604_hid0: ...@@ -125,13 +129,14 @@ setup_604_hid0:
* needed once we have applied workaround #5 (though it's * needed once we have applied workaround #5 (though it's
* not set by Apple's firmware at least). * not set by Apple's firmware at least).
*/ */
setup_7400_workarounds: SYM_FUNC_START_LOCAL(setup_7400_workarounds)
mfpvr r3 mfpvr r3
rlwinm r3,r3,0,20,31 rlwinm r3,r3,0,20,31
cmpwi 0,r3,0x0207 cmpwi 0,r3,0x0207
ble 1f ble 1f
blr blr
setup_7410_workarounds: SYM_FUNC_END(setup_7400_workarounds)
SYM_FUNC_START_LOCAL(setup_7410_workarounds)
mfpvr r3 mfpvr r3
rlwinm r3,r3,0,20,31 rlwinm r3,r3,0,20,31
cmpwi 0,r3,0x0100 cmpwi 0,r3,0x0100
...@@ -151,6 +156,7 @@ setup_7410_workarounds: ...@@ -151,6 +156,7 @@ setup_7410_workarounds:
sync sync
isync isync
blr blr
SYM_FUNC_END(setup_7410_workarounds)
/* 740/750/7400/7410 /* 740/750/7400/7410
* Enable Store Gathering (SGE), Address Broadcast (ABE), * Enable Store Gathering (SGE), Address Broadcast (ABE),
...@@ -158,7 +164,7 @@ setup_7410_workarounds: ...@@ -158,7 +164,7 @@ setup_7410_workarounds:
* Dynamic Power Management (DPM), Speculative (SPD) * Dynamic Power Management (DPM), Speculative (SPD)
* Clear Instruction cache throttling (ICTC) * Clear Instruction cache throttling (ICTC)
*/ */
setup_750_7400_hid0: SYM_FUNC_START_LOCAL(setup_750_7400_hid0)
mfspr r11,SPRN_HID0 mfspr r11,SPRN_HID0
ori r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC ori r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC
oris r11,r11,HID0_DPM@h oris r11,r11,HID0_DPM@h
...@@ -177,12 +183,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM) ...@@ -177,12 +183,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
sync sync
isync isync
blr blr
SYM_FUNC_END(setup_750_7400_hid0)
/* 750cx specific /* 750cx specific
* Looks like we have to disable NAP feature for some PLL settings... * Looks like we have to disable NAP feature for some PLL settings...
* (waiting for confirmation) * (waiting for confirmation)
*/ */
setup_750cx: SYM_FUNC_START_LOCAL(setup_750cx)
mfspr r10, SPRN_HID1 mfspr r10, SPRN_HID1
rlwinm r10,r10,4,28,31 rlwinm r10,r10,4,28,31
cmpwi cr0,r10,7 cmpwi cr0,r10,7
...@@ -196,11 +203,13 @@ setup_750cx: ...@@ -196,11 +203,13 @@ setup_750cx:
andc r6,r6,r7 andc r6,r6,r7
stw r6,CPU_SPEC_FEATURES(r4) stw r6,CPU_SPEC_FEATURES(r4)
blr blr
SYM_FUNC_END(setup_750cx)
/* 750fx specific /* 750fx specific
*/ */
setup_750fx: SYM_FUNC_START_LOCAL(setup_750fx)
blr blr
SYM_FUNC_END(setup_750fx)
/* MPC 745x /* MPC 745x
* Enable Store Gathering (SGE), Branch Folding (FOLD) * Enable Store Gathering (SGE), Branch Folding (FOLD)
...@@ -212,7 +221,7 @@ setup_750fx: ...@@ -212,7 +221,7 @@ setup_750fx:
* Clear Instruction cache throttling (ICTC) * Clear Instruction cache throttling (ICTC)
* Enable L2 HW prefetch * Enable L2 HW prefetch
*/ */
setup_745x_specifics: SYM_FUNC_START_LOCAL(setup_745x_specifics)
/* We check for the presence of an L3 cache setup by /* We check for the presence of an L3 cache setup by
* the firmware. If any, we disable NAP capability as * the firmware. If any, we disable NAP capability as
* it's known to be bogus on rev 2.1 and earlier * it's known to be bogus on rev 2.1 and earlier
...@@ -270,6 +279,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM) ...@@ -270,6 +279,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
sync sync
isync isync
blr blr
SYM_FUNC_END(setup_745x_specifics)
/* /*
* Initialize the FPU registers. This is needed to work around an errata * Initialize the FPU registers. This is needed to work around an errata
......
...@@ -8,6 +8,8 @@ ...@@ -8,6 +8,8 @@
* Benjamin Herrenschmidt <benh@kernel.crashing.org> * Benjamin Herrenschmidt <benh@kernel.crashing.org>
*/ */
#include <linux/linkage.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/cputable.h> #include <asm/cputable.h>
...@@ -274,7 +276,7 @@ _GLOBAL(flush_dcache_L1) ...@@ -274,7 +276,7 @@ _GLOBAL(flush_dcache_L1)
blr blr
has_L2_cache: SYM_FUNC_START_LOCAL(has_L2_cache)
/* skip L2 cache on P2040/P2040E as they have no L2 cache */ /* skip L2 cache on P2040/P2040E as they have no L2 cache */
mfspr r3, SPRN_SVR mfspr r3, SPRN_SVR
/* shift right by 8 bits and clear E bit of SVR */ /* shift right by 8 bits and clear E bit of SVR */
...@@ -290,9 +292,10 @@ has_L2_cache: ...@@ -290,9 +292,10 @@ has_L2_cache:
1: 1:
li r3, 0 li r3, 0
blr blr
SYM_FUNC_END(has_L2_cache)
/* flush backside L2 cache */ /* flush backside L2 cache */
flush_backside_L2_cache: SYM_FUNC_START_LOCAL(flush_backside_L2_cache)
mflr r10 mflr r10
bl has_L2_cache bl has_L2_cache
mtlr r10 mtlr r10
...@@ -313,6 +316,7 @@ flush_backside_L2_cache: ...@@ -313,6 +316,7 @@ flush_backside_L2_cache:
bne 1b bne 1b
2: 2:
blr blr
SYM_FUNC_END(flush_backside_L2_cache)
_GLOBAL(cpu_down_flush_e500v2) _GLOBAL(cpu_down_flush_e500v2)
mflr r0 mflr r0
......
...@@ -18,6 +18,8 @@ ...@@ -18,6 +18,8 @@
#include <linux/err.h> #include <linux/err.h>
#include <linux/sys.h> #include <linux/sys.h>
#include <linux/threads.h> #include <linux/threads.h>
#include <linux/linkage.h>
#include <asm/reg.h> #include <asm/reg.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/mmu.h> #include <asm/mmu.h>
...@@ -74,17 +76,18 @@ _ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler) ...@@ -74,17 +76,18 @@ _ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */ #endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */
#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32) #if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
.globl __kuep_lock SYM_FUNC_START(__kuep_lock)
__kuep_lock:
lwz r9, THREAD+THSR0(r2) lwz r9, THREAD+THSR0(r2)
update_user_segments_by_4 r9, r10, r11, r12 update_user_segments_by_4 r9, r10, r11, r12
blr blr
SYM_FUNC_END(__kuep_lock)
__kuep_unlock: SYM_FUNC_START_LOCAL(__kuep_unlock)
lwz r9, THREAD+THSR0(r2) lwz r9, THREAD+THSR0(r2)
rlwinm r9,r9,0,~SR_NX rlwinm r9,r9,0,~SR_NX
update_user_segments_by_4 r9, r10, r11, r12 update_user_segments_by_4 r9, r10, r11, r12
blr blr
SYM_FUNC_END(__kuep_unlock)
.macro kuep_lock .macro kuep_lock
bl __kuep_lock bl __kuep_lock
......
...@@ -28,6 +28,8 @@ ...@@ -28,6 +28,8 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/pgtable.h> #include <linux/pgtable.h>
#include <linux/sizes.h> #include <linux/sizes.h>
#include <linux/linkage.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/mmu.h> #include <asm/mmu.h>
...@@ -662,7 +664,7 @@ start_here: ...@@ -662,7 +664,7 @@ start_here:
* kernel initialization. This maps the first 32 MBytes of memory 1:1 * kernel initialization. This maps the first 32 MBytes of memory 1:1
* virtual to physical and more importantly sets the cache mode. * virtual to physical and more importantly sets the cache mode.
*/ */
initial_mmu: SYM_FUNC_START_LOCAL(initial_mmu)
tlbia /* Invalidate all TLB entries */ tlbia /* Invalidate all TLB entries */
isync isync
...@@ -711,6 +713,7 @@ initial_mmu: ...@@ -711,6 +713,7 @@ initial_mmu:
mtspr SPRN_EVPR,r0 mtspr SPRN_EVPR,r0
blr blr
SYM_FUNC_END(initial_mmu)
_GLOBAL(abort) _GLOBAL(abort)
mfspr r13,SPRN_DBCR0 mfspr r13,SPRN_DBCR0
......
...@@ -29,6 +29,8 @@ ...@@ -29,6 +29,8 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/threads.h> #include <linux/threads.h>
#include <linux/pgtable.h> #include <linux/pgtable.h>
#include <linux/linkage.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/mmu.h> #include <asm/mmu.h>
...@@ -885,7 +887,7 @@ KernelSPE: ...@@ -885,7 +887,7 @@ KernelSPE:
* Translate the effec addr in r3 to phys addr. The phys addr will be put * Translate the effec addr in r3 to phys addr. The phys addr will be put
* into r3(higher 32bit) and r4(lower 32bit) * into r3(higher 32bit) and r4(lower 32bit)
*/ */
get_phys_addr: SYM_FUNC_START_LOCAL(get_phys_addr)
mfmsr r8 mfmsr r8
mfspr r9,SPRN_PID mfspr r9,SPRN_PID
rlwinm r9,r9,16,0x3fff0000 /* turn PID into MAS6[SPID] */ rlwinm r9,r9,16,0x3fff0000 /* turn PID into MAS6[SPID] */
...@@ -907,6 +909,7 @@ get_phys_addr: ...@@ -907,6 +909,7 @@ get_phys_addr:
mfspr r3,SPRN_MAS7 mfspr r3,SPRN_MAS7
#endif #endif
blr blr
SYM_FUNC_END(get_phys_addr)
/* /*
* Global functions * Global functions
......
...@@ -18,6 +18,8 @@ ...@@ -18,6 +18,8 @@
#include <linux/magic.h> #include <linux/magic.h>
#include <linux/pgtable.h> #include <linux/pgtable.h>
#include <linux/sizes.h> #include <linux/sizes.h>
#include <linux/linkage.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/mmu.h> #include <asm/mmu.h>
...@@ -625,7 +627,7 @@ start_here: ...@@ -625,7 +627,7 @@ start_here:
* 24 Mbytes of data, and the 512k IMMR space. Anything not covered by * 24 Mbytes of data, and the 512k IMMR space. Anything not covered by
* these mappings is mapped by page tables. * these mappings is mapped by page tables.
*/ */
initial_mmu: SYM_FUNC_START_LOCAL(initial_mmu)
li r8, 0 li r8, 0
mtspr SPRN_MI_CTR, r8 /* remove PINNED ITLB entries */ mtspr SPRN_MI_CTR, r8 /* remove PINNED ITLB entries */
lis r10, MD_TWAM@h lis r10, MD_TWAM@h
...@@ -686,6 +688,7 @@ initial_mmu: ...@@ -686,6 +688,7 @@ initial_mmu:
#endif #endif
mtspr SPRN_DER, r8 mtspr SPRN_DER, r8
blr blr
SYM_FUNC_END(initial_mmu)
_GLOBAL(mmu_pin_tlb) _GLOBAL(mmu_pin_tlb)
lis r9, (1f - PAGE_OFFSET)@h lis r9, (1f - PAGE_OFFSET)@h
......
...@@ -18,6 +18,8 @@ ...@@ -18,6 +18,8 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/pgtable.h> #include <linux/pgtable.h>
#include <linux/linkage.h>
#include <asm/reg.h> #include <asm/reg.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/mmu.h> #include <asm/mmu.h>
...@@ -877,7 +879,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE) ...@@ -877,7 +879,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
* Load stuff into the MMU. Intended to be called with * Load stuff into the MMU. Intended to be called with
* IR=0 and DR=0. * IR=0 and DR=0.
*/ */
early_hash_table: SYM_FUNC_START_LOCAL(early_hash_table)
sync /* Force all PTE updates to finish */ sync /* Force all PTE updates to finish */
isync isync
tlbia /* Clear all TLB entries */ tlbia /* Clear all TLB entries */
...@@ -888,8 +890,9 @@ early_hash_table: ...@@ -888,8 +890,9 @@ early_hash_table:
ori r6, r6, 3 /* 256kB table */ ori r6, r6, 3 /* 256kB table */
mtspr SPRN_SDR1, r6 mtspr SPRN_SDR1, r6
blr blr
SYM_FUNC_END(early_hash_table)
load_up_mmu: SYM_FUNC_START_LOCAL(load_up_mmu)
sync /* Force all PTE updates to finish */ sync /* Force all PTE updates to finish */
isync isync
tlbia /* Clear all TLB entries */ tlbia /* Clear all TLB entries */
...@@ -918,6 +921,7 @@ BEGIN_MMU_FTR_SECTION ...@@ -918,6 +921,7 @@ BEGIN_MMU_FTR_SECTION
LOAD_BAT(7,r3,r4,r5) LOAD_BAT(7,r3,r4,r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
blr blr
SYM_FUNC_END(load_up_mmu)
_GLOBAL(load_segment_registers) _GLOBAL(load_segment_registers)
li r0, NUM_USER_SEGMENTS /* load up user segment register values */ li r0, NUM_USER_SEGMENTS /* load up user segment register values */
...@@ -1028,7 +1032,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE) ...@@ -1028,7 +1032,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
* this makes sure it's done. * this makes sure it's done.
* -- Cort * -- Cort
*/ */
clear_bats: SYM_FUNC_START_LOCAL(clear_bats)
li r10,0 li r10,0
mtspr SPRN_DBAT0U,r10 mtspr SPRN_DBAT0U,r10
...@@ -1072,6 +1076,7 @@ BEGIN_MMU_FTR_SECTION ...@@ -1072,6 +1076,7 @@ BEGIN_MMU_FTR_SECTION
mtspr SPRN_IBAT7L,r10 mtspr SPRN_IBAT7L,r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
blr blr
SYM_FUNC_END(clear_bats)
_GLOBAL(update_bats) _GLOBAL(update_bats)
lis r4, 1f@h lis r4, 1f@h
...@@ -1108,15 +1113,16 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) ...@@ -1108,15 +1113,16 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
mtspr SPRN_SRR1, r6 mtspr SPRN_SRR1, r6
rfi rfi
flush_tlbs: SYM_FUNC_START_LOCAL(flush_tlbs)
lis r10, 0x40 lis r10, 0x40
1: addic. r10, r10, -0x1000 1: addic. r10, r10, -0x1000
tlbie r10 tlbie r10
bgt 1b bgt 1b
sync sync
blr blr
SYM_FUNC_END(flush_tlbs)
mmu_off: SYM_FUNC_START_LOCAL(mmu_off)
addi r4, r3, __after_mmu_off - _start addi r4, r3, __after_mmu_off - _start
mfmsr r3 mfmsr r3
andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */ andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */
...@@ -1128,9 +1134,10 @@ mmu_off: ...@@ -1128,9 +1134,10 @@ mmu_off:
mtspr SPRN_SRR1,r3 mtspr SPRN_SRR1,r3
sync sync
rfi rfi
SYM_FUNC_END(mmu_off)
/* We use one BAT to map up to 256M of RAM at _PAGE_OFFSET */ /* We use one BAT to map up to 256M of RAM at _PAGE_OFFSET */
initial_bats: SYM_FUNC_START_LOCAL(initial_bats)
lis r11,PAGE_OFFSET@h lis r11,PAGE_OFFSET@h
tophys(r8,r11) tophys(r8,r11)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
...@@ -1146,9 +1153,10 @@ initial_bats: ...@@ -1146,9 +1153,10 @@ initial_bats:
mtspr SPRN_IBAT0U,r11 mtspr SPRN_IBAT0U,r11
isync isync
blr blr
SYM_FUNC_END(initial_bats)
#ifdef CONFIG_BOOTX_TEXT #ifdef CONFIG_BOOTX_TEXT
setup_disp_bat: SYM_FUNC_START_LOCAL(setup_disp_bat)
/* /*
* setup the display bat prepared for us in prom.c * setup the display bat prepared for us in prom.c
*/ */
...@@ -1164,10 +1172,11 @@ setup_disp_bat: ...@@ -1164,10 +1172,11 @@ setup_disp_bat:
mtspr SPRN_DBAT3L,r8 mtspr SPRN_DBAT3L,r8
mtspr SPRN_DBAT3U,r11 mtspr SPRN_DBAT3U,r11
blr blr
SYM_FUNC_END(setup_disp_bat)
#endif /* CONFIG_BOOTX_TEXT */ #endif /* CONFIG_BOOTX_TEXT */
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM #ifdef CONFIG_PPC_EARLY_DEBUG_CPM
setup_cpm_bat: SYM_FUNC_START_LOCAL(setup_cpm_bat)
lis r8, 0xf000 lis r8, 0xf000
ori r8, r8, 0x002a ori r8, r8, 0x002a
mtspr SPRN_DBAT1L, r8 mtspr SPRN_DBAT1L, r8
...@@ -1177,10 +1186,11 @@ setup_cpm_bat: ...@@ -1177,10 +1186,11 @@ setup_cpm_bat:
mtspr SPRN_DBAT1U, r11 mtspr SPRN_DBAT1U, r11
blr blr
SYM_FUNC_END(setup_cpm_bat)
#endif #endif
#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO #ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
setup_usbgecko_bat: SYM_FUNC_START_LOCAL(setup_usbgecko_bat)
/* prepare a BAT for early io */ /* prepare a BAT for early io */
#if defined(CONFIG_GAMECUBE) #if defined(CONFIG_GAMECUBE)
lis r8, 0x0c00 lis r8, 0x0c00
...@@ -1199,6 +1209,7 @@ setup_usbgecko_bat: ...@@ -1199,6 +1209,7 @@ setup_usbgecko_bat:
mtspr SPRN_DBAT1L, r8 mtspr SPRN_DBAT1L, r8
mtspr SPRN_DBAT1U, r11 mtspr SPRN_DBAT1U, r11
blr blr
SYM_FUNC_END(setup_usbgecko_bat)
#endif #endif
.data .data
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
#include <linux/threads.h> #include <linux/threads.h>
#include <linux/linkage.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/cputable.h> #include <asm/cputable.h>
...@@ -400,7 +402,7 @@ _ASM_NOKPROBE_SYMBOL(swsusp_arch_resume) ...@@ -400,7 +402,7 @@ _ASM_NOKPROBE_SYMBOL(swsusp_arch_resume)
/* FIXME:This construct is actually not useful since we don't shut /* FIXME:This construct is actually not useful since we don't shut
* down the instruction MMU, we could just flip back MSR-DR on. * down the instruction MMU, we could just flip back MSR-DR on.
*/ */
turn_on_mmu: SYM_FUNC_START_LOCAL(turn_on_mmu)
mflr r4 mflr r4
mtsrr0 r4 mtsrr0 r4
mtsrr1 r3 mtsrr1 r3
...@@ -408,4 +410,5 @@ turn_on_mmu: ...@@ -408,4 +410,5 @@ turn_on_mmu:
isync isync
rfi rfi
_ASM_NOKPROBE_SYMBOL(turn_on_mmu) _ASM_NOKPROBE_SYMBOL(turn_on_mmu)
SYM_FUNC_END(turn_on_mmu)
...@@ -6,6 +6,8 @@ ...@@ -6,6 +6,8 @@
*/ */
#include <linux/pgtable.h> #include <linux/pgtable.h>
#include <linux/linkage.h>
#include <asm/reg.h> #include <asm/reg.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/mmu.h> #include <asm/mmu.h>
...@@ -110,18 +112,22 @@ FPS_THREE_IN(fsel) ...@@ -110,18 +112,22 @@ FPS_THREE_IN(fsel)
* R8 = (double*)&param3 [load_three] * R8 = (double*)&param3 [load_three]
* LR = instruction call function * LR = instruction call function
*/ */
fpd_load_three: SYM_FUNC_START_LOCAL(fpd_load_three)
lfd 2,0(r8) /* load param3 */ lfd 2,0(r8) /* load param3 */
fpd_load_two: SYM_FUNC_START_LOCAL(fpd_load_two)
lfd 1,0(r7) /* load param2 */ lfd 1,0(r7) /* load param2 */
fpd_load_one: SYM_FUNC_START_LOCAL(fpd_load_one)
lfd 0,0(r6) /* load param1 */ lfd 0,0(r6) /* load param1 */
fpd_load_none: SYM_FUNC_START_LOCAL(fpd_load_none)
lfd 3,0(r3) /* load up fpscr value */ lfd 3,0(r3) /* load up fpscr value */
MTFSF_L(3) MTFSF_L(3)
lwz r6, 0(r4) /* load cr */ lwz r6, 0(r4) /* load cr */
mtcr r6 mtcr r6
blr blr
SYM_FUNC_END(fpd_load_none)
SYM_FUNC_END(fpd_load_one)
SYM_FUNC_END(fpd_load_two)
SYM_FUNC_END(fpd_load_three)
/* /*
* End of double instruction processing * End of double instruction processing
...@@ -131,13 +137,14 @@ fpd_load_none: ...@@ -131,13 +137,14 @@ fpd_load_none:
* R5 = (double*)&result * R5 = (double*)&result
* LR = caller of instruction call function * LR = caller of instruction call function
*/ */
fpd_return: SYM_FUNC_START_LOCAL(fpd_return)
mfcr r6 mfcr r6
stfd 0,0(r5) /* save result */ stfd 0,0(r5) /* save result */
mffs 0 mffs 0
stfd 0,0(r3) /* save new fpscr value */ stfd 0,0(r3) /* save new fpscr value */
stw r6,0(r4) /* save new cr value */ stw r6,0(r4) /* save new cr value */
blr blr
SYM_FUNC_END(fpd_return)
/* /*
* Double operation with no input operand * Double operation with no input operand
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/reg.h> #include <asm/reg.h>
#include <asm/ppc_asm.h> #include <asm/ppc_asm.h>
#include <asm/processor.h> #include <asm/processor.h>
...@@ -178,7 +180,8 @@ sram_code: ...@@ -178,7 +180,8 @@ sram_code:
/* local udelay in sram is needed */ /* local udelay in sram is needed */
udelay: /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */ SYM_FUNC_START_LOCAL(udelay)
/* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */
mullw r12, r12, r11 mullw r12, r12, r11
mftb r13 /* start */ mftb r13 /* start */
add r12, r13, r12 /* end */ add r12, r13, r12 /* end */
...@@ -187,6 +190,7 @@ sram_code: ...@@ -187,6 +190,7 @@ sram_code:
cmp cr0, r13, r12 cmp cr0, r13, r12
blt 1b blt 1b
blr blr
SYM_FUNC_END(udelay)
sram_code_end: sram_code_end:
...@@ -271,7 +275,7 @@ _ASM_NOKPROBE_SYMBOL(lite5200_wakeup) ...@@ -271,7 +275,7 @@ _ASM_NOKPROBE_SYMBOL(lite5200_wakeup)
SAVE_SR(n+2, addr+2); \ SAVE_SR(n+2, addr+2); \
SAVE_SR(n+3, addr+3); SAVE_SR(n+3, addr+3);
save_regs: SYM_FUNC_START_LOCAL(save_regs)
stw r0, 0(r4) stw r0, 0(r4)
stw r1, 0x4(r4) stw r1, 0x4(r4)
stw r2, 0x8(r4) stw r2, 0x8(r4)
...@@ -317,6 +321,7 @@ save_regs: ...@@ -317,6 +321,7 @@ save_regs:
SAVE_SPRN(TBRU, 0x5b) SAVE_SPRN(TBRU, 0x5b)
blr blr
SYM_FUNC_END(save_regs)
/* restore registers */ /* restore registers */
...@@ -336,7 +341,7 @@ save_regs: ...@@ -336,7 +341,7 @@ save_regs:
LOAD_SR(n+2, addr+2); \ LOAD_SR(n+2, addr+2); \
LOAD_SR(n+3, addr+3); LOAD_SR(n+3, addr+3);
restore_regs: SYM_FUNC_START_LOCAL(restore_regs)
lis r4, registers@h lis r4, registers@h
ori r4, r4, registers@l ori r4, r4, registers@l
...@@ -393,6 +398,7 @@ restore_regs: ...@@ -393,6 +398,7 @@ restore_regs:
blr blr
_ASM_NOKPROBE_SYMBOL(restore_regs) _ASM_NOKPROBE_SYMBOL(restore_regs)
SYM_FUNC_END(restore_regs)
...@@ -403,7 +409,7 @@ _ASM_NOKPROBE_SYMBOL(restore_regs) ...@@ -403,7 +409,7 @@ _ASM_NOKPROBE_SYMBOL(restore_regs)
* Flush data cache * Flush data cache
* Do this by just reading lots of stuff into the cache. * Do this by just reading lots of stuff into the cache.
*/ */
flush_data_cache: SYM_FUNC_START_LOCAL(flush_data_cache)
lis r3,CONFIG_KERNEL_START@h lis r3,CONFIG_KERNEL_START@h
ori r3,r3,CONFIG_KERNEL_START@l ori r3,r3,CONFIG_KERNEL_START@l
li r4,NUM_CACHE_LINES li r4,NUM_CACHE_LINES
...@@ -413,3 +419,4 @@ flush_data_cache: ...@@ -413,3 +419,4 @@ flush_data_cache:
addi r3,r3,L1_CACHE_BYTES /* Next line, please */ addi r3,r3,L1_CACHE_BYTES /* Next line, please */
bdnz 1b bdnz 1b
blr blr
SYM_FUNC_END(flush_data_cache)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment