Commit 0798b1db authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile

* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile: (26 commits)
  arch/tile: prefer "tilepro" as the name of the 32-bit architecture
  compat: include aio_abi.h for aio_context_t
  arch/tile: cleanups for tilegx compat mode
  arch/tile: allocate PCI IRQs later in boot
  arch/tile: support signal "exception-trace" hook
  arch/tile: use better definitions of xchg() and cmpxchg()
  include/linux/compat.h: coding-style fixes
  tile: add an RTC driver for the Tilera hypervisor
  arch/tile: finish enabling support for TILE-Gx 64-bit chip
  compat: fixes to allow working with tile arch
  arch/tile: update defconfig file to something more useful
  tile: do_hardwall_trap: do not play with task->sighand
  tile: replace mm->cpu_vm_mask with mm_cpumask()
  tile,mn10300: add device parameter to dma_cache_sync()
  audit: support the "standard" <asm-generic/unistd.h>
  arch/tile: clarify flush_buffer()/finv_buffer() function names
  arch/tile: kernel-related cleanups from removing static page size
  arch/tile: various header improvements for building drivers
  arch/tile: disable GX prefetcher during cache flush
  arch/tile: tolerate disabling CONFIG_BLK_DEV_INITRD
  ...
parents ad363e09 6738d321
......@@ -220,6 +220,14 @@ ifeq ($(ARCH),sh64)
SRCARCH := sh
endif
# Additional ARCH settings for tile
ifeq ($(ARCH),tilepro)
SRCARCH := tile
endif
ifeq ($(ARCH),tilegx)
SRCARCH := tile
endif
# Where to locate arch specific headers
hdr-arch := $(SRCARCH)
......
......@@ -339,6 +339,14 @@ config NO_IOPORT
source "drivers/pci/Kconfig"
config HOTPLUG
bool "Support for hot-pluggable devices"
---help---
Say Y here if you want to plug devices into your computer while
the system is running, and be able to use them quickly. In many
cases, the devices can likewise be unplugged at any time too.
One well-known example of this is USB.
source "drivers/pci/hotplug/Kconfig"
endmenu
......
CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="usr/contents.txt"
CONFIG_EXPERT=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_100=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
CONFIG_IPV6=y
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_NETDEVICES=y
CONFIG_TUN=y
# CONFIG_NETDEV_10000 is not set
# CONFIG_WLAN is not set
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
# CONFIG_HID_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_INTF_SYSFS is not set
# CONFIG_RTC_INTF_PROC is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=m
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
CONFIG_NFS_FS=m
CONFIG_NFS_V3=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_FRAME_WARN=2048
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_SPINLOCK_SLEEP=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_VM=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_DEBUG_EXTRA_FLAGS="-femit-struct-debug-baseonly"
/*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
/*
* @file
* Global header file.
* This header file specifies defines for TILE-Gx.
*/
#ifndef __ARCH_CHIP_H__
#define __ARCH_CHIP_H__
/** Specify chip version.
* When possible, prefer the CHIP_xxx symbols below for future-proofing.
* This is intended for cross-compiling; native compilation should
* use the predefined __tile_chip__ symbol.
*/
#define TILE_CHIP 10
/** Specify chip revision.
* This provides for the case of a respin of a particular chip type;
* the normal value for this symbol is "0".
* This is intended for cross-compiling; native compilation should
* use the predefined __tile_chip_rev__ symbol.
*/
#define TILE_CHIP_REV 0
/** The name of this architecture. */
#define CHIP_ARCH_NAME "tilegx"
/** The ELF e_machine type for binaries for this chip. */
#define CHIP_ELF_TYPE() EM_TILEGX
/** The alternate ELF e_machine type for binaries for this chip. */
#define CHIP_COMPAT_ELF_TYPE() 0x2597
/** What is the native word size of the machine? */
#define CHIP_WORD_SIZE() 64
/** How many bits of a virtual address are used. Extra bits must be
* the sign extension of the low bits.
*/
#define CHIP_VA_WIDTH() 42
/** How many bits are in a physical address? */
#define CHIP_PA_WIDTH() 40
/** Size of the L2 cache, in bytes. */
#define CHIP_L2_CACHE_SIZE() 262144
/** Log size of an L2 cache line in bytes. */
#define CHIP_L2_LOG_LINE_SIZE() 6
/** Size of an L2 cache line, in bytes. */
#define CHIP_L2_LINE_SIZE() (1 << CHIP_L2_LOG_LINE_SIZE())
/** Associativity of the L2 cache. */
#define CHIP_L2_ASSOC() 8
/** Size of the L1 data cache, in bytes. */
#define CHIP_L1D_CACHE_SIZE() 32768
/** Log size of an L1 data cache line in bytes. */
#define CHIP_L1D_LOG_LINE_SIZE() 6
/** Size of an L1 data cache line, in bytes. */
#define CHIP_L1D_LINE_SIZE() (1 << CHIP_L1D_LOG_LINE_SIZE())
/** Associativity of the L1 data cache. */
#define CHIP_L1D_ASSOC() 2
/** Size of the L1 instruction cache, in bytes. */
#define CHIP_L1I_CACHE_SIZE() 32768
/** Log size of an L1 instruction cache line in bytes. */
#define CHIP_L1I_LOG_LINE_SIZE() 6
/** Size of an L1 instruction cache line, in bytes. */
#define CHIP_L1I_LINE_SIZE() (1 << CHIP_L1I_LOG_LINE_SIZE())
/** Associativity of the L1 instruction cache. */
#define CHIP_L1I_ASSOC() 2
/** Stride with which flush instructions must be issued. */
#define CHIP_FLUSH_STRIDE() CHIP_L2_LINE_SIZE()
/** Stride with which inv instructions must be issued. */
#define CHIP_INV_STRIDE() CHIP_L2_LINE_SIZE()
/** Stride with which finv instructions must be issued. */
#define CHIP_FINV_STRIDE() CHIP_L2_LINE_SIZE()
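/* Illustrative sketch (not part of this header): these stride macros are
 * what lets cache-maintenance code walk a buffer one line at a time.  The
 * kernel's __flush_buffer() does essentially the following, using the
 * per-line flush intrinsic:
 *
 *	char *next = (char *)((long)buffer & -CHIP_FLUSH_STRIDE());
 *	char *end = (char *)buffer + size;
 *	while (next < end) {
 *		__insn_flush(next);
 *		next += CHIP_FLUSH_STRIDE();
 *	}
 */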
/** Can the local cache coherently cache data that is homed elsewhere? */
#define CHIP_HAS_COHERENT_LOCAL_CACHE() 1
/** How many simultaneous outstanding victims can the L2 cache have? */
#define CHIP_MAX_OUTSTANDING_VICTIMS() 128
/** Does the TLB support the NC and NOALLOC bits? */
#define CHIP_HAS_NC_AND_NOALLOC_BITS() 1
/** Does the chip support hash-for-home caching? */
#define CHIP_HAS_CBOX_HOME_MAP() 1
/** Number of entries in the chip's home map tables. */
#define CHIP_CBOX_HOME_MAP_SIZE() 128
/** Do uncacheable requests miss in the cache regardless of whether
* there is matching data? */
#define CHIP_HAS_ENFORCED_UNCACHEABLE_REQUESTS() 1
/** Does the mf instruction wait for victims? */
#define CHIP_HAS_MF_WAITS_FOR_VICTIMS() 0
/** Does the chip have an "inv" instruction that doesn't also flush? */
#define CHIP_HAS_INV() 1
/** Does the chip have a "wh64" instruction? */
#define CHIP_HAS_WH64() 1
/** Does this chip have a 'dword_align' instruction? */
#define CHIP_HAS_DWORD_ALIGN() 0
/** Number of performance counters. */
#define CHIP_PERFORMANCE_COUNTERS() 4
/** Does this chip have auxiliary performance counters? */
#define CHIP_HAS_AUX_PERF_COUNTERS() 1
/** Is the CBOX_MSR1 SPR supported? */
#define CHIP_HAS_CBOX_MSR1() 0
/** Is the TILE_RTF_HWM SPR supported? */
#define CHIP_HAS_TILE_RTF_HWM() 1
/** Is the TILE_WRITE_PENDING SPR supported? */
#define CHIP_HAS_TILE_WRITE_PENDING() 0
/** Is the PROC_STATUS SPR supported? */
#define CHIP_HAS_PROC_STATUS_SPR() 1
/** Is the DSTREAM_PF SPR supported? */
#define CHIP_HAS_DSTREAM_PF() 1
/** Log of the number of mshims we have. */
#define CHIP_LOG_NUM_MSHIMS() 2
/** Are the bases of the interrupt vector areas fixed? */
#define CHIP_HAS_FIXED_INTVEC_BASE() 0
/** Are the interrupt masks split up into 2 SPRs? */
#define CHIP_HAS_SPLIT_INTR_MASK() 0
/** Is the cycle count split up into 2 SPRs? */
#define CHIP_HAS_SPLIT_CYCLE() 0
/** Does the chip have a static network? */
#define CHIP_HAS_SN() 0
/** Does the chip have a static network processor? */
#define CHIP_HAS_SN_PROC() 0
/** Size of the L1 static network processor instruction cache, in bytes. */
/* #define CHIP_L1SNI_CACHE_SIZE() -- does not apply to chip 10 */
/** Does the chip have DMA support in each tile? */
#define CHIP_HAS_TILE_DMA() 0
/** Does the chip have the second revision of the directly accessible
* dynamic networks? This encapsulates a number of characteristics,
* including the absence of the catch-all, the absence of inline message
* tags, the absence of support for network context-switching, and so on.
*/
#define CHIP_HAS_REV1_XDN() 1
/** Does the chip have cmpexch and similar (fetchadd, exch, etc.)? */
#define CHIP_HAS_CMPEXCH() 1
/** Does the chip have memory-mapped I/O support? */
#define CHIP_HAS_MMIO() 1
/** Does the chip have post-completion interrupts? */
#define CHIP_HAS_POST_COMPLETION_INTERRUPTS() 1
/** Does the chip have native single step support? */
#define CHIP_HAS_SINGLE_STEP() 1
#ifndef __OPEN_SOURCE__ /* features only relevant to hypervisor-level code */
/** How many entries are present in the instruction TLB? */
#define CHIP_ITLB_ENTRIES() 16
/** How many entries are present in the data TLB? */
#define CHIP_DTLB_ENTRIES() 32
/** How many MAF entries does the XAUI shim have? */
#define CHIP_XAUI_MAF_ENTRIES() 32
/** Does the memory shim have a source-id table? */
#define CHIP_HAS_MSHIM_SRCID_TABLE() 0
/** Does the L1 instruction cache clear on reset? */
#define CHIP_HAS_L1I_CLEAR_ON_RESET() 1
/** Does the chip come out of reset with valid coordinates on all tiles?
* Note that if defined, this also implies that the upper left is 1,1.
*/
#define CHIP_HAS_VALID_TILE_COORD_RESET() 1
/** Does the chip have unified packet formats? */
#define CHIP_HAS_UNIFIED_PACKET_FORMATS() 1
/** Does the chip support write reordering? */
#define CHIP_HAS_WRITE_REORDERING() 1
/** Does the chip support Y-X routing as well as X-Y? */
#define CHIP_HAS_Y_X_ROUTING() 1
/** Is INTCTRL_3 managed with the correct MPL? */
#define CHIP_HAS_INTCTRL_3_STATUS_FIX() 1
/** Is it possible to configure the chip to be big-endian? */
#define CHIP_HAS_BIG_ENDIAN_CONFIG() 1
/** Is the CACHE_RED_WAY_OVERRIDDEN SPR supported? */
#define CHIP_HAS_CACHE_RED_WAY_OVERRIDDEN() 0
/** Is the DIAG_TRACE_WAY SPR supported? */
#define CHIP_HAS_DIAG_TRACE_WAY() 0
/** Is the MEM_STRIPE_CONFIG SPR supported? */
#define CHIP_HAS_MEM_STRIPE_CONFIG() 1
/** Are the TLB_PERF SPRs supported? */
#define CHIP_HAS_TLB_PERF() 1
/** Is the VDN_SNOOP_SHIM_CTL SPR supported? */
#define CHIP_HAS_VDN_SNOOP_SHIM_CTL() 0
/** Does the chip support rev1 DMA packets? */
#define CHIP_HAS_REV1_DMA_PACKETS() 1
/** Does the chip have an IPI shim? */
#define CHIP_HAS_IPI() 1
#endif /* !__OPEN_SOURCE__ */
#endif /* __ARCH_CHIP_H__ */
......@@ -16,7 +16,7 @@
/**
* @file
*
- Support for invalidating bytes in the instruction
+ Support for invalidating bytes in the instruction cache.
*/
#ifndef __ARCH_ICACHE_H__
......@@ -30,11 +30,10 @@
*
* @param addr The start of memory to be invalidated.
* @param size The number of bytes to be invalidated.
-* @param page_size The system's page size, typically the PAGE_SIZE constant
-in sys/page.h. This value must be a power of two no larger
-than the page containing the code to be invalidated. If the value
-is smaller than the actual page size, this function will still
-work, but may run slower than necessary.
+* @param page_size The system's page size, e.g. getpagesize() in userspace.
+This value must be a power of two no larger than the page containing
+the code to be invalidated. If the value is smaller than the actual page
+size, this function will still work, but may run slower than necessary.
*/
static __inline void
invalidate_icache(const void* addr, unsigned long size,
......
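For context, a minimal user-space sketch of calling this API after patching code at runtime; the include path and the use of POSIX getpagesize() are assumptions for the example, not part of the header:

#include <unistd.h>
#include <arch/icache.h>

void patch_and_sync(void *code, unsigned long len)
{
	/* ... write the new instruction bytes into 'code' first ... */
	invalidate_icache(code, len, getpagesize());
}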
/*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef __ARCH_INTERRUPTS_H__
#define __ARCH_INTERRUPTS_H__
/** Mask for an interrupt. */
#ifdef __ASSEMBLER__
/* Note: must handle breaking interrupts into high and low words manually. */
#define INT_MASK(intno) (1 << (intno))
#else
#define INT_MASK(intno) (1ULL << (intno))
#endif
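/* Worked example (illustrative): INT_MASK(INT_I_ASID) is 1ULL << 40 in C.
 * An assembler caller, per the note above, must split that into a high
 * word of (1 << (40 - 32)) and a low word of 0, since the assembler form
 * only computes a 32-bit shift. */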
/** Where a given interrupt executes */
#define INTERRUPT_VECTOR(i, pl) (0xFC000000 + ((pl) << 24) + ((i) << 8))
/** Where to store a vector for a given interrupt. */
#define USER_INTERRUPT_VECTOR(i) INTERRUPT_VECTOR(i, 0)
/** The base address of user-level interrupts. */
#define USER_INTERRUPT_VECTOR_BASE INTERRUPT_VECTOR(0, 0)
/** Additional synthetic interrupt. */
#define INT_BREAKPOINT (63)
#define INT_MEM_ERROR 0
#define INT_SINGLE_STEP_3 1
#define INT_SINGLE_STEP_2 2
#define INT_SINGLE_STEP_1 3
#define INT_SINGLE_STEP_0 4
#define INT_IDN_COMPLETE 5
#define INT_UDN_COMPLETE 6
#define INT_ITLB_MISS 7
#define INT_ILL 8
#define INT_GPV 9
#define INT_IDN_ACCESS 10
#define INT_UDN_ACCESS 11
#define INT_SWINT_3 12
#define INT_SWINT_2 13
#define INT_SWINT_1 14
#define INT_SWINT_0 15
#define INT_ILL_TRANS 16
#define INT_UNALIGN_DATA 17
#define INT_DTLB_MISS 18
#define INT_DTLB_ACCESS 19
#define INT_IDN_FIREWALL 20
#define INT_UDN_FIREWALL 21
#define INT_TILE_TIMER 22
#define INT_AUX_TILE_TIMER 23
#define INT_IDN_TIMER 24
#define INT_UDN_TIMER 25
#define INT_IDN_AVAIL 26
#define INT_UDN_AVAIL 27
#define INT_IPI_3 28
#define INT_IPI_2 29
#define INT_IPI_1 30
#define INT_IPI_0 31
#define INT_PERF_COUNT 32
#define INT_AUX_PERF_COUNT 33
#define INT_INTCTRL_3 34
#define INT_INTCTRL_2 35
#define INT_INTCTRL_1 36
#define INT_INTCTRL_0 37
#define INT_BOOT_ACCESS 38
#define INT_WORLD_ACCESS 39
#define INT_I_ASID 40
#define INT_D_ASID 41
#define INT_DOUBLE_FAULT 42
#define NUM_INTERRUPTS 43
#ifndef __ASSEMBLER__
#define QUEUED_INTERRUPTS ( \
INT_MASK(INT_MEM_ERROR) | \
INT_MASK(INT_IDN_COMPLETE) | \
INT_MASK(INT_UDN_COMPLETE) | \
INT_MASK(INT_IDN_FIREWALL) | \
INT_MASK(INT_UDN_FIREWALL) | \
INT_MASK(INT_TILE_TIMER) | \
INT_MASK(INT_AUX_TILE_TIMER) | \
INT_MASK(INT_IDN_TIMER) | \
INT_MASK(INT_UDN_TIMER) | \
INT_MASK(INT_IDN_AVAIL) | \
INT_MASK(INT_UDN_AVAIL) | \
INT_MASK(INT_IPI_3) | \
INT_MASK(INT_IPI_2) | \
INT_MASK(INT_IPI_1) | \
INT_MASK(INT_IPI_0) | \
INT_MASK(INT_PERF_COUNT) | \
INT_MASK(INT_AUX_PERF_COUNT) | \
INT_MASK(INT_INTCTRL_3) | \
INT_MASK(INT_INTCTRL_2) | \
INT_MASK(INT_INTCTRL_1) | \
INT_MASK(INT_INTCTRL_0) | \
INT_MASK(INT_BOOT_ACCESS) | \
INT_MASK(INT_WORLD_ACCESS) | \
INT_MASK(INT_I_ASID) | \
INT_MASK(INT_D_ASID) | \
INT_MASK(INT_DOUBLE_FAULT) | \
0)
#define NONQUEUED_INTERRUPTS ( \
INT_MASK(INT_SINGLE_STEP_3) | \
INT_MASK(INT_SINGLE_STEP_2) | \
INT_MASK(INT_SINGLE_STEP_1) | \
INT_MASK(INT_SINGLE_STEP_0) | \
INT_MASK(INT_ITLB_MISS) | \
INT_MASK(INT_ILL) | \
INT_MASK(INT_GPV) | \
INT_MASK(INT_IDN_ACCESS) | \
INT_MASK(INT_UDN_ACCESS) | \
INT_MASK(INT_SWINT_3) | \
INT_MASK(INT_SWINT_2) | \
INT_MASK(INT_SWINT_1) | \
INT_MASK(INT_SWINT_0) | \
INT_MASK(INT_ILL_TRANS) | \
INT_MASK(INT_UNALIGN_DATA) | \
INT_MASK(INT_DTLB_MISS) | \
INT_MASK(INT_DTLB_ACCESS) | \
0)
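/* A property worth noting (illustrative, not in the original header):
 * QUEUED_INTERRUPTS and NONQUEUED_INTERRUPTS are disjoint and together
 * cover all vectors, so a build-time sanity check such as
 *
 *	BUILD_BUG_ON((QUEUED_INTERRUPTS ^ NONQUEUED_INTERRUPTS) !=
 *		     ((1ULL << NUM_INTERRUPTS) - 1));
 *
 * would hold for this chip's 43 interrupt vectors. */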
#define CRITICAL_MASKED_INTERRUPTS ( \
INT_MASK(INT_MEM_ERROR) | \
INT_MASK(INT_SINGLE_STEP_3) | \
INT_MASK(INT_SINGLE_STEP_2) | \
INT_MASK(INT_SINGLE_STEP_1) | \
INT_MASK(INT_SINGLE_STEP_0) | \
INT_MASK(INT_IDN_COMPLETE) | \
INT_MASK(INT_UDN_COMPLETE) | \
INT_MASK(INT_IDN_FIREWALL) | \
INT_MASK(INT_UDN_FIREWALL) | \
INT_MASK(INT_TILE_TIMER) | \
INT_MASK(INT_AUX_TILE_TIMER) | \
INT_MASK(INT_IDN_TIMER) | \
INT_MASK(INT_UDN_TIMER) | \
INT_MASK(INT_IDN_AVAIL) | \
INT_MASK(INT_UDN_AVAIL) | \
INT_MASK(INT_IPI_3) | \
INT_MASK(INT_IPI_2) | \
INT_MASK(INT_IPI_1) | \
INT_MASK(INT_IPI_0) | \
INT_MASK(INT_PERF_COUNT) | \
INT_MASK(INT_AUX_PERF_COUNT) | \
INT_MASK(INT_INTCTRL_3) | \
INT_MASK(INT_INTCTRL_2) | \
INT_MASK(INT_INTCTRL_1) | \
INT_MASK(INT_INTCTRL_0) | \
0)
#define CRITICAL_UNMASKED_INTERRUPTS ( \
INT_MASK(INT_ITLB_MISS) | \
INT_MASK(INT_ILL) | \
INT_MASK(INT_GPV) | \
INT_MASK(INT_IDN_ACCESS) | \
INT_MASK(INT_UDN_ACCESS) | \
INT_MASK(INT_SWINT_3) | \
INT_MASK(INT_SWINT_2) | \
INT_MASK(INT_SWINT_1) | \
INT_MASK(INT_SWINT_0) | \
INT_MASK(INT_ILL_TRANS) | \
INT_MASK(INT_UNALIGN_DATA) | \
INT_MASK(INT_DTLB_MISS) | \
INT_MASK(INT_DTLB_ACCESS) | \
INT_MASK(INT_BOOT_ACCESS) | \
INT_MASK(INT_WORLD_ACCESS) | \
INT_MASK(INT_I_ASID) | \
INT_MASK(INT_D_ASID) | \
INT_MASK(INT_DOUBLE_FAULT) | \
0)
#define MASKABLE_INTERRUPTS ( \
INT_MASK(INT_MEM_ERROR) | \
INT_MASK(INT_SINGLE_STEP_3) | \
INT_MASK(INT_SINGLE_STEP_2) | \
INT_MASK(INT_SINGLE_STEP_1) | \
INT_MASK(INT_SINGLE_STEP_0) | \
INT_MASK(INT_IDN_COMPLETE) | \
INT_MASK(INT_UDN_COMPLETE) | \
INT_MASK(INT_IDN_FIREWALL) | \
INT_MASK(INT_UDN_FIREWALL) | \
INT_MASK(INT_TILE_TIMER) | \
INT_MASK(INT_AUX_TILE_TIMER) | \
INT_MASK(INT_IDN_TIMER) | \
INT_MASK(INT_UDN_TIMER) | \
INT_MASK(INT_IDN_AVAIL) | \
INT_MASK(INT_UDN_AVAIL) | \
INT_MASK(INT_IPI_3) | \
INT_MASK(INT_IPI_2) | \
INT_MASK(INT_IPI_1) | \
INT_MASK(INT_IPI_0) | \
INT_MASK(INT_PERF_COUNT) | \
INT_MASK(INT_AUX_PERF_COUNT) | \
INT_MASK(INT_INTCTRL_3) | \
INT_MASK(INT_INTCTRL_2) | \
INT_MASK(INT_INTCTRL_1) | \
INT_MASK(INT_INTCTRL_0) | \
0)
#define UNMASKABLE_INTERRUPTS ( \
INT_MASK(INT_ITLB_MISS) | \
INT_MASK(INT_ILL) | \
INT_MASK(INT_GPV) | \
INT_MASK(INT_IDN_ACCESS) | \
INT_MASK(INT_UDN_ACCESS) | \
INT_MASK(INT_SWINT_3) | \
INT_MASK(INT_SWINT_2) | \
INT_MASK(INT_SWINT_1) | \
INT_MASK(INT_SWINT_0) | \
INT_MASK(INT_ILL_TRANS) | \
INT_MASK(INT_UNALIGN_DATA) | \
INT_MASK(INT_DTLB_MISS) | \
INT_MASK(INT_DTLB_ACCESS) | \
INT_MASK(INT_BOOT_ACCESS) | \
INT_MASK(INT_WORLD_ACCESS) | \
INT_MASK(INT_I_ASID) | \
INT_MASK(INT_D_ASID) | \
INT_MASK(INT_DOUBLE_FAULT) | \
0)
#define SYNC_INTERRUPTS ( \
INT_MASK(INT_SINGLE_STEP_3) | \
INT_MASK(INT_SINGLE_STEP_2) | \
INT_MASK(INT_SINGLE_STEP_1) | \
INT_MASK(INT_SINGLE_STEP_0) | \
INT_MASK(INT_IDN_COMPLETE) | \
INT_MASK(INT_UDN_COMPLETE) | \
INT_MASK(INT_ITLB_MISS) | \
INT_MASK(INT_ILL) | \
INT_MASK(INT_GPV) | \
INT_MASK(INT_IDN_ACCESS) | \
INT_MASK(INT_UDN_ACCESS) | \
INT_MASK(INT_SWINT_3) | \
INT_MASK(INT_SWINT_2) | \
INT_MASK(INT_SWINT_1) | \
INT_MASK(INT_SWINT_0) | \
INT_MASK(INT_ILL_TRANS) | \
INT_MASK(INT_UNALIGN_DATA) | \
INT_MASK(INT_DTLB_MISS) | \
INT_MASK(INT_DTLB_ACCESS) | \
0)
#define NON_SYNC_INTERRUPTS ( \
INT_MASK(INT_MEM_ERROR) | \
INT_MASK(INT_IDN_FIREWALL) | \
INT_MASK(INT_UDN_FIREWALL) | \
INT_MASK(INT_TILE_TIMER) | \
INT_MASK(INT_AUX_TILE_TIMER) | \
INT_MASK(INT_IDN_TIMER) | \
INT_MASK(INT_UDN_TIMER) | \
INT_MASK(INT_IDN_AVAIL) | \
INT_MASK(INT_UDN_AVAIL) | \
INT_MASK(INT_IPI_3) | \
INT_MASK(INT_IPI_2) | \
INT_MASK(INT_IPI_1) | \
INT_MASK(INT_IPI_0) | \
INT_MASK(INT_PERF_COUNT) | \
INT_MASK(INT_AUX_PERF_COUNT) | \
INT_MASK(INT_INTCTRL_3) | \
INT_MASK(INT_INTCTRL_2) | \
INT_MASK(INT_INTCTRL_1) | \
INT_MASK(INT_INTCTRL_0) | \
INT_MASK(INT_BOOT_ACCESS) | \
INT_MASK(INT_WORLD_ACCESS) | \
INT_MASK(INT_I_ASID) | \
INT_MASK(INT_D_ASID) | \
INT_MASK(INT_DOUBLE_FAULT) | \
0)
#endif /* !__ASSEMBLER__ */
#endif /* !__ARCH_INTERRUPTS_H__ */
......@@ -12,6 +12,15 @@
* more details.
*/
/* Include the proper base SPR definition file. */
#ifdef __tilegx__
#include <arch/spr_def_64.h>
#else
#include <arch/spr_def_32.h>
#endif
#ifdef __KERNEL__
/*
* In addition to including the proper base SPR definition file, depending
* on machine architecture, this file defines several macros which allow
......@@ -29,7 +38,6 @@
#define _concat4(a, b, c, d) __concat4(a, b, c, d)
#ifdef __tilegx__
-#include <arch/spr_def_64.h>
/* TILE-Gx dependent, protection-level dependent SPRs. */
......@@ -65,7 +73,6 @@
_concat4(INT_SINGLE_STEP_, CONFIG_KERNEL_PL,,)
#else
-#include <arch/spr_def_32.h>
/* TILEPro dependent, protection-level dependent SPRs. */
......@@ -102,3 +109,5 @@
_concat4(SPR_INTCTRL_, CONFIG_KERNEL_PL, _STATUS,)
#define INT_INTCTRL_K \
_concat4(INT_INTCTRL_, CONFIG_KERNEL_PL,,)
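/* Worked example (illustrative): with CONFIG_KERNEL_PL defined as 1,
 *	INT_INTCTRL_K -> _concat4(INT_INTCTRL_, 1,,) -> INT_INTCTRL_1
 * so generic kernel code can name the SPR or interrupt appropriate to
 * whatever protection level the kernel was configured to run at. */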
#endif /* __KERNEL__ */
/*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef __DOXYGEN__
#ifndef __ARCH_SPR_DEF_H__
#define __ARCH_SPR_DEF_H__
#define SPR_AUX_PERF_COUNT_0 0x2105
#define SPR_AUX_PERF_COUNT_1 0x2106
#define SPR_AUX_PERF_COUNT_CTL 0x2107
#define SPR_AUX_PERF_COUNT_STS 0x2108
#define SPR_CMPEXCH_VALUE 0x2780
#define SPR_CYCLE 0x2781
#define SPR_DONE 0x2705
#define SPR_DSTREAM_PF 0x2706
#define SPR_EVENT_BEGIN 0x2782
#define SPR_EVENT_END 0x2783
#define SPR_EX_CONTEXT_0_0 0x2580
#define SPR_EX_CONTEXT_0_1 0x2581
#define SPR_EX_CONTEXT_0_1__PL_SHIFT 0
#define SPR_EX_CONTEXT_0_1__PL_RMASK 0x3
#define SPR_EX_CONTEXT_0_1__PL_MASK 0x3
#define SPR_EX_CONTEXT_0_1__ICS_SHIFT 2
#define SPR_EX_CONTEXT_0_1__ICS_RMASK 0x1
#define SPR_EX_CONTEXT_0_1__ICS_MASK 0x4
#define SPR_EX_CONTEXT_1_0 0x2480
#define SPR_EX_CONTEXT_1_1 0x2481
#define SPR_EX_CONTEXT_1_1__PL_SHIFT 0
#define SPR_EX_CONTEXT_1_1__PL_RMASK 0x3
#define SPR_EX_CONTEXT_1_1__PL_MASK 0x3
#define SPR_EX_CONTEXT_1_1__ICS_SHIFT 2
#define SPR_EX_CONTEXT_1_1__ICS_RMASK 0x1
#define SPR_EX_CONTEXT_1_1__ICS_MASK 0x4
#define SPR_EX_CONTEXT_2_0 0x2380
#define SPR_EX_CONTEXT_2_1 0x2381
#define SPR_EX_CONTEXT_2_1__PL_SHIFT 0
#define SPR_EX_CONTEXT_2_1__PL_RMASK 0x3
#define SPR_EX_CONTEXT_2_1__PL_MASK 0x3
#define SPR_EX_CONTEXT_2_1__ICS_SHIFT 2
#define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
#define SPR_EX_CONTEXT_2_1__ICS_MASK 0x4
#define SPR_FAIL 0x2707
#define SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK 0x1
#define SPR_INTCTRL_0_STATUS 0x2505
#define SPR_INTCTRL_1_STATUS 0x2405
#define SPR_INTCTRL_2_STATUS 0x2305
#define SPR_INTERRUPT_CRITICAL_SECTION 0x2708
#define SPR_INTERRUPT_MASK_0 0x2506
#define SPR_INTERRUPT_MASK_1 0x2406
#define SPR_INTERRUPT_MASK_2 0x2306
#define SPR_INTERRUPT_MASK_RESET_0 0x2507
#define SPR_INTERRUPT_MASK_RESET_1 0x2407
#define SPR_INTERRUPT_MASK_RESET_2 0x2307
#define SPR_INTERRUPT_MASK_SET_0 0x2508
#define SPR_INTERRUPT_MASK_SET_1 0x2408
#define SPR_INTERRUPT_MASK_SET_2 0x2308
#define SPR_INTERRUPT_VECTOR_BASE_0 0x2509
#define SPR_INTERRUPT_VECTOR_BASE_1 0x2409
#define SPR_INTERRUPT_VECTOR_BASE_2 0x2309
#define SPR_INTERRUPT_VECTOR_BASE_3 0x2209
#define SPR_IPI_EVENT_0 0x1f05
#define SPR_IPI_EVENT_1 0x1e05
#define SPR_IPI_EVENT_2 0x1d05
#define SPR_IPI_EVENT_RESET_0 0x1f06
#define SPR_IPI_EVENT_RESET_1 0x1e06
#define SPR_IPI_EVENT_RESET_2 0x1d06
#define SPR_IPI_EVENT_SET_0 0x1f07
#define SPR_IPI_EVENT_SET_1 0x1e07
#define SPR_IPI_EVENT_SET_2 0x1d07
#define SPR_IPI_MASK_0 0x1f08
#define SPR_IPI_MASK_1 0x1e08
#define SPR_IPI_MASK_2 0x1d08
#define SPR_IPI_MASK_RESET_0 0x1f09
#define SPR_IPI_MASK_RESET_1 0x1e09
#define SPR_IPI_MASK_RESET_2 0x1d09
#define SPR_IPI_MASK_SET_0 0x1f0a
#define SPR_IPI_MASK_SET_1 0x1e0a
#define SPR_IPI_MASK_SET_2 0x1d0a
#define SPR_MPL_AUX_TILE_TIMER_SET_0 0x1700
#define SPR_MPL_AUX_TILE_TIMER_SET_1 0x1701
#define SPR_MPL_AUX_TILE_TIMER_SET_2 0x1702
#define SPR_MPL_INTCTRL_0_SET_0 0x2500
#define SPR_MPL_INTCTRL_0_SET_1 0x2501
#define SPR_MPL_INTCTRL_0_SET_2 0x2502
#define SPR_MPL_INTCTRL_1_SET_0 0x2400
#define SPR_MPL_INTCTRL_1_SET_1 0x2401
#define SPR_MPL_INTCTRL_1_SET_2 0x2402
#define SPR_MPL_INTCTRL_2_SET_0 0x2300
#define SPR_MPL_INTCTRL_2_SET_1 0x2301
#define SPR_MPL_INTCTRL_2_SET_2 0x2302
#define SPR_MPL_UDN_ACCESS_SET_0 0x0b00
#define SPR_MPL_UDN_ACCESS_SET_1 0x0b01
#define SPR_MPL_UDN_ACCESS_SET_2 0x0b02
#define SPR_MPL_UDN_AVAIL_SET_0 0x1b00
#define SPR_MPL_UDN_AVAIL_SET_1 0x1b01
#define SPR_MPL_UDN_AVAIL_SET_2 0x1b02
#define SPR_MPL_UDN_COMPLETE_SET_0 0x0600
#define SPR_MPL_UDN_COMPLETE_SET_1 0x0601
#define SPR_MPL_UDN_COMPLETE_SET_2 0x0602
#define SPR_MPL_UDN_FIREWALL_SET_0 0x1500
#define SPR_MPL_UDN_FIREWALL_SET_1 0x1501
#define SPR_MPL_UDN_FIREWALL_SET_2 0x1502
#define SPR_MPL_UDN_TIMER_SET_0 0x1900
#define SPR_MPL_UDN_TIMER_SET_1 0x1901
#define SPR_MPL_UDN_TIMER_SET_2 0x1902
#define SPR_MPL_WORLD_ACCESS_SET_0 0x2700
#define SPR_MPL_WORLD_ACCESS_SET_1 0x2701
#define SPR_MPL_WORLD_ACCESS_SET_2 0x2702
#define SPR_PASS 0x2709
#define SPR_PERF_COUNT_0 0x2005
#define SPR_PERF_COUNT_1 0x2006
#define SPR_PERF_COUNT_CTL 0x2007
#define SPR_PERF_COUNT_DN_CTL 0x2008
#define SPR_PERF_COUNT_STS 0x2009
#define SPR_PROC_STATUS 0x2784
#define SPR_SIM_CONTROL 0x2785
#define SPR_SINGLE_STEP_CONTROL_0 0x0405
#define SPR_SINGLE_STEP_CONTROL_0__CANCELED_MASK 0x1
#define SPR_SINGLE_STEP_CONTROL_0__INHIBIT_MASK 0x2
#define SPR_SINGLE_STEP_CONTROL_1 0x0305
#define SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK 0x1
#define SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK 0x2
#define SPR_SINGLE_STEP_CONTROL_2 0x0205
#define SPR_SINGLE_STEP_CONTROL_2__CANCELED_MASK 0x1
#define SPR_SINGLE_STEP_CONTROL_2__INHIBIT_MASK 0x2
#define SPR_SINGLE_STEP_EN_0_0 0x250a
#define SPR_SINGLE_STEP_EN_0_1 0x240a
#define SPR_SINGLE_STEP_EN_0_2 0x230a
#define SPR_SINGLE_STEP_EN_1_0 0x250b
#define SPR_SINGLE_STEP_EN_1_1 0x240b
#define SPR_SINGLE_STEP_EN_1_2 0x230b
#define SPR_SINGLE_STEP_EN_2_0 0x250c
#define SPR_SINGLE_STEP_EN_2_1 0x240c
#define SPR_SINGLE_STEP_EN_2_2 0x230c
#define SPR_SYSTEM_SAVE_0_0 0x2582
#define SPR_SYSTEM_SAVE_0_1 0x2583
#define SPR_SYSTEM_SAVE_0_2 0x2584
#define SPR_SYSTEM_SAVE_0_3 0x2585
#define SPR_SYSTEM_SAVE_1_0 0x2482
#define SPR_SYSTEM_SAVE_1_1 0x2483
#define SPR_SYSTEM_SAVE_1_2 0x2484
#define SPR_SYSTEM_SAVE_1_3 0x2485
#define SPR_SYSTEM_SAVE_2_0 0x2382
#define SPR_SYSTEM_SAVE_2_1 0x2383
#define SPR_SYSTEM_SAVE_2_2 0x2384
#define SPR_SYSTEM_SAVE_2_3 0x2385
#define SPR_TILE_COORD 0x270b
#define SPR_TILE_RTF_HWM 0x270c
#define SPR_TILE_TIMER_CONTROL 0x1605
#define SPR_UDN_AVAIL_EN 0x1b05
#define SPR_UDN_DATA_AVAIL 0x0b80
#define SPR_UDN_DEADLOCK_TIMEOUT 0x1906
#define SPR_UDN_DEMUX_COUNT_0 0x0b05
#define SPR_UDN_DEMUX_COUNT_1 0x0b06
#define SPR_UDN_DEMUX_COUNT_2 0x0b07
#define SPR_UDN_DEMUX_COUNT_3 0x0b08
#define SPR_UDN_DIRECTION_PROTECT 0x1505
#endif /* !defined(__ARCH_SPR_DEF_H__) */
#endif /* !defined(__DOXYGEN__) */
......@@ -130,17 +130,52 @@ static inline int atomic_read(const atomic_t *v)
*/
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
/*
* We define xchg() and cmpxchg() in the included headers.
* Note that we do not define __HAVE_ARCH_CMPXCHG, since that would imply
* that cmpxchg() is an efficient operation, which is not particularly true.
*/
/* Nonexistent functions intended to cause link errors. */
extern unsigned long __xchg_called_with_bad_pointer(void);
extern unsigned long __cmpxchg_called_with_bad_pointer(void);
#define xchg(ptr, x) \
({ \
typeof(*(ptr)) __x; \
switch (sizeof(*(ptr))) { \
case 4: \
__x = (typeof(__x))(typeof(__x-__x))atomic_xchg( \
(atomic_t *)(ptr), \
(u32)(typeof((x)-(x)))(x)); \
break; \
case 8: \
__x = (typeof(__x))(typeof(__x-__x))atomic64_xchg( \
(atomic64_t *)(ptr), \
(u64)(typeof((x)-(x)))(x)); \
break; \
default: \
__xchg_called_with_bad_pointer(); \
} \
__x; \
})
#define cmpxchg(ptr, o, n) \
({ \
typeof(*(ptr)) __x; \
switch (sizeof(*(ptr))) { \
case 4: \
__x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \
(atomic_t *)(ptr), \
(u32)(typeof((o)-(o)))(o), \
(u32)(typeof((n)-(n)))(n)); \
break; \
case 8: \
__x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \
(atomic64_t *)(ptr), \
(u64)(typeof((o)-(o)))(o), \
(u64)(typeof((n)-(n)))(n)); \
break; \
default: \
__cmpxchg_called_with_bad_pointer(); \
} \
__x; \
})
#define tas(ptr) (xchg((ptr), 1))
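/*
 * Illustrative usage (not part of this header): the classic cmpxchg()
 * retry loop, here atomically storing max(*p, n).  The function name is
 * hypothetical, for the example only.
 */
static inline void example_atomic_max(unsigned long *p, unsigned long n)
{
	unsigned long old = *p;
	while (old < n) {
		unsigned long prev = cmpxchg(p, old, n);
		if (prev == old)
			break;		/* our update won the race */
		old = prev;		/* lost the race; re-check with new value */
	}
}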
#endif /* __ASSEMBLY__ */
......
......@@ -110,16 +110,6 @@ static inline void atomic_set(atomic_t *v, int n)
_atomic_xchg(v, n);
}
-#define xchg(ptr, x) ((typeof(*(ptr))) \
-((sizeof(*(ptr)) == sizeof(atomic_t)) ? \
-atomic_xchg((atomic_t *)(ptr), (long)(x)) : \
-__xchg_called_with_bad_pointer()))
-#define cmpxchg(ptr, o, n) ((typeof(*(ptr))) \
-((sizeof(*(ptr)) == sizeof(atomic_t)) ? \
-atomic_cmpxchg((atomic_t *)(ptr), (long)(o), (long)(n)) : \
-__cmpxchg_called_with_bad_pointer()))
/* A 64bit atomic type */
typedef struct {
......
/*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* Do not include directly; use <asm/atomic.h>.
*/
#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H
#ifndef __ASSEMBLY__
#include <arch/spr_def.h>
/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */
#define atomic_set(v, i) ((v)->counter = (i))
/*
* The smp_mb() operations throughout are to support the fact that
* Linux requires memory barriers before and after the operation,
* on any routine which updates memory and returns a value.
*/
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
int val;
__insn_mtspr(SPR_CMPEXCH_VALUE, o);
smp_mb(); /* barrier for proper semantics */
val = __insn_cmpexch4((void *)&v->counter, n);
smp_mb(); /* barrier for proper semantics */
return val;
}
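/*
 * Note: on TILE-Gx the comparison operand of the cmpexch instruction is
 * taken from the SPR_CMPEXCH_VALUE register rather than from an
 * instruction operand, which is why the mtspr above must precede the
 * cmpexch4 itself.
 */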
static inline int atomic_xchg(atomic_t *v, int n)
{
int val;
smp_mb(); /* barrier for proper semantics */
val = __insn_exch4((void *)&v->counter, n);
smp_mb(); /* barrier for proper semantics */
return val;
}
static inline void atomic_add(int i, atomic_t *v)
{
__insn_fetchadd4((void *)&v->counter, i);
}
static inline int atomic_add_return(int i, atomic_t *v)
{
int val;
smp_mb(); /* barrier for proper semantics */
val = __insn_fetchadd4((void *)&v->counter, i) + i;
barrier(); /* the "+ i" above will wait on memory */
return val;
}
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int guess, oldval = v->counter;
do {
if (oldval == u)
break;
guess = oldval;
oldval = atomic_cmpxchg(v, guess, guess + a);
} while (guess != oldval);
return oldval != u;
}
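/*
 * Illustrative caller (not from this file): atomic_add_unless() is the
 * usual building block for "take a reference only if the object is still
 * live", e.g.
 *
 *	if (!atomic_add_unless(&obj->refcnt, 1, 0))
 *		return NULL;	(the refcount had already hit zero)
 */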
/* Now the true 64-bit operations. */
#define ATOMIC64_INIT(i) { (i) }
#define atomic64_read(v) ((v)->counter)
#define atomic64_set(v, i) ((v)->counter = (i))
static inline long atomic64_cmpxchg(atomic64_t *v, long o, long n)
{
long val;
smp_mb(); /* barrier for proper semantics */
__insn_mtspr(SPR_CMPEXCH_VALUE, o);
val = __insn_cmpexch((void *)&v->counter, n);
smp_mb(); /* barrier for proper semantics */
return val;
}
static inline long atomic64_xchg(atomic64_t *v, long n)
{
long val;
smp_mb(); /* barrier for proper semantics */
val = __insn_exch((void *)&v->counter, n);
smp_mb(); /* barrier for proper semantics */
return val;
}
static inline void atomic64_add(long i, atomic64_t *v)
{
__insn_fetchadd((void *)&v->counter, i);
}
static inline long atomic64_add_return(long i, atomic64_t *v)
{
long val;
smp_mb(); /* barrier for proper semantics */
val = __insn_fetchadd((void *)&v->counter, i) + i;
barrier(); /* the "+ i" above will wait on memory */
return val;
}
static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
long guess, oldval = v->counter;
do {
if (oldval == u)
break;
guess = oldval;
oldval = atomic64_cmpxchg(v, guess, guess + a);
} while (guess != oldval);
return oldval != u;
}
#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
#define atomic64_sub(i, v) atomic64_add(-(i), (v))
#define atomic64_inc_return(v) atomic64_add_return(1, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
#define atomic64_inc(v) atomic64_add(1, (v))
#define atomic64_dec(v) atomic64_sub(1, (v))
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
/* Atomic dec and inc don't implement barrier, so provide them if needed. */
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
/* Define this to indicate that cmpxchg is an efficient operation. */
#define __HAVE_ARCH_CMPXCHG
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_TILE_ATOMIC_64_H */
......@@ -12,80 +12,41 @@
* more details.
*/
-#ifndef _TILE_BACKTRACE_H
-#define _TILE_BACKTRACE_H
+#ifndef _ASM_TILE_BACKTRACE_H
+#define _ASM_TILE_BACKTRACE_H
#include <linux/types.h>
#include <arch/chip.h>
-#if defined(__tile__)
-typedef unsigned long VirtualAddress;
-#elif CHIP_VA_WIDTH() > 32
-typedef unsigned long long VirtualAddress;
-#else
-typedef unsigned int VirtualAddress;
-#endif
-/** Reads 'size' bytes from 'address' and writes the data to 'result'.
+/* Reads 'size' bytes from 'address' and writes the data to 'result'.
* Returns true if successful, else false (e.g. memory not readable).
*/
typedef bool (*BacktraceMemoryReader)(void *result,
-VirtualAddress address,
+unsigned long address,
unsigned int size,
void *extra);
typedef struct {
-/** Current PC. */
-VirtualAddress pc;
+/* Current PC. */
+unsigned long pc;
-/** Current stack pointer value. */
-VirtualAddress sp;
+/* Current stack pointer value. */
+unsigned long sp;
-/** Current frame pointer value (i.e. caller's stack pointer) */
-VirtualAddress fp;
+/* Current frame pointer value (i.e. caller's stack pointer) */
+unsigned long fp;
-/** Internal use only: caller's PC for first frame. */
-VirtualAddress initial_frame_caller_pc;
+/* Internal use only: caller's PC for first frame. */
+unsigned long initial_frame_caller_pc;
-/** Internal use only: callback to read memory. */
+/* Internal use only: callback to read memory. */
BacktraceMemoryReader read_memory_func;
-/** Internal use only: arbitrary argument to read_memory_func. */
+/* Internal use only: arbitrary argument to read_memory_func. */
void *read_memory_func_extra;
} BacktraceIterator;
-/** Initializes a backtracer to start from the given location.
- *
- * If the frame pointer cannot be determined it is set to -1.
- *
- * @param state The state to be filled in.
- * @param read_memory_func A callback that reads memory. If NULL, a default
- * value is provided.
- * @param read_memory_func_extra An arbitrary argument to read_memory_func.
- * @param pc The current PC.
- * @param lr The current value of the 'lr' register.
- * @param sp The current value of the 'sp' register.
- * @param r52 The current value of the 'r52' register.
- */
-extern void backtrace_init(BacktraceIterator *state,
-BacktraceMemoryReader read_memory_func,
-void *read_memory_func_extra,
-VirtualAddress pc, VirtualAddress lr,
-VirtualAddress sp, VirtualAddress r52);
-/** Advances the backtracing state to the calling frame, returning
- * true iff successful.
- */
-extern bool backtrace_next(BacktraceIterator *state);
typedef enum {
/* We have no idea what the caller's pc is. */
......@@ -138,7 +99,7 @@ enum {
};
-/** Internal constants used to define 'info' operands. */
+/* Internal constants used to define 'info' operands. */
enum {
/* 0 and 1 are reserved, as are all negative numbers. */
......@@ -147,13 +108,10 @@ enum {
CALLER_SP_IN_R52_BASE = 4,
CALLER_SP_OFFSET_BASE = 8,
/* Marks the entry point of certain functions. */
ENTRY_POINT_INFO_OP = 16
};
-/** Current backtracer state describing where it thinks the caller is. */
+/* Current backtracer state describing where it thinks the caller is. */
typedef struct {
/*
* Public fields
......@@ -192,7 +150,13 @@ typedef struct {
} CallerLocation;
extern void backtrace_init(BacktraceIterator *state,
BacktraceMemoryReader read_memory_func,
void *read_memory_func_extra,
unsigned long pc, unsigned long lr,
unsigned long sp, unsigned long r52);
extern bool backtrace_next(BacktraceIterator *state);
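/*
 * Illustrative driver loop (a simplified sketch of what the kernel's
 * stack-dump code does; pr_info() is assumed from <linux/printk.h>).
 * Passing NULL for read_memory_func selects the default reader, per the
 * documentation above.
 */
static void example_backtrace(unsigned long pc, unsigned long lr,
			      unsigned long sp, unsigned long r52)
{
	BacktraceIterator it;
	backtrace_init(&it, NULL, NULL, pc, lr, sp, r52);
	do {
		pr_info("  frame: pc %#lx sp %#lx\n", it.pc, it.sp);
	} while (backtrace_next(&it));
}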
-#endif /* _TILE_BACKTRACE_H */
+#endif /* _ASM_TILE_BACKTRACE_H */
......@@ -122,6 +122,7 @@ static inline unsigned long __arch_hweight64(__u64 w)
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/le.h>
#endif /* _ASM_TILE_BITOPS_H */
......@@ -126,7 +126,6 @@ static inline int test_and_change_bit(unsigned nr,
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() do {} while (0)
-#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#endif /* _ASM_TILE_BITOPS_32_H */
/*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_BITOPS_64_H
#define _ASM_TILE_BITOPS_64_H
#include <linux/compiler.h>
#include <asm/atomic.h>
#include <asm/system.h>
/* See <asm/bitops.h> for API comments. */
static inline void set_bit(unsigned nr, volatile unsigned long *addr)
{
unsigned long mask = (1UL << (nr % BITS_PER_LONG));
__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask);
}
static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
{
unsigned long mask = (1UL << (nr % BITS_PER_LONG));
__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask);
}
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()
static inline void change_bit(unsigned nr, volatile unsigned long *addr)
{
unsigned long mask = (1UL << (nr % BITS_PER_LONG));
long guess, oldval;
addr += nr / BITS_PER_LONG;
oldval = *addr;
do {
guess = oldval;
oldval = atomic64_cmpxchg((atomic64_t *)addr,
guess, guess ^ mask);
} while (guess != oldval);
}
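/*
 * TILE-Gx provides fetchadd/fetchand/fetchor but no fetch-xor, so
 * change_bit() above (and test_and_change_bit() below) emulate the xor
 * with a cmpxchg retry loop rather than a single atomic instruction.
 */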
/*
* The test_and_xxx_bit() routines require a memory fence before we
* start the operation, and after the operation completes. We use
* smp_mb() before, and rely on the "!= 0" comparison, plus a compiler
* barrier(), to block until the atomic op is complete.
*/
static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
{
int val;
unsigned long mask = (1UL << (nr % BITS_PER_LONG));
smp_mb(); /* barrier for proper semantics */
val = (__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask)
& mask) != 0;
barrier();
return val;
}
static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
{
int val;
unsigned long mask = (1UL << (nr % BITS_PER_LONG));
smp_mb(); /* barrier for proper semantics */
val = (__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask)
& mask) != 0;
barrier();
return val;
}
static inline int test_and_change_bit(unsigned nr,
volatile unsigned long *addr)
{
unsigned long mask = (1UL << (nr % BITS_PER_LONG));
long guess, oldval;
addr += nr / BITS_PER_LONG;
oldval = *addr;
do {
guess = oldval;
oldval = atomic64_cmpxchg((atomic64_t *)addr,
guess, guess ^ mask);
} while (guess != oldval);
return (oldval & mask) != 0;
}
#define ext2_set_bit_atomic(lock, nr, addr) \
test_and_set_bit((nr), (unsigned long *)(addr))
#define ext2_clear_bit_atomic(lock, nr, addr) \
test_and_clear_bit((nr), (unsigned long *)(addr))
#endif /* _ASM_TILE_BITOPS_64_H */
......@@ -116,22 +116,28 @@ static inline void __finv_buffer(void *buffer, size_t size)
}
-/* Invalidate a VA range, then memory fence. */
+/* Invalidate a VA range and wait for it to be complete. */
static inline void inv_buffer(void *buffer, size_t size)
{
__inv_buffer(buffer, size);
-mb_incoherent();
+mb();
}
-/* Flush a VA range, then memory fence. */
-static inline void flush_buffer(void *buffer, size_t size)
+/*
+ * Flush a locally-homecached VA range and wait for the evicted
+ * cachelines to hit memory.
+ */
+static inline void flush_buffer_local(void *buffer, size_t size)
{
__flush_buffer(buffer, size);
mb_incoherent();
}
-/* Flush & invalidate a VA range, then memory fence. */
-static inline void finv_buffer(void *buffer, size_t size)
+/*
+ * Flush and invalidate a locally-homecached VA range and wait for the
+ * evicted cachelines to hit memory.
+ */
+static inline void finv_buffer_local(void *buffer, size_t size)
{
__finv_buffer(buffer, size);
mb_incoherent();
......
......@@ -215,8 +215,8 @@ struct compat_sigaction;
struct compat_siginfo;
struct compat_sigaltstack;
long compat_sys_execve(const char __user *path,
-const compat_uptr_t __user *argv,
-const compat_uptr_t __user *envp, struct pt_regs *);
+compat_uptr_t __user *argv,
+compat_uptr_t __user *envp, struct pt_regs *);
long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
struct compat_sigaction __user *oact,
size_t sigsetsize);
......
......@@ -65,7 +65,8 @@ extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t,
extern void dma_sync_single_range_for_device(struct device *, dma_addr_t,
unsigned long offset, size_t,
enum dma_data_direction);
-extern void dma_cache_sync(void *vaddr, size_t, enum dma_data_direction);
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t,
+enum dma_data_direction);
static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
......
#include <asm-generic/fb.h>
......@@ -52,6 +52,7 @@ extern void iounmap(volatile void __iomem *addr);
#endif
#define ioremap_nocache(physaddr, size) ioremap(physaddr, size)
#define ioremap_wc(physaddr, size) ioremap(physaddr, size)
#define ioremap_writethrough(physaddr, size) ioremap(physaddr, size)
#define ioremap_fullcache(physaddr, size) ioremap(physaddr, size)
......@@ -161,6 +162,15 @@ static inline void _tile_writeq(u64 val, unsigned long addr)
#define iowrite32 writel
#define iowrite64 writeq
static inline void memset_io(void *dst, int val, size_t len)
{
int x;
BUG_ON((unsigned long)dst & 0x3);
val = (val & 0xff) * 0x01010101;
for (x = 0; x < len; x += 4)
writel(val, dst + x);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
size_t len)
{
......@@ -269,6 +279,11 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
ioport_panic();
}
#define ioread16be(addr) be16_to_cpu(ioread16(addr))
#define ioread32be(addr) be32_to_cpu(ioread32(addr))
#define iowrite16be(v, addr) iowrite16(cpu_to_be16(v), (addr))
#define iowrite32be(v, addr) iowrite32(cpu_to_be32(v), (addr))
#define ioread8_rep(p, dst, count) \
insb((unsigned long) (p), (dst), (count))
#define ioread16_rep(p, dst, count) \
......@@ -283,4 +298,7 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
#define iowrite32_rep(p, src, count) \
outsl((unsigned long) (p), (src), (count))
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
#endif /* _ASM_TILE_IO_H */
......@@ -23,6 +23,8 @@
/* IRQ numbers used for linux IPIs. */
#define IRQ_RESCHEDULE 1
#define irq_canonicalize(irq) (irq)
void ack_bad_irq(unsigned int irq);
/*
......
......@@ -100,8 +100,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
__get_cpu_var(current_asid) = asid;
/* Clear cpu from the old mm, and set it in the new one. */
-cpumask_clear_cpu(cpu, &prev->cpu_vm_mask);
-cpumask_set_cpu(cpu, &next->cpu_vm_mask);
+cpumask_clear_cpu(cpu, mm_cpumask(prev));
+cpumask_set_cpu(cpu, mm_cpumask(next));
/* Re-load page tables */
install_page_table(next->pgd, asid);
......
......@@ -1502,5 +1502,12 @@ extern int parse_insn_tile(tile_bundle_bits bits,
decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE]);
/* Given a set of bundle bits and a specific pipe, returns which
* instruction the bundle contains in that pipe.
*/
extern const struct tile_opcode *
find_opcode(tile_bundle_bits bits, tile_pipeline pipe);
#endif /* opcode_tile_h */
......@@ -16,7 +16,8 @@
#define _ASM_TILE_PAGE_H
#include <linux/const.h>
-#include <hv/pagesize.h>
+#include <hv/hypervisor.h>
+#include <arch/chip.h>
/* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
#define PAGE_SHIFT HV_LOG2_PAGE_SIZE_SMALL
......@@ -28,8 +29,6 @@
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
-#ifdef __KERNEL__
/*
* If the Kconfig doesn't specify, set a maximum zone order that
* is enough so that we can create huge pages from small pages given
......@@ -39,9 +38,6 @@
#define CONFIG_FORCE_MAX_ZONEORDER (HPAGE_SHIFT - PAGE_SHIFT + 1)
#endif
-#include <hv/hypervisor.h>
-#include <arch/chip.h>
#ifndef __ASSEMBLY__
#include <linux/types.h>
......@@ -91,6 +87,10 @@ typedef struct page *pgtable_t;
/* Must be a macro since it is used to create constants. */
#define __pgprot(val) hv_pte(val)
/* Rarely-used initializers, typically with a "zero" value. */
#define __pte(x) hv_pte(x)
#define __pgd(x) hv_pte(x)
static inline u64 pgprot_val(pgprot_t pgprot)
{
return hv_pte_val(pgprot);
......@@ -110,6 +110,8 @@ static inline u64 pgd_val(pgd_t pgd)
typedef HV_PTE pmd_t;
#define __pmd(x) hv_pte(x)
static inline u64 pmd_val(pmd_t pmd)
{
return hv_pte_val(pmd);
......@@ -318,7 +320,7 @@ static inline int pfn_valid(unsigned long pfn)
/* Provide as macros since these require some other headers included. */
#define page_to_pa(page) ((phys_addr_t)(page_to_pfn(page)) << PAGE_SHIFT)
-#define virt_to_page(kaddr) pfn_to_page(kaddr_to_pfn(kaddr))
+#define virt_to_page(kaddr) pfn_to_page(kaddr_to_pfn((void *)(kaddr)))
#define page_to_virt(page) pfn_to_kaddr(page_to_pfn(page))
struct mm_struct;
......@@ -331,6 +333,4 @@ extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
#include <asm-generic/memory_model.h>
-#endif /* __KERNEL__ */
#endif /* _ASM_TILE_PAGE_H */
#include <asm-generic/parport.h>
......@@ -46,7 +46,8 @@ struct pci_controller {
*/
#define PCI_DMA_BUS_IS_PHYS 1
-int __init tile_pci_init(void);
+int __devinit tile_pci_init(void);
+int __devinit pcibios_init(void);
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
......
/*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
*/
#ifndef _ASM_TILE_PGTABLE_64_H
#define _ASM_TILE_PGTABLE_64_H
/* The level-0 page table breaks the address space into 32-bit chunks. */
#define PGDIR_SHIFT HV_LOG2_L1_SPAN
#define PGDIR_SIZE HV_L1_SPAN
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD HV_L0_ENTRIES
#define SIZEOF_PGD (PTRS_PER_PGD * sizeof(pgd_t))
/*
* The level-1 index is defined by the huge page size. A PMD is composed
* of PTRS_PER_PMD pgd_t's and is the middle level of the page table.
*/
#define PMD_SHIFT HV_LOG2_PAGE_SIZE_LARGE
#define PMD_SIZE HV_PAGE_SIZE_LARGE
#define PMD_MASK (~(PMD_SIZE-1))
#define PTRS_PER_PMD (1 << (PGDIR_SHIFT - PMD_SHIFT))
#define SIZEOF_PMD (PTRS_PER_PMD * sizeof(pmd_t))
/*
* The level-2 index is defined by the difference between the huge
* page size and the normal page size. A PTE is composed of
* PTRS_PER_PTE pte_t's and is the bottom level of the page table.
* Note that the hypervisor docs use PTE for what we call pte_t, so
* this nomenclature is somewhat confusing.
*/
#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
#define SIZEOF_PTE (PTRS_PER_PTE * sizeof(pte_t))
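/*
 * Worked example (assuming the 64 KB small and 16 MB huge page sizes
 * from <hv/hypervisor.h>, i.e. HV_LOG2_PAGE_SIZE_SMALL == 16 and
 * HV_LOG2_PAGE_SIZE_LARGE == 24): PTRS_PER_PTE == 1 << (24 - 16) == 256,
 * and with PGDIR_SHIFT == 32, PTRS_PER_PMD == 1 << (32 - 24) == 256.
 */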
/*
* Align the vmalloc area to an L2 page table, and leave a guard page
* at the beginning and end. The vmalloc code also puts in an internal
* guard page between each allocation.
*/
#define _VMALLOC_END HUGE_VMAP_BASE
#define VMALLOC_END (_VMALLOC_END - PAGE_SIZE)
#define VMALLOC_START (_VMALLOC_START + PAGE_SIZE)
#define HUGE_VMAP_END (HUGE_VMAP_BASE + PGDIR_SIZE)
#ifndef __ASSEMBLY__
/* We have no pud since we are a three-level page table. */
#include <asm-generic/pgtable-nopud.h>
static inline int pud_none(pud_t pud)
{
return pud_val(pud) == 0;
}
static inline int pud_present(pud_t pud)
{
return pud_val(pud) & _PAGE_PRESENT;
}
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd 0x%016llx.\n", __FILE__, __LINE__, pmd_val(e))
static inline void pud_clear(pud_t *pudp)
{
__pte_clear(&pudp->pgd);
}
static inline int pud_bad(pud_t pud)
{
return ((pud_val(pud) & _PAGE_ALL) != _PAGE_TABLE);
}
/* Return the page-table frame number (ptfn) that a pud_t points at. */
#define pud_ptfn(pud) hv_pte_get_ptfn((pud).pgd)
/*
* A given kernel pud_t maps to a kernel pmd_t table at a specific
* virtual address. Since kernel pmd_t tables can be aligned at
* sub-page granularity, this macro can return non-page-aligned
* pointers, despite its name.
*/
#define pud_page_vaddr(pud) \
(__va((phys_addr_t)pud_ptfn(pud) << HV_LOG2_PAGE_TABLE_ALIGN))
/*
* A pud_t points to a pmd_t array. Since we can have multiple per
* page, we don't have a one-to-one mapping of pud_t's to pages.
*/
#define pud_page(pud) pfn_to_page(HV_PTFN_TO_PFN(pud_ptfn(pud)))
static inline unsigned long pud_index(unsigned long address)
{
return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}
#define pmd_offset(pud, address) \
((pmd_t *)pud_page_vaddr(*(pud)) + pmd_index(address))
static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
set_pte(pmdp, pmdval);
}
/* Create a pmd from a PTFN and pgprot. */
static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
{
return hv_pte_set_ptfn(prot, ptfn);
}
/* Return the page-table frame number (ptfn) that a pmd_t points at. */
static inline unsigned long pmd_ptfn(pmd_t pmd)
{
return hv_pte_get_ptfn(pmd);
}
static inline void pmd_clear(pmd_t *pmdp)
{
__pte_clear(pmdp);
}
/* Normalize an address to having the correct high bits set. */
#define pgd_addr_normalize pgd_addr_normalize
static inline unsigned long pgd_addr_normalize(unsigned long addr)
{
return ((long)addr << (CHIP_WORD_SIZE() - CHIP_VA_WIDTH())) >>
(CHIP_WORD_SIZE() - CHIP_VA_WIDTH());
}
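/*
 * Worked example: with CHIP_WORD_SIZE() == 64 and CHIP_VA_WIDTH() == 42,
 * this shifts left and then arithmetically right by 22 bits, replicating
 * VA bit 41 into bits 63..42; that is exactly the "extra bits must be
 * the sign extension of the low bits" rule from <arch/chip.h>.
 */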
/* We don't define any pgds for these addresses. */
static inline int pgd_addr_invalid(unsigned long addr)
{
return addr >= MEM_HV_START ||
(addr > MEM_LOW_END && addr < MEM_HIGH_START);
}
/*
* Use atomic instructions to provide atomicity against the hypervisor.
*/
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
return (__insn_fetchand(&ptep->val, ~HV_PTE_ACCESSED) >>
HV_PTE_INDEX_ACCESSED) & 0x1;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
__insn_fetchand(&ptep->val, ~HV_PTE_WRITABLE);
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
return hv_pte(__insn_exch(&ptep->val, 0UL));
}
#endif /* __ASSEMBLY__ */
#endif /* _ASM_TILE_PGTABLE_64_H */
......@@ -215,6 +215,8 @@ static inline void release_thread(struct task_struct *dead_task)
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern int do_work_pending(struct pt_regs *regs, u32 flags);
/*
* Return saved (kernel) PC of a blocked thread.
......@@ -255,10 +257,6 @@ static inline void cpu_relax(void)
barrier();
}
-struct siginfo;
-extern void arch_coredump_signal(struct siginfo *, struct pt_regs *);
-#define arch_coredump_signal arch_coredump_signal
/* Info on this processor (see fs/proc/cpuinfo.c) */
struct seq_operations;
extern const struct seq_operations cpuinfo_op;
......@@ -269,9 +267,6 @@ extern char chip_model[64];
/* Data on which physical memory controller corresponds to which NUMA node. */
extern int node_controller[];
-/* Do we dump information to the console when a user application crashes? */
-extern int show_crashinfo;
#if CHIP_HAS_CBOX_HOME_MAP()
/* Does the heap allocator return hash-for-home pages by default? */
extern int hash_default;
......
#include <asm-generic/serial.h>
......@@ -28,6 +28,10 @@ struct pt_regs;
int restore_sigcontext(struct pt_regs *, struct sigcontext __user *);
int setup_sigcontext(struct sigcontext __user *, struct pt_regs *);
void do_signal(struct pt_regs *regs);
void signal_fault(const char *type, struct pt_regs *,
void __user *frame, int sig);
void trace_unhandled_signal(const char *type, struct pt_regs *regs,
unsigned long address, int signo);
#endif
#endif /* _ASM_TILE_SIGNAL_H */
/*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* 64-bit SMP ticket spinlocks, allowing only a single CPU anywhere
* (the type definitions are in asm/spinlock_types.h)
*/
#ifndef _ASM_TILE_SPINLOCK_64_H
#define _ASM_TILE_SPINLOCK_64_H
/* Shifts and masks for the various fields in "lock". */
#define __ARCH_SPIN_CURRENT_SHIFT 17
#define __ARCH_SPIN_NEXT_MASK 0x7fff
#define __ARCH_SPIN_NEXT_OVERFLOW 0x8000
/*
* Return the "current" portion of a ticket lock value,
* i.e. the number that currently owns the lock.
*/
static inline int arch_spin_current(u32 val)
{
return val >> __ARCH_SPIN_CURRENT_SHIFT;
}
/*
* Return the "next" portion of a ticket lock value,
* i.e. the number that the next task to try to acquire the lock will get.
*/
static inline int arch_spin_next(u32 val)
{
return val & __ARCH_SPIN_NEXT_MASK;
}
/* The lock is locked if a task would have to wait to get it. */
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
u32 val = lock->lock;
return arch_spin_current(val) != arch_spin_next(val);
}
/* Bump the current ticket so the next task owns the lock. */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
wmb(); /* guarantee anything modified under the lock is visible */
__insn_fetchadd4(&lock->lock, 1U << __ARCH_SPIN_CURRENT_SHIFT);
}
void arch_spin_unlock_wait(arch_spinlock_t *lock);
void arch_spin_lock_slow(arch_spinlock_t *lock, u32 val);
/* Grab the "next" ticket number and bump it atomically.
* If the current ticket is not ours, go to the slow path.
* We also take the slow path if the "next" value overflows.
*/
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
u32 val = __insn_fetchadd4(&lock->lock, 1);
u32 ticket = val & (__ARCH_SPIN_NEXT_MASK | __ARCH_SPIN_NEXT_OVERFLOW);
if (unlikely(arch_spin_current(val) != ticket))
arch_spin_lock_slow(lock, ticket);
}
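/*
 * Worked example (illustrative): an uncontended lock with current ==
 * next == 5 holds (5 << 17) | 5.  The fetchadd4 in arch_spin_lock()
 * returns that value and bumps "next" to 6; our ticket is 5, which
 * matches arch_spin_current(val), so we own the lock without taking the
 * slow path.  arch_spin_unlock() later adds 1 << 17, making "current" 6
 * for the next waiter.
 */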
/* Try to get the lock, and return whether we succeeded. */
int arch_spin_trylock(arch_spinlock_t *lock);
/* We cannot take an interrupt after getting a ticket, so don't enable them. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
*
* We use fetchadd() for readers, and fetchor() with the sign bit
* for writers.
*/
#define __WRITE_LOCK_BIT (1 << 31)
static inline int arch_write_val_locked(int val)
{
return val < 0; /* Optimize "val & __WRITE_LOCK_BIT". */
}
/**
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
static inline int arch_read_can_lock(arch_rwlock_t *rw)
{
return !arch_write_val_locked(rw->lock);
}
/**
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
static inline int arch_write_can_lock(arch_rwlock_t *rw)
{
return rw->lock == 0;
}
extern void __read_lock_failed(arch_rwlock_t *rw);
static inline void arch_read_lock(arch_rwlock_t *rw)
{
u32 val = __insn_fetchaddgez4(&rw->lock, 1);
if (unlikely(arch_write_val_locked(val)))
__read_lock_failed(rw);
}
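/*
 * Note: fetchaddgez4 performs the add only when the result is
 * non-negative, so a reader never perturbs the lock word while a
 * writer holds the sign bit; __read_lock_failed() simply retries.
 */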
extern void __write_lock_failed(arch_rwlock_t *rw, u32 val);
static inline void arch_write_lock(arch_rwlock_t *rw)
{
u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
if (unlikely(val != 0))
__write_lock_failed(rw, val);
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
__insn_mf();
__insn_fetchadd4(&rw->lock, -1);
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
__insn_mf();
rw->lock = 0;
}
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
return !arch_write_val_locked(__insn_fetchaddgez4(&rw->lock, 1));
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
if (likely(val == 0))
return 1;
if (!arch_write_val_locked(val))
__insn_fetchand4(&rw->lock, ~__WRITE_LOCK_BIT);
return 0;
}
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#endif /* _ASM_TILE_SPINLOCK_64_H */
-#ifdef CONFIG_COMPAT
+#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
#define __ARCH_WANT_STAT64 /* Used for compat_sys_stat64() etc. */
#endif
#include <asm-generic/stat.h>
......@@ -18,12 +18,6 @@
/* Tile gcc is always >= 4.3.0, so we use __builtin_bswap. */
#define __arch_swab32(x) __builtin_bswap32(x)
#define __arch_swab64(x) __builtin_bswap64(x)
-/* Use the variant that is natural for the wordsize. */
-#ifdef CONFIG_64BIT
-#define __arch_swab16(x) (__builtin_bswap64(x) >> 48)
-#else
-#define __arch_swab16(x) (__builtin_bswap32(x) >> 16)
-#endif
#endif /* _ASM_TILE_SWAB_H */
......@@ -125,6 +125,7 @@ extern void cpu_idle_on_new_stack(struct thread_info *old_ti,
#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
#define TIF_SECCOMP 6 /* secure computing */
#define TIF_MEMDIE 7 /* OOM killer at work */
#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
......@@ -134,10 +135,12 @@ extern void cpu_idle_on_new_stack(struct thread_info *old_ti,
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
#define _TIF_MEMDIE (1<<TIF_MEMDIE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
/* Work to do on any return to user space. */
#define _TIF_ALLWORK_MASK \
-(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SINGLESTEP|_TIF_ASYNC_TLB)
+(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SINGLESTEP|\
+_TIF_ASYNC_TLB|_TIF_NOTIFY_RESUME)
/*
* Thread-synchronous status.
......
......@@ -15,10 +15,14 @@
#ifndef _ASM_TILE_TRAPS_H
#define _ASM_TILE_TRAPS_H
#include <arch/chip.h>
/* mm/fault.c */
void do_page_fault(struct pt_regs *, int fault_num,
unsigned long address, unsigned long write);
#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
void do_async_page_fault(struct pt_regs *);
#endif
#ifndef __tilegx__
/*
......
......@@ -15,7 +15,7 @@
#if !defined(_ASM_TILE_UNISTD_H) || defined(__SYSCALL)
#define _ASM_TILE_UNISTD_H
-#ifndef __LP64__
+#if !defined(__LP64__) || defined(__SYSCALL_COMPAT)
/* Use the flavor of this syscall that matches the 32-bit API better. */
#define __ARCH_WANT_SYNC_FILE_RANGE2
#endif
......
......@@ -22,8 +22,6 @@
#include <arch/chip.h>
-#include <hv/pagesize.h>
/* Linux builds want unsigned long constants, but assembler wants numbers */
#ifdef __ASSEMBLER__
/** One, for assembler */
......@@ -44,11 +42,21 @@
*/
#define HV_L1_SPAN (__HV_SIZE_ONE << HV_LOG2_L1_SPAN)
/** The log2 of the size of small pages, in bytes. This value should
* be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
*/
#define HV_LOG2_PAGE_SIZE_SMALL 16
/** The size of small pages, in bytes. This value should be verified
* at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
*/
#define HV_PAGE_SIZE_SMALL (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_SMALL)
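/** Illustrative runtime verification, as the comment above recommends
 * (a sketch only, not part of this header):
 *
 *   if (hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL) != HV_PAGE_SIZE_SMALL)
 *       panic("hypervisor small page size mismatch");
 */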
/** The log2 of the size of large pages, in bytes. This value should be
* verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
*/
#define HV_LOG2_PAGE_SIZE_LARGE 24
/** The size of large pages, in bytes. This value should be verified
* at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
*/
......
......@@ -308,6 +308,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
info.si_addr = (void __user *)address;
if (signo == SIGILL)
info.si_trapno = fault_num;
trace_unhandled_signal("trap", regs, address, signo);
force_sig_info(signo, &info, current);
}
......