Commit 56d61a0e authored by Linus Torvalds

Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
  [S390] 4level-fixup cleanup
  [S390] Cleanup page table definitions.
  [S390] Introduce follow_table in uaccess_pt.c
  [S390] Remove unused user_seg from thread structure.
  [S390] tlb flush fix.
  [S390] kernel: Fix dump on panic for DASDs under LPAR.
  [S390] struct class_device -> struct device conversion.
  [S390] cio: Fix incomplete commit for uevent suppression.
  [S390] cio: Use to_channelpath() for device to channel path conversion.
  [S390] Add per-cpu idle time / idle count sysfs attributes.
  [S390] Update default configuration.
parents 5f48b338 190a1d72
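Editorial note on the last item in the list above: the per-cpu idle accounting added by this merge is exported through sysfs. A minimal user-space sketch of reading the two new attributes, assuming they appear in the per-CPU sysdev directory alongside the existing capability attribute (the file names match the SYSDEV_ATTR declarations in the smp.c hunk below; the reader program itself is hypothetical):

/* Hypothetical reader for the new per-cpu idle attributes.
 * Assumes /sys/devices/system/cpu/cpu0/idle_count and .../idle_time,
 * i.e. the sysdev directory that also holds "capability". */
#include <stdio.h>

int main(void)
{
        unsigned long long count = 0, us = 0;
        FILE *f;

        if ((f = fopen("/sys/devices/system/cpu/cpu0/idle_count", "r"))) {
                fscanf(f, "%llu", &count);      /* times cpu0 entered idle */
                fclose(f);
        }
        if ((f = fopen("/sys/devices/system/cpu/cpu0/idle_time", "r"))) {
                fscanf(f, "%llu", &us);         /* accumulated idle time, us */
                fclose(f);
        }
        printf("cpu0: %llu idle periods, %llu us idle\n", count, us);
        return 0;
}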
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.22
-# Tue Jul 17 12:50:23 2007
+# Linux kernel version: 2.6.23
+# Mon Oct 22 12:10:44 2007
#
CONFIG_MMU=y
CONFIG_ZONE_DMA=y
@@ -19,15 +19,11 @@ CONFIG_S390=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
#
-# Code maturity level options
+# General setup
#
CONFIG_EXPERIMENTAL=y
CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
-#
-# General setup
-#
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
CONFIG_SWAP=y
@@ -42,7 +38,14 @@ CONFIG_AUDIT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=17
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+CONFIG_CGROUP_NS=y
+CONFIG_CGROUP_CPUACCT=y
# CONFIG_CPUSETS is not set
+CONFIG_FAIR_GROUP_SCHED=y
+CONFIG_FAIR_USER_SCHED=y
+# CONFIG_FAIR_CGROUP_SCHED is not set
CONFIG_SYSFS_DEPRECATED=y
# CONFIG_RELAY is not set
CONFIG_BLK_DEV_INITRD=y
@@ -63,7 +66,6 @@ CONFIG_FUTEX=y
CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
-CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_VM_EVENT_COUNTERS=y
@@ -83,6 +85,7 @@ CONFIG_STOP_MACHINE=y
CONFIG_BLOCK=y
# CONFIG_BLK_DEV_IO_TRACE is not set
CONFIG_BLK_DEV_BSG=y
+CONFIG_BLOCK_COMPAT=y
#
# IO Schedulers
@@ -108,7 +111,6 @@ CONFIG_64BIT=y
CONFIG_SMP=y
CONFIG_NR_CPUS=32
CONFIG_HOTPLUG_CPU=y
-CONFIG_DEFAULT_MIGRATION_COST=1000000
CONFIG_COMPAT=y
CONFIG_SYSVIPC_COMPAT=y
CONFIG_AUDIT_ARCH=y
@@ -143,9 +145,11 @@ CONFIG_FLATMEM_MANUAL=y
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
# CONFIG_SPARSEMEM_STATIC is not set
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_RESOURCES_64BIT=y
CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
CONFIG_VIRT_TO_BUS=y
CONFIG_HOLES_IN_ZONE=y
@@ -219,12 +223,14 @@ CONFIG_INET_TUNNEL=y
CONFIG_INET_XFRM_MODE_TRANSPORT=y
CONFIG_INET_XFRM_MODE_TUNNEL=y
CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_LRO=y
CONFIG_INET_DIAG=y
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IP_VS is not set
CONFIG_IPV6=y
# CONFIG_IPV6_PRIVACY is not set
# CONFIG_IPV6_ROUTER_PREF is not set
@@ -243,7 +249,48 @@ CONFIG_IPV6_SIT=y
# CONFIG_IPV6_TUNNEL is not set
# CONFIG_IPV6_MULTIPLE_TABLES is not set
# CONFIG_NETWORK_SECMARK is not set
-# CONFIG_NETFILTER is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_NETLINK=m
+CONFIG_NETFILTER_NETLINK_QUEUE=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NF_CONNTRACK_ENABLED=m
+CONFIG_NF_CONNTRACK=m
+# CONFIG_NF_CT_ACCT is not set
+# CONFIG_NF_CONNTRACK_MARK is not set
+# CONFIG_NF_CONNTRACK_EVENTS is not set
+# CONFIG_NF_CT_PROTO_SCTP is not set
+# CONFIG_NF_CT_PROTO_UDPLITE is not set
+# CONFIG_NF_CONNTRACK_AMANDA is not set
+# CONFIG_NF_CONNTRACK_FTP is not set
+# CONFIG_NF_CONNTRACK_H323 is not set
+# CONFIG_NF_CONNTRACK_IRC is not set
+# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set
+# CONFIG_NF_CONNTRACK_PPTP is not set
+# CONFIG_NF_CONNTRACK_SANE is not set
+# CONFIG_NF_CONNTRACK_SIP is not set
+# CONFIG_NF_CONNTRACK_TFTP is not set
+# CONFIG_NF_CT_NETLINK is not set
+# CONFIG_NETFILTER_XTABLES is not set
+#
+# IP: Netfilter Configuration
+#
+# CONFIG_NF_CONNTRACK_IPV4 is not set
+# CONFIG_IP_NF_QUEUE is not set
+# CONFIG_IP_NF_IPTABLES is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+#
+# IPv6: Netfilter Configuration (EXPERIMENTAL)
+#
+# CONFIG_NF_CONNTRACK_IPV6 is not set
+# CONFIG_IP6_NF_QUEUE is not set
+# CONFIG_IP6_NF_IPTABLES is not set
# CONFIG_IP_DCCP is not set
CONFIG_IP_SCTP=m
# CONFIG_SCTP_DBG_MSG is not set
@@ -263,12 +310,7 @@ CONFIG_SCTP_HMAC_MD5=y
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
-#
-# QoS and/or fair queueing
-#
CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_FIFO=y
#
# Queueing/Scheduling
@@ -306,10 +348,12 @@ CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=y
# CONFIG_NET_ACT_GACT is not set
# CONFIG_NET_ACT_MIRRED is not set
+CONFIG_NET_ACT_NAT=m
# CONFIG_NET_ACT_PEDIT is not set
# CONFIG_NET_ACT_SIMP is not set
CONFIG_NET_CLS_POLICE=y
# CONFIG_NET_CLS_IND is not set
+CONFIG_NET_SCH_FIFO=y
#
# Network testing
@@ -329,6 +373,7 @@ CONFIG_CCW=y
#
# Generic Driver Options
#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
@@ -400,17 +445,11 @@ CONFIG_SCSI_FC_ATTRS=y
# CONFIG_SCSI_ISCSI_ATTRS is not set
# CONFIG_SCSI_SAS_ATTRS is not set
# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
-#
-# SCSI low-level drivers
-#
+CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
# CONFIG_SCSI_DEBUG is not set
CONFIG_ZFCP=y
-#
-# Multi-device support (RAID and LVM)
-#
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=m
@@ -429,7 +468,9 @@ CONFIG_DM_ZERO=y
CONFIG_DM_MULTIPATH=y
# CONFIG_DM_MULTIPATH_EMC is not set
# CONFIG_DM_MULTIPATH_RDAC is not set
+# CONFIG_DM_MULTIPATH_HP is not set
# CONFIG_DM_DELAY is not set
+# CONFIG_DM_UEVENT is not set
CONFIG_NETDEVICES=y
# CONFIG_NETDEVICES_MULTIQUEUE is not set
# CONFIG_IFB is not set
@@ -438,8 +479,13 @@ CONFIG_BONDING=m
# CONFIG_MACVLAN is not set
CONFIG_EQUALIZER=m
CONFIG_TUN=m
+CONFIG_VETH=m
CONFIG_NET_ETHERNET=y
# CONFIG_MII is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
CONFIG_NETDEV_1000=y
CONFIG_NETDEV_10000=y
# CONFIG_TR is not set
@@ -473,7 +519,6 @@ CONFIG_CCWGROUP=y
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
-# CONFIG_WATCHDOG is not set
CONFIG_HW_RANDOM=m
# CONFIG_R3964 is not set
CONFIG_RAW_DRIVER=m
@@ -490,7 +535,6 @@ CONFIG_TN3270_CONSOLE=y
CONFIG_TN3215=y
CONFIG_TN3215_CONSOLE=y
CONFIG_CCW_CONSOLE=y
-CONFIG_SCLP=y
CONFIG_SCLP_TTY=y
CONFIG_SCLP_CONSOLE=y
CONFIG_SCLP_VT220_TTY=y
@@ -514,6 +558,11 @@ CONFIG_S390_TAPE_34XX=m
CONFIG_MONWRITER=m
CONFIG_S390_VMUR=m
# CONFIG_POWER_SUPPLY is not set
+# CONFIG_WATCHDOG is not set
+#
+# Sonics Silicon Backplane
+#
#
# File systems
@@ -569,7 +618,6 @@ CONFIG_SYSFS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
CONFIG_CONFIGFS_FS=m
#
@@ -588,10 +636,7 @@ CONFIG_CONFIGFS_FS=m
# CONFIG_QNX4FS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
-#
-# Network File Systems
-#
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
@@ -638,27 +683,13 @@ CONFIG_MSDOS_PARTITION=y
# CONFIG_KARMA_PARTITION is not set
# CONFIG_EFI_PARTITION is not set
# CONFIG_SYSV68_PARTITION is not set
-#
-# Native Language Support
-#
# CONFIG_NLS is not set
-#
-# Distributed Lock Manager
-#
CONFIG_DLM=m
# CONFIG_DLM_DEBUG is not set
+CONFIG_INSTRUMENTATION=y
-#
-# Instrumentation Support
-#
-#
-# Profiling support
-#
# CONFIG_PROFILING is not set
CONFIG_KPROBES=y
+# CONFIG_MARKERS is not set
#
# Kernel hacking
@@ -682,6 +713,7 @@ CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
# CONFIG_DEBUG_LOCK_ALLOC is not set
# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
CONFIG_DEBUG_SPINLOCK_SLEEP=y
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
@@ -694,14 +726,17 @@ CONFIG_FORCED_INLINING=y
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
+CONFIG_SAMPLES=y
#
# Security options
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
CONFIG_CRYPTO=y
CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_AEAD=m
CONFIG_CRYPTO_BLKCIPHER=y
CONFIG_CRYPTO_HASH=m
CONFIG_CRYPTO_MANAGER=y
@@ -720,6 +755,7 @@ CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_PCBC=m
# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_XTS is not set
# CONFIG_CRYPTO_CRYPTD is not set
# CONFIG_CRYPTO_DES is not set
CONFIG_CRYPTO_FCRYPT=m
@@ -733,11 +769,13 @@ CONFIG_CRYPTO_FCRYPT=m
# CONFIG_CRYPTO_ARC4 is not set
# CONFIG_CRYPTO_KHAZAD is not set
# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_SEED=m
# CONFIG_CRYPTO_DEFLATE is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
# CONFIG_CRYPTO_CRC32C is not set
CONFIG_CRYPTO_CAMELLIA=m
# CONFIG_CRYPTO_TEST is not set
+CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_HW=y
# CONFIG_CRYPTO_SHA1_S390 is not set
# CONFIG_CRYPTO_SHA256_S390 is not set
@@ -755,5 +793,6 @@ CONFIG_BITREVERSE=m
# CONFIG_CRC16 is not set
# CONFIG_CRC_ITU_T is not set
CONFIG_CRC32=m
+CONFIG_CRC7=m
# CONFIG_LIBCRC32C is not set
CONFIG_PLIST=y
@@ -648,6 +648,8 @@ static int dump_set_type(enum dump_type type)
        case DUMP_TYPE_CCW:
                if (MACHINE_IS_VM)
                        dump_method = DUMP_METHOD_CCW_VM;
+               else if (diag308_set_works)
+                       dump_method = DUMP_METHOD_CCW_DIAG;
                else
                        dump_method = DUMP_METHOD_CCW_CIO;
                break;
......
@@ -44,6 +44,7 @@
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/timer.h>
+#include <asm/cpu.h>

asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
@@ -91,6 +92,14 @@ EXPORT_SYMBOL(unregister_idle_notifier);
void do_monitor_call(struct pt_regs *regs, long interruption_code)
{
+       struct s390_idle_data *idle;
+
+       idle = &__get_cpu_var(s390_idle);
+       spin_lock(&idle->lock);
+       idle->idle_time += get_clock() - idle->idle_enter;
+       idle->in_idle = 0;
+       spin_unlock(&idle->lock);
+
        /* disable monitor call class 0 */
        __ctl_clear_bit(8, 15);
@@ -105,6 +114,7 @@ extern void s390_handle_mcck(void);
static void default_idle(void)
{
        int cpu, rc;
+       struct s390_idle_data *idle;

        /* CPU is going idle. */
        cpu = smp_processor_id();
@@ -142,6 +152,12 @@ static void default_idle(void)
                return;
        }

+       idle = &__get_cpu_var(s390_idle);
+       spin_lock(&idle->lock);
+       idle->idle_count++;
+       idle->in_idle = 1;
+       idle->idle_enter = get_clock();
+       spin_unlock(&idle->lock);
        trace_hardirqs_on();
        /* Wait for external, I/O or machine check interrupt. */
        __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
@@ -254,14 +270,12 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
        save_fp_regs(&current->thread.fp_regs);
        memcpy(&p->thread.fp_regs, &current->thread.fp_regs,
               sizeof(s390_fp_regs));
-       p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _SEGMENT_TABLE;
        /* Set a new TLS ? */
        if (clone_flags & CLONE_SETTLS)
                p->thread.acrs[0] = regs->gprs[6];
#else /* CONFIG_64BIT */
        /* Save the fpu registers to new thread structure. */
        save_fp_regs(&p->thread.fp_regs);
-       p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _REGION_TABLE;
        /* Set a new TLS ? */
        if (clone_flags & CLONE_SETTLS) {
                if (test_thread_flag(TIF_31BIT)) {
......
@@ -42,6 +42,7 @@
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>
+#include <asm/cpu.h>

/*
 * An array with a pointer the lowcore of every CPU.
@@ -325,7 +326,7 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
 */
void smp_ptlb_callback(void *info)
{
-       local_flush_tlb();
+       __tlb_flush_local();
}

void smp_ptlb_all(void)
@@ -494,6 +495,8 @@ int __cpuinit start_secondary(void *cpuvoid)
        return 0;
}

+DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
+
static void __init smp_create_idle(unsigned int cpu)
{
        struct task_struct *p;
@@ -506,6 +509,7 @@ static void __init smp_create_idle(unsigned int cpu)
        if (IS_ERR(p))
                panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
        current_set[cpu] = p;
+       spin_lock_init(&(&per_cpu(s390_idle, cpu))->lock);
}

static int cpu_stopped(int cpu)
@@ -724,6 +728,7 @@ void __init smp_prepare_boot_cpu(void)
        cpu_set(0, cpu_online_map);
        S390_lowcore.percpu_offset = __per_cpu_offset[0];
        current_set[0] = current;
+       spin_lock_init(&(&__get_cpu_var(s390_idle))->lock);
}

void __init smp_cpus_done(unsigned int max_cpus)
@@ -756,22 +761,71 @@ static ssize_t show_capability(struct sys_device *dev, char *buf)
}
static SYSDEV_ATTR(capability, 0444, show_capability, NULL);

+static ssize_t show_idle_count(struct sys_device *dev, char *buf)
+{
+       struct s390_idle_data *idle;
+       unsigned long long idle_count;
+
+       idle = &per_cpu(s390_idle, dev->id);
+       spin_lock_irq(&idle->lock);
+       idle_count = idle->idle_count;
+       spin_unlock_irq(&idle->lock);
+       return sprintf(buf, "%llu\n", idle_count);
+}
+static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);
+
+static ssize_t show_idle_time(struct sys_device *dev, char *buf)
+{
+       struct s390_idle_data *idle;
+       unsigned long long new_time;
+
+       idle = &per_cpu(s390_idle, dev->id);
+       spin_lock_irq(&idle->lock);
+       if (idle->in_idle) {
+               new_time = get_clock();
+               idle->idle_time += new_time - idle->idle_enter;
+               idle->idle_enter = new_time;
+       }
+       new_time = idle->idle_time;
+       spin_unlock_irq(&idle->lock);
+       return sprintf(buf, "%llu us\n", new_time >> 12);
+}
+static SYSDEV_ATTR(idle_time, 0444, show_idle_time, NULL);
+
+static struct attribute *cpu_attrs[] = {
+       &attr_capability.attr,
+       &attr_idle_count.attr,
+       &attr_idle_time.attr,
+       NULL,
+};
+
+static struct attribute_group cpu_attr_group = {
+       .attrs = cpu_attrs,
+};
+
static int __cpuinit smp_cpu_notify(struct notifier_block *self,
                                    unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned int)(long)hcpu;
        struct cpu *c = &per_cpu(cpu_devices, cpu);
        struct sys_device *s = &c->sysdev;
+       struct s390_idle_data *idle;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
-               if (sysdev_create_file(s, &attr_capability))
+               idle = &per_cpu(s390_idle, cpu);
+               spin_lock_irq(&idle->lock);
+               idle->idle_enter = 0;
+               idle->idle_time = 0;
+               idle->idle_count = 0;
+               spin_unlock_irq(&idle->lock);
+               if (sysfs_create_group(&s->kobj, &cpu_attr_group))
                        return NOTIFY_BAD;
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
-               sysdev_remove_file(s, &attr_capability);
+               sysfs_remove_group(&s->kobj, &cpu_attr_group);
                break;
        }
        return NOTIFY_OK;
@@ -784,6 +838,7 @@ static struct notifier_block __cpuinitdata smp_cpu_nb = {
static int __init topology_init(void)
{
        int cpu;
+       int rc;

        register_cpu_notifier(&smp_cpu_nb);
@@ -796,7 +851,9 @@ static int __init topology_init(void)
                if (!cpu_online(cpu))
                        continue;
                s = &c->sysdev;
-               sysdev_create_file(s, &attr_capability);
+               rc = sysfs_create_group(&s->kobj, &cpu_attr_group);
+               if (rc)
+                       return rc;
        }
        return 0;
}
......
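Editorial note: show_idle_time() above reports microseconds by shifting the accumulated TOD-clock delta right by twelve bits. On s390 the TOD clock increments bit 51 once per microsecond, so one microsecond corresponds to 2^12 TOD units. A small standalone sketch of that conversion (the helper name is made up for illustration):

/* TOD-to-microseconds conversion as used by show_idle_time():
 * bit 51 of the TOD clock ticks once per microsecond, so dividing
 * a TOD delta by 2^12 yields microseconds. */
static inline unsigned long long tod_to_us(unsigned long long tod_delta)
{
        return tod_delta >> 12;         /* 4096 TOD units per microsecond */
}

/* Example: a delta of 40960000 TOD units is 10000 us, i.e. 10 ms. */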
@@ -15,6 +15,27 @@
#include <asm/futex.h>
#include "uaccess.h"

+static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+
+       pgd = pgd_offset(mm, addr);
+       if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+               return NULL;
+
+       pud = pud_offset(pgd, addr);
+       if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+               return NULL;
+
+       pmd = pmd_offset(pud, addr);
+       if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+               return NULL;
+
+       return pte_offset_map(pmd, addr);
+}
+
static int __handle_fault(struct mm_struct *mm, unsigned long address,
                          int write_access)
{
@@ -85,8 +106,6 @@ static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
{
        struct mm_struct *mm = current->mm;
        unsigned long offset, pfn, done, size;
-       pgd_t *pgd;
-       pmd_t *pmd;
        pte_t *pte;
        void *from, *to;
@@ -94,15 +113,7 @@ static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
retry:
        spin_lock(&mm->page_table_lock);
        do {
-               pgd = pgd_offset(mm, uaddr);
-               if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-                       goto fault;
-
-               pmd = pmd_offset(pgd, uaddr);
-               if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-                       goto fault;
-
-               pte = pte_offset_map(pmd, uaddr);
+               pte = follow_table(mm, uaddr);
                if (!pte || !pte_present(*pte) ||
                    (write_user && !pte_write(*pte)))
                        goto fault;
@@ -142,22 +153,12 @@ static unsigned long __dat_user_addr(unsigned long uaddr)
{
        struct mm_struct *mm = current->mm;
        unsigned long pfn, ret;
-       pgd_t *pgd;
-       pmd_t *pmd;
        pte_t *pte;
        int rc;

        ret = 0;
retry:
-       pgd = pgd_offset(mm, uaddr);
-       if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-               goto fault;
-
-       pmd = pmd_offset(pgd, uaddr);
-       if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-               goto fault;
-
-       pte = pte_offset_map(pmd, uaddr);
+       pte = follow_table(mm, uaddr);
        if (!pte || !pte_present(*pte))
                goto fault;
@@ -229,8 +230,6 @@ static size_t strnlen_user_pt(size_t count, const char __user *src)
        unsigned long uaddr = (unsigned long) src;
        struct mm_struct *mm = current->mm;
        unsigned long offset, pfn, done, len;
-       pgd_t *pgd;
-       pmd_t *pmd;
        pte_t *pte;
        size_t len_str;
@@ -240,15 +239,7 @@ static size_t strnlen_user_pt(size_t count, const char __user *src)
retry:
        spin_lock(&mm->page_table_lock);
        do {
-               pgd = pgd_offset(mm, uaddr);
-               if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-                       goto fault;
-
-               pmd = pmd_offset(pgd, uaddr);
-               if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-                       goto fault;
-
-               pte = pte_offset_map(pmd, uaddr);
+               pte = follow_table(mm, uaddr);
                if (!pte || !pte_present(*pte))
                        goto fault;
@@ -308,8 +299,6 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
                      uaddr, done, size;
        unsigned long uaddr_from = (unsigned long) from;
        unsigned long uaddr_to = (unsigned long) to;
-       pgd_t *pgd_from, *pgd_to;
-       pmd_t *pmd_from, *pmd_to;
        pte_t *pte_from, *pte_to;
        int write_user;
@@ -317,39 +306,14 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
retry:
        spin_lock(&mm->page_table_lock);
        do {
-               pgd_from = pgd_offset(mm, uaddr_from);
-               if (pgd_none(*pgd_from) || unlikely(pgd_bad(*pgd_from))) {
-                       uaddr = uaddr_from;
-                       write_user = 0;
-                       goto fault;
-               }
-               pgd_to = pgd_offset(mm, uaddr_to);
-               if (pgd_none(*pgd_to) || unlikely(pgd_bad(*pgd_to))) {
-                       uaddr = uaddr_to;
-                       write_user = 1;
-                       goto fault;
-               }
-
-               pmd_from = pmd_offset(pgd_from, uaddr_from);
-               if (pmd_none(*pmd_from) || unlikely(pmd_bad(*pmd_from))) {
-                       uaddr = uaddr_from;
-                       write_user = 0;
-                       goto fault;
-               }
-               pmd_to = pmd_offset(pgd_to, uaddr_to);
-               if (pmd_none(*pmd_to) || unlikely(pmd_bad(*pmd_to))) {
-                       uaddr = uaddr_to;
-                       write_user = 1;
-                       goto fault;
-               }
-
-               pte_from = pte_offset_map(pmd_from, uaddr_from);
+               pte_from = follow_table(mm, uaddr_from);
                if (!pte_from || !pte_present(*pte_from)) {
                        uaddr = uaddr_from;
                        write_user = 0;
                        goto fault;
                }
-               pte_to = pte_offset_map(pmd_to, uaddr_to);
+
+               pte_to = follow_table(mm, uaddr_to);
                if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
                        uaddr = uaddr_to;
                        write_user = 1;
......
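Editorial note: the conversion above replaces four hand-rolled pgd/pmd walks with one helper; every caller now takes mm->page_table_lock, calls follow_table() and only validates the pte it gets back. A condensed sketch of the resulting calling pattern (the helper below is hypothetical, boiled down from __user_copy_pt):

/* Hypothetical condensation of the calling pattern after the cleanup:
 * the pgd->pud->pmd->pte walk lives in follow_table(), callers only
 * check the returned pte under the page table lock. */
static int user_page_present(struct mm_struct *mm, unsigned long uaddr)
{
        pte_t *pte;
        int present;

        spin_lock(&mm->page_table_lock);
        pte = follow_table(mm, uaddr);
        present = pte && pte_present(*pte);
        spin_unlock(&mm->page_table_lock);
        return present;
}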
@@ -2,6 +2,6 @@
# Makefile for the linux s390-specific parts of the memory manager.
#

-obj-y   := init.o fault.o extmem.o mmap.o vmem.o
+obj-y   := init.o fault.o extmem.o mmap.o vmem.o pgtable.o

obj-$(CONFIG_CMM) += cmm.o
@@ -81,6 +81,7 @@ void show_mem(void)
static void __init setup_ro_region(void)
{
        pgd_t *pgd;
+       pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t new_pte;
@@ -91,7 +92,8 @@ static void __init setup_ro_region(void)
        for (; address < end; address += PAGE_SIZE) {
                pgd = pgd_offset_k(address);
-               pmd = pmd_offset(pgd, address);
+               pud = pud_offset(pgd, address);
+               pmd = pmd_offset(pud, address);
                pte = pte_offset_kernel(pmd, address);
                new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO));
                *pte = new_pte;
@@ -103,32 +105,28 @@ static void __init setup_ro_region(void)
 */
void __init paging_init(void)
{
-       pgd_t *pg_dir;
-       int i;
-       unsigned long pgdir_k;
        static const int ssm_mask = 0x04000000L;
        unsigned long max_zone_pfns[MAX_NR_ZONES];
+       unsigned long pgd_type;

-       pg_dir = swapper_pg_dir;
+       init_mm.pgd = swapper_pg_dir;
+       S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
#ifdef CONFIG_64BIT
-       pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE;
-       for (i = 0; i < PTRS_PER_PGD; i++)
-               pgd_clear_kernel(pg_dir + i);
+       S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
+       pgd_type = _REGION3_ENTRY_EMPTY;
#else
-       pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
-       for (i = 0; i < PTRS_PER_PGD; i++)
-               pmd_clear_kernel((pmd_t *)(pg_dir + i));
+       S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
+       pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
+       clear_table((unsigned long *) init_mm.pgd, pgd_type,
+                   sizeof(unsigned long)*2048);
        vmem_map_init();
        setup_ro_region();

-       S390_lowcore.kernel_asce = pgdir_k;
        /* enable virtual mapping in kernel mode */
-       __ctl_load(pgdir_k, 1, 1);
-       __ctl_load(pgdir_k, 7, 7);
-       __ctl_load(pgdir_k, 13, 13);
+       __ctl_load(S390_lowcore.kernel_asce, 1, 1);
+       __ctl_load(S390_lowcore.kernel_asce, 7, 7);
+       __ctl_load(S390_lowcore.kernel_asce, 13, 13);
        __raw_local_irq_ssm(ssm_mask);

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
......
/*
 *  arch/s390/mm/pgtable.c
 *
 *    Copyright IBM Corp. 2007
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER     1
#else
#define ALLOC_ORDER     2
#endif

unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
{
        struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

        if (!page)
                return NULL;
        page->index = 0;
        if (noexec) {
                struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
                if (!shadow) {
                        __free_pages(page, ALLOC_ORDER);
                        return NULL;
                }
                page->index = page_to_phys(shadow);
        }
        return (unsigned long *) page_to_phys(page);
}

void crst_table_free(unsigned long *table)
{
        unsigned long *shadow = get_shadow_table(table);

        if (shadow)
                free_pages((unsigned long) shadow, ALLOC_ORDER);
        free_pages((unsigned long) table, ALLOC_ORDER);
}

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(int noexec)
{
        struct page *page = alloc_page(GFP_KERNEL);
        unsigned long *table;

        if (!page)
                return NULL;
        page->index = 0;
        if (noexec) {
                struct page *shadow = alloc_page(GFP_KERNEL);
                if (!shadow) {
                        __free_page(page);
                        return NULL;
                }
                table = (unsigned long *) page_to_phys(shadow);
                clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
                page->index = (addr_t) table;
        }
        table = (unsigned long *) page_to_phys(page);
        clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
        return table;
}

void page_table_free(unsigned long *table)
{
        unsigned long *shadow = get_shadow_pte(table);

        if (shadow)
                free_page((unsigned long) shadow);
        free_page((unsigned long) table);
}
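Editorial note: crst_table_alloc() above hands out region/segment tables (plus an optional shadow copy for the noexec case, remembered in page->index); callers are expected to pre-fill the table with empty entries before use. A minimal sketch of the intended pairing, mirroring the pgd_alloc()/pmd_alloc_one() definitions in the pgalloc.h hunk further down (the wrapper name here is made up):

/* Sketch: allocate a crst table, initialize it with empty region-third
 * entries, and release it symmetrically.  Mirrors pgd_alloc() below. */
static unsigned long *alloc_region3_table(struct mm_struct *mm)
{
        unsigned long *table = crst_table_alloc(mm, s390_noexec);

        if (table)
                crst_table_init(table, _REGION3_ENTRY_EMPTY);
        return table;
}
/* ... use the table, then release it with crst_table_free(table); */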
@@ -73,31 +73,28 @@ static void __init_refok *vmem_alloc_pages(unsigned int order)
        return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

+#define vmem_pud_alloc()       ({ BUG(); ((pud_t *) NULL); })
+
static inline pmd_t *vmem_pmd_alloc(void)
{
-       pmd_t *pmd;
-       int i;
+       pmd_t *pmd = NULL;

-       pmd = vmem_alloc_pages(PMD_ALLOC_ORDER);
+#ifdef CONFIG_64BIT
+       pmd = vmem_alloc_pages(2);
        if (!pmd)
                return NULL;
-       for (i = 0; i < PTRS_PER_PMD; i++)
-               pmd_clear_kernel(pmd + i);
+       clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE*4);
+#endif
        return pmd;
}

static inline pte_t *vmem_pte_alloc(void)
{
-       pte_t *pte;
-       pte_t empty_pte;
-       int i;
+       pte_t *pte = vmem_alloc_pages(0);

-       pte = vmem_alloc_pages(PTE_ALLOC_ORDER);
        if (!pte)
                return NULL;
-       pte_val(empty_pte) = _PAGE_TYPE_EMPTY;
-       for (i = 0; i < PTRS_PER_PTE; i++)
-               pte[i] = empty_pte;
+       clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, PAGE_SIZE);
        return pte;
}
@@ -108,6 +105,7 @@ static int vmem_add_range(unsigned long start, unsigned long size)
{
        unsigned long address;
        pgd_t *pg_dir;
+       pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;
        pte_t  pte;
@@ -116,13 +114,21 @@ static int vmem_add_range(unsigned long start, unsigned long size)
        for (address = start; address < start + size; address += PAGE_SIZE) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
+                       pu_dir = vmem_pud_alloc();
+                       if (!pu_dir)
+                               goto out;
+                       pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
+               }
+
+               pu_dir = pud_offset(pg_dir, address);
+               if (pud_none(*pu_dir)) {
                        pm_dir = vmem_pmd_alloc();
                        if (!pm_dir)
                                goto out;
-                       pgd_populate_kernel(&init_mm, pg_dir, pm_dir);
+                       pud_populate_kernel(&init_mm, pu_dir, pm_dir);
                }

-               pm_dir = pmd_offset(pg_dir, address);
+               pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir)) {
                        pt_dir = vmem_pte_alloc();
                        if (!pt_dir)
@@ -148,6 +154,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
{
        unsigned long address;
        pgd_t *pg_dir;
+       pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;
        pte_t  pte;
@@ -155,9 +162,10 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
        pte_val(pte) = _PAGE_TYPE_EMPTY;
        for (address = start; address < start + size; address += PAGE_SIZE) {
                pg_dir = pgd_offset_k(address);
-               if (pgd_none(*pg_dir))
+               pu_dir = pud_offset(pg_dir, address);
+               if (pud_none(*pu_dir))
                        continue;
-               pm_dir = pmd_offset(pg_dir, address);
+               pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir))
                        continue;
                pt_dir = pte_offset_kernel(pm_dir, address);
@@ -174,6 +182,7 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
        unsigned long address, start_addr, end_addr;
        struct page *map_start, *map_end;
        pgd_t *pg_dir;
+       pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;
        pte_t  pte;
@@ -188,13 +197,21 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
        for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
+                       pu_dir = vmem_pud_alloc();
+                       if (!pu_dir)
+                               goto out;
+                       pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
+               }
+
+               pu_dir = pud_offset(pg_dir, address);
+               if (pud_none(*pu_dir)) {
                        pm_dir = vmem_pmd_alloc();
                        if (!pm_dir)
                                goto out;
-                       pgd_populate_kernel(&init_mm, pg_dir, pm_dir);
+                       pud_populate_kernel(&init_mm, pu_dir, pm_dir);
                }

-               pm_dir = pmd_offset(pg_dir, address);
+               pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir)) {
                        pt_dir = vmem_pte_alloc();
                        if (!pt_dir)
......
@@ -48,8 +48,8 @@ struct raw3270 {
        struct timer_list timer;        /* Device timer. */

        unsigned char *ascebc;          /* ascii -> ebcdic table */
-       struct class_device *clttydev;  /* 3270-class tty device ptr */
-       struct class_device *cltubdev;  /* 3270-class tub device ptr */
+       struct device *clttydev;        /* 3270-class tty device ptr */
+       struct device *cltubdev;        /* 3270-class tub device ptr */

        struct raw3270_request init_request;
        unsigned char init_data[256];
@@ -1107,11 +1107,9 @@ raw3270_delete_device(struct raw3270 *rp)
        /* Remove from device chain. */
        mutex_lock(&raw3270_mutex);
        if (rp->clttydev && !IS_ERR(rp->clttydev))
-               class_device_destroy(class3270,
-                                    MKDEV(IBM_TTY3270_MAJOR, rp->minor));
+               device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor));
        if (rp->cltubdev && !IS_ERR(rp->cltubdev))
-               class_device_destroy(class3270,
-                                    MKDEV(IBM_FS3270_MAJOR, rp->minor));
+               device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, rp->minor));
        list_del_init(&rp->list);
        mutex_unlock(&raw3270_mutex);
@@ -1181,24 +1179,22 @@ static int raw3270_create_attributes(struct raw3270 *rp)
        if (rc)
                goto out;

-       rp->clttydev = class_device_create(class3270, NULL,
-                                          MKDEV(IBM_TTY3270_MAJOR, rp->minor),
-                                          &rp->cdev->dev, "tty%s",
-                                          rp->cdev->dev.bus_id);
+       rp->clttydev = device_create(class3270, &rp->cdev->dev,
+                                    MKDEV(IBM_TTY3270_MAJOR, rp->minor),
+                                    "tty%s", rp->cdev->dev.bus_id);
        if (IS_ERR(rp->clttydev)) {
                rc = PTR_ERR(rp->clttydev);
                goto out_ttydev;
        }

-       rp->cltubdev = class_device_create(class3270, NULL,
-                                          MKDEV(IBM_FS3270_MAJOR, rp->minor),
-                                          &rp->cdev->dev, "tub%s",
-                                          rp->cdev->dev.bus_id);
+       rp->cltubdev = device_create(class3270, &rp->cdev->dev,
+                                    MKDEV(IBM_FS3270_MAJOR, rp->minor),
+                                    "tub%s", rp->cdev->dev.bus_id);
        if (!IS_ERR(rp->cltubdev))
                goto out;

        rc = PTR_ERR(rp->cltubdev);
-       class_device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor));
+       device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor));

out_ttydev:
        sysfs_remove_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
......
@@ -69,11 +69,8 @@ struct tape_class_device *register_tape_dev(
        if (rc)
                goto fail_with_cdev;

-       tcd->class_device = class_device_create(
-                               tape_class,
-                               NULL,
+       tcd->class_device = device_create(tape_class, device,
                                tcd->char_device->dev,
-                               device,
                                "%s", tcd->device_name
        );
        rc = IS_ERR(tcd->class_device) ? PTR_ERR(tcd->class_device) : 0;
@@ -90,7 +87,7 @@ struct tape_class_device *register_tape_dev(
        return tcd;

fail_with_class_device:
-       class_device_destroy(tape_class, tcd->char_device->dev);
+       device_destroy(tape_class, tcd->char_device->dev);

fail_with_cdev:
        cdev_del(tcd->char_device);
@@ -105,11 +102,9 @@ EXPORT_SYMBOL(register_tape_dev);
void unregister_tape_dev(struct tape_class_device *tcd)
{
        if (tcd != NULL && !IS_ERR(tcd)) {
-               sysfs_remove_link(
-                       &tcd->class_device->dev->kobj,
-                       tcd->mode_name
-               );
-               class_device_destroy(tape_class, tcd->char_device->dev);
+               sysfs_remove_link(&tcd->class_device->kobj,
+                                 tcd->mode_name);
+               device_destroy(tape_class, tcd->char_device->dev);
                cdev_del(tcd->char_device);
                kfree(tcd);
        }
......
@@ -24,8 +24,8 @@
#define TAPECLASS_NAME_LEN      32

struct tape_class_device {
-       struct cdev *           char_device;
-       struct class_device *   class_device;
+       struct cdev *char_device;
+       struct device *class_device;
        char device_name[TAPECLASS_NAME_LEN];
        char mode_name[TAPECLASS_NAME_LEN];
};
......
@@ -74,7 +74,7 @@ struct vmlogrdr_priv_t {
        int dev_in_use; /* 1: already opened, 0: not opened*/
        spinlock_t priv_lock;
        struct device *device;
-       struct class_device *class_device;
+       struct device *class_device;
        int autorecording;
        int autopurge;
};
@@ -762,12 +762,10 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
                device_unregister(dev);
                return ret;
        }
-       priv->class_device = class_device_create(
-                               vmlogrdr_class,
-                               NULL,
-                               MKDEV(vmlogrdr_major, priv->minor_num),
-                               dev,
-                               "%s", dev->bus_id );
+       priv->class_device = device_create(vmlogrdr_class, dev,
+                                          MKDEV(vmlogrdr_major,
+                                                priv->minor_num),
+                                          "%s", dev->bus_id);
        if (IS_ERR(priv->class_device)) {
                ret = PTR_ERR(priv->class_device);
                priv->class_device=NULL;
@@ -783,8 +781,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
{
-       class_device_destroy(vmlogrdr_class,
-                            MKDEV(vmlogrdr_major, priv->minor_num));
+       device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
        if (priv->device != NULL) {
                sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group);
                device_unregister(priv->device);
......
@@ -246,7 +246,7 @@ int chp_add_cmg_attr(struct channel_path *chp)
static ssize_t chp_status_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
-       struct channel_path *chp = container_of(dev, struct channel_path, dev);
+       struct channel_path *chp = to_channelpath(dev);

        if (!chp)
                return 0;
@@ -258,7 +258,7 @@ static ssize_t chp_status_write(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
-       struct channel_path *cp = container_of(dev, struct channel_path, dev);
+       struct channel_path *cp = to_channelpath(dev);
        char cmd[10];
        int num_args;
        int error;
@@ -286,7 +286,7 @@ static ssize_t chp_configure_show(struct device *dev,
        struct channel_path *cp;
        int status;

-       cp = container_of(dev, struct channel_path, dev);
+       cp = to_channelpath(dev);
        status = chp_info_get_status(cp->chpid);
        if (status < 0)
                return status;
@@ -308,7 +308,7 @@ static ssize_t chp_configure_write(struct device *dev,
                return -EINVAL;
        if (val != 0 && val != 1)
                return -EINVAL;
-       cp = container_of(dev, struct channel_path, dev);
+       cp = to_channelpath(dev);
        chp_cfg_schedule(cp->chpid, val);
        cfg_wait_idle();
@@ -320,7 +320,7 @@ static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write);
static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
-       struct channel_path *chp = container_of(dev, struct channel_path, dev);
+       struct channel_path *chp = to_channelpath(dev);

        if (!chp)
                return 0;
@@ -374,7 +374,7 @@ static void chp_release(struct device *dev)
{
        struct channel_path *cp;

-       cp = container_of(dev, struct channel_path, dev);
+       cp = to_channelpath(dev);
        kfree(cp);
}
......
@@ -182,6 +182,15 @@ static int css_register_subchannel(struct subchannel *sch)
        sch->dev.bus = &css_bus_type;
        sch->dev.release = &css_subchannel_release;
        sch->dev.groups = subch_attr_groups;
+       /*
+        * We don't want to generate uevents for I/O subchannels that don't
+        * have a working ccw device behind them since they will be
+        * unregistered before they can be used anyway, so we delay the add
+        * uevent until after device recognition was successful.
+        */
+       if (!cio_is_console(sch->schid))
+               /* Console is special, no need to suppress. */
+               sch->dev.uevent_suppress = 1;
        css_update_ssd_info(sch);
        /* make it known to the system */
        ret = css_sch_device_register(sch);
......
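Editorial note: the hunk above only suppresses the uevent; the matching announce side is not part of this patch. A sketch of the pattern the suppression relies on, assuming the ccw device-recognition path clears the flag and emits the delayed ADD event once the subchannel turns out to be usable (the function below is illustrative, not the actual io_subchannel code):

/* Illustrative announce side of the delayed-uevent pattern: once
 * device recognition succeeds, lift the suppression and send the
 * KOBJ_ADD event that was skipped at registration time. */
static void announce_subchannel(struct subchannel *sch)
{
        sch->dev.uevent_suppress = 0;
        kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
}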
/*
 *  include/asm-s390/cpu.h
 *
 *    Copyright IBM Corp. 2007
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#ifndef _ASM_S390_CPU_H_
#define _ASM_S390_CPU_H_

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

struct s390_idle_data {
        spinlock_t lock;
        unsigned int in_idle;
        unsigned long long idle_count;
        unsigned long long idle_enter;
        unsigned long long idle_time;
};

DECLARE_PER_CPU(struct s390_idle_data, s390_idle);

#endif /* _ASM_S390_CPU_H_ */
@@ -21,45 +21,43 @@
#ifndef __s390x__
#define LCTL_OPCODE "lctl"
-#define PGTABLE_BITS (_SEGMENT_TABLE|USER_STD_MASK)
#else
#define LCTL_OPCODE "lctlg"
-#define PGTABLE_BITS (_REGION_TABLE|USER_STD_MASK)
#endif

-static inline void enter_lazy_tlb(struct mm_struct *mm,
-                                 struct task_struct *tsk)
-{
-}
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-                            struct task_struct *tsk)
-{
-       pgd_t *shadow_pgd = get_shadow_pgd(next->pgd);
-
-       if (prev != next) {
-               S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) |
-                       PGTABLE_BITS;
-               if (shadow_pgd) {
-                       /* Load primary/secondary space page table origin. */
-                       S390_lowcore.user_exec_asce =
-                               (__pa(shadow_pgd) & PAGE_MASK) | PGTABLE_BITS;
-                       asm volatile(LCTL_OPCODE" 1,1,%0\n"
-                                    LCTL_OPCODE" 7,7,%1"
-                                    : : "m" (S390_lowcore.user_exec_asce),
-                                        "m" (S390_lowcore.user_asce) );
-               } else if (switch_amode) {
-                       /* Load primary space page table origin. */
-                       asm volatile(LCTL_OPCODE" 1,1,%0"
-                                    : : "m" (S390_lowcore.user_asce) );
-               } else
-                       /* Load home space page table origin. */
-                       asm volatile(LCTL_OPCODE" 13,13,%0"
-                                    : : "m" (S390_lowcore.user_asce) );
-       }
-       cpu_set(smp_processor_id(), next->cpu_vm_mask);
-}
+static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
+{
+       pgd_t *pgd = mm->pgd;
+       unsigned long asce_bits;
+
+       /* Calculate asce bits from the first pgd table entry. */
+       asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
+#ifdef CONFIG_64BIT
+       asce_bits |= _ASCE_TYPE_REGION3;
+#endif
+       S390_lowcore.user_asce = asce_bits | __pa(pgd);
+       if (switch_amode) {
+               /* Load primary space page table origin. */
+               pgd_t *shadow_pgd = get_shadow_table(pgd) ? : pgd;
+               S390_lowcore.user_exec_asce = asce_bits | __pa(shadow_pgd);
+               asm volatile(LCTL_OPCODE" 1,1,%0\n"
+                            : : "m" (S390_lowcore.user_exec_asce) );
+       } else
+               /* Load home space page table origin. */
+               asm volatile(LCTL_OPCODE" 13,13,%0"
+                            : : "m" (S390_lowcore.user_asce) );
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+                            struct task_struct *tsk)
+{
+       if (unlikely(prev == next))
+               return;
+       cpu_set(smp_processor_id(), next->cpu_vm_mask);
+       update_mm(next, tsk);
+}

+#define enter_lazy_tlb(mm,tsk) do { } while (0)
#define deactivate_mm(tsk,mm)  do { } while (0)

static inline void activate_mm(struct mm_struct *prev,
......
@@ -82,6 +82,7 @@ typedef struct { unsigned long pte; } pte_t;

#ifndef __s390x__

typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pud; } pud_t;
typedef struct {
        unsigned long pgd0;
        unsigned long pgd1;
@@ -90,6 +91,7 @@ typedef struct {
} pgd_t;

#define pmd_val(x)      ((x).pmd)
+#define pud_val(x)      ((x).pud)
#define pgd_val(x)      ((x).pgd0)

#else /* __s390x__ */
@@ -98,10 +100,12 @@ typedef struct {
        unsigned long pmd0;
        unsigned long pmd1;
} pmd_t;
+typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long pgd; } pgd_t;

#define pmd_val(x)      ((x).pmd0)
#define pmd_val1(x)     ((x).pmd1)
+#define pud_val(x)      ((x).pud)
#define pgd_val(x)      ((x).pgd)

#endif /* __s390x__ */
......
...@@ -19,140 +19,115 @@ ...@@ -19,140 +19,115 @@
 #define check_pgt_cache()	do {} while (0)
 
-/*
- * Page allocation orders.
- */
-#ifndef __s390x__
-# define PTE_ALLOC_ORDER	0
-# define PMD_ALLOC_ORDER	0
-# define PGD_ALLOC_ORDER	1
-#else /* __s390x__ */
-# define PTE_ALLOC_ORDER	0
-# define PMD_ALLOC_ORDER	2
-# define PGD_ALLOC_ORDER	2
-#endif /* __s390x__ */
+unsigned long *crst_table_alloc(struct mm_struct *, int);
+void crst_table_free(unsigned long *);
 
-/*
- * Allocate and free page tables. The xxx_kernel() versions are
- * used to allocate a kernel page table - this turns on ASN bits
- * if any.
- */
+unsigned long *page_table_alloc(int);
+void page_table_free(unsigned long *);
 
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
-	pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
-	int i;
-
-	if (!pgd)
-		return NULL;
-	if (s390_noexec) {
-		pgd_t *shadow_pgd = (pgd_t *)
-			__get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
-		struct page *page = virt_to_page(pgd);
-
-		if (!shadow_pgd) {
-			free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
-			return NULL;
-		}
-		page->lru.next = (void *) shadow_pgd;
-	}
-	for (i = 0; i < PTRS_PER_PGD; i++)
-#ifndef __s390x__
-		pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
+	*s = val;
+	n = (n / 256) - 1;
+	asm volatile(
+#ifdef CONFIG_64BIT
+		"	mvc	8(248,%0),0(%0)\n"
 #else
-		pgd_clear(pgd + i);
+		"	mvc	4(252,%0),0(%0)\n"
 #endif
-	return pgd;
+		"0:	mvc	256(256,%0),0(%0)\n"
+		"	la	%0,256(%0)\n"
+		"	brct	%1,0b\n"
+		: "+a" (s), "+d" (n));
 }
 
-static inline void pgd_free(pgd_t *pgd)
+static inline void crst_table_init(unsigned long *crst, unsigned long entry)
 {
-	pgd_t *shadow_pgd = get_shadow_pgd(pgd);
-
-	if (shadow_pgd)
-		free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER);
-	free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
+	clear_table(crst, entry, sizeof(unsigned long)*2048);
+	crst = get_shadow_table(crst);
+	if (crst)
+		clear_table(crst, entry, sizeof(unsigned long)*2048);
 }
 
 #ifndef __s390x__
-/*
- * page middle directory allocation/free routines.
- * We use pmd cache only on s390x, so these are dummy routines. This
- * code never triggers because the pgd will always be present.
- */
-#define pud_alloc_one(mm,address)	({ BUG(); ((pud_t *)2); })
-#define pud_free(x)			do { } while (0)
+
+static inline unsigned long pgd_entry_type(struct mm_struct *mm)
+{
+	return _SEGMENT_ENTRY_EMPTY;
+}
+
 #define pmd_alloc_one(mm,address)	({ BUG(); ((pmd_t *)2); })
 #define pmd_free(x)			do { } while (0)
-#define __pmd_free_tlb(tlb,x)		do { } while (0)
-#define pgd_populate(mm, pmd, pte)	BUG()
-#define pgd_populate_kernel(mm, pmd, pte)	BUG()
+
+#define pgd_populate(mm, pgd, pud)		BUG()
+#define pgd_populate_kernel(mm, pgd, pud)	BUG()
+
+#define pud_populate(mm, pud, pmd)		BUG()
+#define pud_populate_kernel(mm, pud, pmd)	BUG()
+
 #else /* __s390x__ */
-static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
+
+static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 {
-	pmd_t *pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
-	int i;
-
-	if (!pmd)
-		return NULL;
-	if (s390_noexec) {
-		pmd_t *shadow_pmd = (pmd_t *)
-			__get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
-		struct page *page = virt_to_page(pmd);
-
-		if (!shadow_pmd) {
-			free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
-			return NULL;
-		}
-		page->lru.next = (void *) shadow_pmd;
-	}
-	for (i=0; i < PTRS_PER_PMD; i++)
-		pmd_clear(pmd + i);
-	return pmd;
+	return _REGION3_ENTRY_EMPTY;
 }
 
-static inline void pmd_free (pmd_t *pmd)
-{
-	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
-
-	if (shadow_pmd)
-		free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER);
-	free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
-}
+#define pud_alloc_one(mm,address)	({ BUG(); ((pud_t *)2); })
+#define pud_free(x)			do { } while (0)
 
-#define __pmd_free_tlb(tlb,pmd)			\
-	do {					\
-		tlb_flush_mmu(tlb, 0, 0);	\
-		pmd_free(pmd);			\
-	} while (0)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
+{
+	unsigned long *crst = crst_table_alloc(mm, s390_noexec);
+	if (crst)
+		crst_table_init(crst, _SEGMENT_ENTRY_EMPTY);
+	return (pmd_t *) crst;
+}
+#define pmd_free(pmd) crst_table_free((unsigned long *) pmd)
 
-static inline void
-pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+#define pgd_populate(mm, pgd, pud)		BUG()
+#define pgd_populate_kernel(mm, pgd, pud)	BUG()
+
+static inline void pud_populate_kernel(struct mm_struct *mm,
+				       pud_t *pud, pmd_t *pmd)
 {
-	pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
+	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
 }
 
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
-	pgd_t *shadow_pgd = get_shadow_pgd(pgd);
-	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+	pud_t *shadow_pud = get_shadow_table(pud);
+	pmd_t *shadow_pmd = get_shadow_table(pmd);
 
-	if (shadow_pgd && shadow_pmd)
-		pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
-	pgd_populate_kernel(mm, pgd, pmd);
+	if (shadow_pud && shadow_pmd)
+		pud_populate_kernel(mm, shadow_pud, shadow_pmd);
+	pud_populate_kernel(mm, pud, pmd);
 }
 
 #endif /* __s390x__ */
 
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	unsigned long *crst = crst_table_alloc(mm, s390_noexec);
+	if (crst)
+		crst_table_init(crst, pgd_entry_type(mm));
+	return (pgd_t *) crst;
+}
+#define pgd_free(pgd) crst_table_free((unsigned long *) pgd)
+
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 {
 #ifndef __s390x__
-	pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
-	pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
-	pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512);
-	pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768);
+	pmd_val(pmd[0]) = _SEGMENT_ENTRY + __pa(pte);
+	pmd_val(pmd[1]) = _SEGMENT_ENTRY + __pa(pte+256);
+	pmd_val(pmd[2]) = _SEGMENT_ENTRY + __pa(pte+512);
+	pmd_val(pmd[3]) = _SEGMENT_ENTRY + __pa(pte+768);
 #else /* __s390x__ */
-	pmd_val(*pmd) = _PMD_ENTRY + __pa(pte);
-	pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte+256);
+	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
+	pmd_val1(*pmd) = _SEGMENT_ENTRY + __pa(pte+256);
 #endif /* __s390x__ */
 }
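An aside on clear_table() in the hunk above: the inline assembly stores the first entry, lets an overlapping MVC propagate it through the first 256-byte block, and the BRCT loop then copies each block into the next, so a 16 KB crst table is initialized with a handful of instructions. A plain-C model of the same result (illustrative only, not the kernel code; the function name is made up):

	#include <stddef.h>

	/* Fill a table of n bytes with 'val' in every unsigned-long slot. */
	static void clear_table_model(unsigned long *s, unsigned long val,
				      size_t n)
	{
		size_t i;

		for (i = 0; i < n / sizeof(unsigned long); i++)
			s[i] = val;
	}

The MVC version trades this per-entry loop for a logarithmically small number of block copies, which matters when every fork clears a 2048-entry region table.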
@@ -160,7 +135,7 @@ static inline void
 pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
 {
 	pte_t *pte = (pte_t *)page_to_phys(page);
-	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+	pmd_t *shadow_pmd = get_shadow_table(pmd);
 	pte_t *shadow_pte = get_shadow_pte(pte);
 
 	pmd_populate_kernel(mm, pmd, pte);
@@ -171,67 +146,14 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
 /*
  * page table entry allocation/free routines.
  */
-static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
-{
-	pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
-	int i;
-
-	if (!pte)
-		return NULL;
-	if (s390_noexec) {
-		pte_t *shadow_pte = (pte_t *)
-			__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-		struct page *page = virt_to_page(pte);
-
-		if (!shadow_pte) {
-			free_page((unsigned long) pte);
-			return NULL;
-		}
-		page->lru.next = (void *) shadow_pte;
-	}
-	for (i=0; i < PTRS_PER_PTE; i++) {
-		pte_clear(mm, vmaddr, pte + i);
-		vmaddr += PAGE_SIZE;
-	}
-	return pte;
-}
-
-static inline struct page *
-pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
-{
-	pte_t *pte = pte_alloc_one_kernel(mm, vmaddr);
-	if (pte)
-		return virt_to_page(pte);
-	return NULL;
-}
-
-static inline void pte_free_kernel(pte_t *pte)
-{
-	pte_t *shadow_pte = get_shadow_pte(pte);
-
-	if (shadow_pte)
-		free_page((unsigned long) shadow_pte);
-	free_page((unsigned long) pte);
-}
-
-static inline void pte_free(struct page *pte)
-{
-	struct page *shadow_page = get_shadow_page(pte);
-
-	if (shadow_page)
-		__free_page(shadow_page);
-	__free_page(pte);
-}
-
-#define __pte_free_tlb(tlb, pte)					\
-({									\
-	struct mmu_gather *__tlb = (tlb);				\
-	struct page *__pte = (pte);					\
-	struct page *shadow_page = get_shadow_page(__pte);		\
-	if (shadow_page)						\
-		tlb_remove_page(__tlb, shadow_page);			\
-	tlb_remove_page(__tlb, __pte);					\
-})
+#define pte_alloc_one_kernel(mm, vmaddr) \
+	((pte_t *) page_table_alloc(s390_noexec))
+#define pte_alloc_one(mm, vmaddr) \
+	virt_to_page(page_table_alloc(s390_noexec))
+
+#define pte_free_kernel(pte) \
+	page_table_free((unsigned long *) pte)
+#define pte_free(pte) \
+	page_table_free((unsigned long *) page_to_phys((struct page *) pte))
 
 #endif /* _S390_PGALLOC_H */
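Throughout these hunks, every primary table may carry a companion shadow table when s390_noexec is set; the old code open-coded that pairing at each allocation site, while the new crst_table_alloc()/page_table_alloc() helpers centralize it. A minimal user-space sketch of the pairing idea (hypothetical names; the kernel links the shadow through struct page instead of a side pointer):

	#include <stdlib.h>

	struct table {
		unsigned long *entries;
		unsigned long *shadow;	/* companion table; NULL when noexec is off */
	};

	static struct table *table_alloc(size_t nentries, int noexec)
	{
		struct table *t = calloc(1, sizeof(*t));

		if (!t)
			return NULL;
		t->entries = calloc(nentries, sizeof(unsigned long));
		t->shadow = noexec ? calloc(nentries, sizeof(unsigned long)) : NULL;
		if (!t->entries || (noexec && !t->shadow)) {
			free(t->entries);	/* allocate both or neither */
			free(t->shadow);
			free(t);
			return NULL;
		}
		return t;
	}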
This diff is collapsed.

--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -93,7 +93,6 @@ struct thread_struct {
 	s390_fp_regs fp_regs;
 	unsigned int  acrs[NUM_ACRS];
 	unsigned long ksp;		/* kernel stack pointer */
-	unsigned long user_seg;		/* HSTD */
 	mm_segment_t mm_segment;
 	unsigned long prot_addr;	/* address of protection-excep. */
 	unsigned int error_code;	/* error-code of last prog-excep. */
@@ -128,21 +127,8 @@ struct stack_frame {
 
 #define ARCH_MIN_TASKALIGN	8
 
-#ifndef __s390x__
-# define __SWAPPER_PG_DIR	__pa(&swapper_pg_dir[0]) + _SEGMENT_TABLE
-#else /* __s390x__ */
-# define __SWAPPER_PG_DIR	__pa(&swapper_pg_dir[0]) + _REGION_TABLE
-#endif /* __s390x__ */
-
-#define INIT_THREAD {{0,{{0},{0},{0},{0},{0},{0},{0},{0},{0},{0}, \
-			    {0},{0},{0},{0},{0},{0}}}, \
-	{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, \
-	sizeof(init_stack) + (unsigned long) &init_stack, \
-	__SWAPPER_PG_DIR, \
-	{0}, \
-	0,0,0, \
-	(per_struct) {{{{0,}}},0,0,0,0,{{0,}}}, \
-	0, 0 \
-}
+#define INIT_THREAD {						\
+	.ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
+}
 
 /*
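The INIT_THREAD rewrite works because a C99 designated initializer zero-initializes every member it does not name, so the hand-maintained brace lists (and the __SWAPPER_PG_DIR value that seeded the now-removed user_seg field) become unnecessary. A stand-alone illustration with a made-up struct:

	struct demo_thread {
		unsigned long ksp;
		unsigned int acrs[16];
		unsigned long prot_addr;
	};

	/* Members not named here (acrs, prot_addr) are implicitly zeroed. */
	static struct demo_thread t = {
		.ksp = 0x1000,
	};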
--- a/include/asm-s390/tlb.h
+++ b/include/asm-s390/tlb.h
@@ -2,19 +2,130 @@
 #define _S390_TLB_H
 
 /*
- * s390 doesn't need any special per-pte or
- * per-vma handling..
+ * TLB flushing on s390 is complicated. The following requirement
+ * from the principles of operation is the most arduous:
+ *
+ * "A valid table entry must not be changed while it is attached
+ * to any CPU and may be used for translation by that CPU except to
+ * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
+ * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
+ * table entry, or (3) make a change by means of a COMPARE AND SWAP
+ * AND PURGE instruction that purges the TLB."
+ *
+ * The modification of a pte of an active mm struct therefore is
+ * a two step process: i) invalidate the pte, ii) store the new pte.
+ * This is true for the page protection bit as well.
+ * The only possible optimization is to flush at the beginning of
+ * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
+ *
+ * Pages used for the page tables are a different story. FIXME: more
  */
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <asm/processor.h>
+#include <asm/pgalloc.h>
+#include <asm/smp.h>
+#include <asm/tlbflush.h>
+
+#ifndef CONFIG_SMP
+#define TLB_NR_PTRS	1
+#else
+#define TLB_NR_PTRS	508
+#endif
+
+struct mmu_gather {
+	struct mm_struct *mm;
+	unsigned int fullmm;
+	unsigned int nr_ptes;
+	unsigned int nr_pmds;
+	void *array[TLB_NR_PTRS];
+};
+
+DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm,
+						unsigned int full_mm_flush)
+{
+	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
+
+	tlb->mm = mm;
+	tlb->fullmm = full_mm_flush || (num_online_cpus() == 1) ||
+		(atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm);
+	tlb->nr_ptes = 0;
+	tlb->nr_pmds = TLB_NR_PTRS;
+	if (tlb->fullmm)
+		__tlb_flush_mm(mm);
+	return tlb;
+}
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb,
+				 unsigned long start, unsigned long end)
+{
+	if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pmds < TLB_NR_PTRS))
+		__tlb_flush_mm(tlb->mm);
+	while (tlb->nr_ptes > 0)
+		pte_free(tlb->array[--tlb->nr_ptes]);
+	while (tlb->nr_pmds < TLB_NR_PTRS)
+		pmd_free((pmd_t *) tlb->array[tlb->nr_pmds++]);
+}
+
+static inline void tlb_finish_mmu(struct mmu_gather *tlb,
+				  unsigned long start, unsigned long end)
+{
+	tlb_flush_mmu(tlb, start, end);
+
+	/* keep the page table cache within bounds */
+	check_pgt_cache();
+
+	put_cpu_var(mmu_gathers);
+}
 
 /*
- * .. because we flush the whole mm when it
- * fills up.
+ * Release the page cache reference for a pte removed by
+ * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache
+ * page has already been flushed, so just do free_page_and_swap_cache.
  */
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	free_page_and_swap_cache(page);
+}
 
-#include <asm-generic/tlb.h>
+/*
+ * pte_free_tlb frees a pte table and clears the CRSTE for the
+ * page table from the tlb.
+ */
+static inline void pte_free_tlb(struct mmu_gather *tlb, struct page *page)
+{
+	if (!tlb->fullmm) {
+		tlb->array[tlb->nr_ptes++] = page;
+		if (tlb->nr_ptes >= tlb->nr_pmds)
+			tlb_flush_mmu(tlb, 0, 0);
+	} else
+		pte_free(page);
+}
 
-#endif
+/*
+ * pmd_free_tlb frees a pmd table and clears the CRSTE for the
+ * segment table entry from the tlb.
+ */
+static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
+{
+#ifdef __s390x__
+	if (!tlb->fullmm) {
+		tlb->array[--tlb->nr_pmds] = (struct page *) pmd;
+		if (tlb->nr_ptes >= tlb->nr_pmds)
+			tlb_flush_mmu(tlb, 0, 0);
+	} else
+		pmd_free(pmd);
+#endif
+}
+
+#define pud_free_tlb(tlb, pud)			do { } while (0)
+
+#define tlb_start_vma(tlb, vma)			do { } while (0)
+#define tlb_end_vma(tlb, vma)			do { } while (0)
+#define tlb_remove_tlb_entry(tlb, ptep, addr)	do { } while (0)
+#define tlb_migrate_finish(mm)			do { } while (0)
+
+#endif /* _S390_TLB_H */
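The discipline enforced by pte_free_tlb()/pmd_free_tlb() above: a table detached from a live address space is only queued, the TLB is flushed once for the whole batch, and only then is the memory freed; if the array fills, an early tlb_flush_mmu() drains it. A self-contained user-space model of that batching idea (illustrative names only, not the kernel API):

	#include <stdio.h>

	#define BATCH_SIZE 4

	struct gather_model {
		void *batch[BATCH_SIZE];
		unsigned int nr;
	};

	static void flush_all(void)
	{
		puts("tlb flush");	/* stands in for __tlb_flush_mm() */
	}

	static void gather_drain(struct gather_model *g)
	{
		flush_all();		/* one flush covers every queued table */
		while (g->nr > 0)
			printf("free table %p\n", g->batch[--g->nr]);
	}

	static void gather_queue(struct gather_model *g, void *table)
	{
		g->batch[g->nr++] = table;
		if (g->nr == BATCH_SIZE)	/* array full: flush and free early */
			gather_drain(g);
	}

In the kernel version the single array is filled from both ends, ptes from the front and pmds from the back, so one fixed-size buffer serves both table types.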
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -6,68 +6,19 @@
 #include <asm/pgalloc.h>
 
 /*
- * TLB flushing:
- *
- *  - flush_tlb() flushes the current mm struct TLBs
- *  - flush_tlb_all() flushes all processes TLBs
- *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
- *  - flush_tlb_page(vma, vmaddr) flushes one page
- *  - flush_tlb_range(vma, start, end) flushes a range of pages
- *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- */
-
-/*
- * S/390 has three ways of flushing TLBs
- * 'ptlb' does a flush of the local processor
- * 'csp' flushes the TLBs on all PUs of a SMP
- * 'ipte' invalidates a pte in a page table and flushes that out of
- * the TLBs of all PUs of a SMP
+ * Flush all tlb entries on the local cpu.
  */
-
-#define local_flush_tlb() \
-do { asm volatile("ptlb": : :"memory"); } while (0)
-
-#ifndef CONFIG_SMP
-
-/*
- * We always need to flush, since s390 does not flush tlb
- * on each context switch
- */
-
-static inline void flush_tlb(void)
-{
-	local_flush_tlb();
-}
-static inline void flush_tlb_all(void)
+static inline void __tlb_flush_local(void)
 {
-	local_flush_tlb();
-}
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
-	local_flush_tlb();
-}
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				  unsigned long addr)
-{
-	local_flush_tlb();
-}
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	local_flush_tlb();
+	asm volatile("ptlb" : : : "memory");
 }
 
-#define flush_tlb_kernel_range(start, end) \
-	local_flush_tlb();
-
-#else
-
-#include <asm/smp.h>
-
-extern void smp_ptlb_all(void);
-
-static inline void global_flush_tlb(void)
+/*
+ * Flush all tlb entries on all cpus.
+ */
+static inline void __tlb_flush_global(void)
 {
+	extern void smp_ptlb_all(void);
 	register unsigned long reg2 asm("2");
 	register unsigned long reg3 asm("3");
 	register unsigned long reg4 asm("4");
@@ -89,66 +40,75 @@ static inline void global_flush_tlb(void)
 }
 
 /*
- * We only have to do global flush of tlb if process run since last
- * flush on any other pu than current.
- * If we have threads (mm->count > 1) we always do a global flush,
- * since the process runs on more than one processor at the same time.
+ * Flush all tlb entries of a page table on all cpus.
  */
+static inline void __tlb_flush_idte(pgd_t *pgd)
+{
+	asm volatile(
+		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
+		: : "a" (2048), "a" (__pa(pgd) & PAGE_MASK) : "cc" );
+}
 
-static inline void __flush_tlb_mm(struct mm_struct * mm)
+static inline void __tlb_flush_mm(struct mm_struct * mm)
 {
 	cpumask_t local_cpumask;
 
 	if (unlikely(cpus_empty(mm->cpu_vm_mask)))
 		return;
+	/*
+	 * If the machine has IDTE we prefer to do a per mm flush
+	 * on all cpus instead of doing a local flush if the mm
+	 * only ran on the local cpu.
+	 */
 	if (MACHINE_HAS_IDTE) {
-		pgd_t *shadow_pgd = get_shadow_pgd(mm->pgd);
+		pgd_t *shadow_pgd = get_shadow_table(mm->pgd);
 
-		if (shadow_pgd) {
-			asm volatile(
-				"	.insn	rrf,0xb98e0000,0,%0,%1,0"
-				: : "a" (2048),
-				"a" (__pa(shadow_pgd) & PAGE_MASK) : "cc" );
-		}
-		asm volatile(
-			"	.insn	rrf,0xb98e0000,0,%0,%1,0"
-			: : "a" (2048), "a" (__pa(mm->pgd)&PAGE_MASK) : "cc");
+		if (shadow_pgd)
+			__tlb_flush_idte(shadow_pgd);
+		__tlb_flush_idte(mm->pgd);
 		return;
 	}
 	preempt_disable();
+	/*
+	 * If the process only ran on the local cpu, do a local flush.
+	 */
 	local_cpumask = cpumask_of_cpu(smp_processor_id());
 	if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
-		local_flush_tlb();
+		__tlb_flush_local();
 	else
-		global_flush_tlb();
+		__tlb_flush_global();
 	preempt_enable();
 }
 
-static inline void flush_tlb(void)
-{
-	__flush_tlb_mm(current->mm);
-}
-static inline void flush_tlb_all(void)
-{
-	global_flush_tlb();
-}
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
-	__flush_tlb_mm(mm);
-}
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				  unsigned long addr)
-{
-	__flush_tlb_mm(vma->vm_mm);
-}
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
+static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
 {
-	__flush_tlb_mm(vma->vm_mm);
+	if (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm)
+		__tlb_flush_mm(mm);
 }
 
-#define flush_tlb_kernel_range(start, end) global_flush_tlb()
+/*
+ * TLB flushing:
+ *  flush_tlb() - flushes the current mm struct TLBs
+ *  flush_tlb_all() - flushes all processes TLBs
+ *  flush_tlb_mm(mm) - flushes the specified mm context TLB's
+ *  flush_tlb_page(vma, vmaddr) - flushes one page
+ *  flush_tlb_range(vma, start, end) - flushes a range of pages
+ *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
+ */
 
-#endif
+/*
+ * flush_tlb_mm goes together with ptep_set_wrprotect for the
+ * copy_page_range operation and flush_tlb_range is related to
+ * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
+ * ptep_get_and_clear do not flush the TLBs directly if the mm has
+ * only one user. At the end of the update the flush_tlb_mm and
+ * flush_tlb_range functions need to do the flush.
+ */
+#define flush_tlb()				do { } while (0)
+#define flush_tlb_all()				do { } while (0)
+#define flush_tlb_mm(mm)			__tlb_flush_mm_cond(mm)
+#define flush_tlb_page(vma, addr)		do { } while (0)
+#define flush_tlb_range(vma, start, end)	__tlb_flush_mm_cond((vma)->vm_mm)
+#define flush_tlb_kernel_range(start, end)	__tlb_flush_mm(&init_mm)
 
 #endif /* _S390_TLBFLUSH_H */
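The decision tree inside __tlb_flush_mm() condenses to: use IDTE for a per-address-space flush on all cpus when the machine has it, otherwise flush only the local cpu if the mm never ran elsewhere, and fall back to the global CSP-based flush. A compact model of that choice (illustrative, not the kernel API):

	enum flush_kind { FLUSH_IDTE, FLUSH_LOCAL, FLUSH_GLOBAL };

	/* Decision logic modelled on __tlb_flush_mm() above. */
	static enum flush_kind pick_flush(int machine_has_idte, int ran_only_here)
	{
		if (machine_has_idte)
			return FLUSH_IDTE;	/* flush just this mm's entries, everywhere */
		return ran_only_here ? FLUSH_LOCAL : FLUSH_GLOBAL;
	}

__tlb_flush_mm_cond() adds one more gate on top: skip the flush entirely unless the mm has a single user and is the current address space, since the two-step update rule quoted in tlb.h already covers the multi-user case.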