diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index fda46bd38c99a7b529ce71925e862efdc3c88a5d..69cf5b5eddc95dcb83372e10f444df01825583c9 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -1,12 +1,25 @@
 #ifndef _ASM_S390_FUTEX_H
 #define _ASM_S390_FUTEX_H
 
-#include <linux/futex.h>
 #include <linux/uaccess.h>
+#include <linux/futex.h>
+#include <asm/mmu_context.h>
 #include <asm/errno.h>
 
-int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval);
-int __futex_atomic_op_inuser(int op, u32 __user *uaddr, int oparg, int *old);
+#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
+	asm volatile(							\
+		"   sacf  256\n"					\
+		"0: l     %1,0(%6)\n"					\
+		"1:"insn						\
+		"2: cs    %1,%2,0(%6)\n"				\
+		"3: jl    1b\n"						\
+		"   lhi   %0,0\n"					\
+		"4: sacf  768\n"					\
+		EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)		\
+		: "=d" (ret), "=&d" (oldval), "=&d" (newval),		\
+		  "=m" (*uaddr)						\
+		: "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
+		  "m" (*uaddr) : "cc");
 
 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
@@ -14,13 +27,37 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 	int cmp = (encoded_op >> 24) & 15;
 	int oparg = (encoded_op << 8) >> 20;
 	int cmparg = (encoded_op << 20) >> 20;
-	int oldval, ret;
+	int oldval = 0, newval, ret;
 
+	update_primary_asce(current);
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
 	pagefault_disable();
-	ret = __futex_atomic_op_inuser(op, uaddr, oparg, &oldval);
+	switch (op) {
+	case FUTEX_OP_SET:
+		__futex_atomic_op("lr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ADD:
+		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_OR:
+		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ANDN:
+		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_XOR:
+		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
 	pagefault_enable();
 
 	if (!ret) {
@@ -37,4 +74,23 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 	return ret;
 }
 
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+						u32 oldval, u32 newval)
+{
+	int ret;
+
+	update_primary_asce(current);
+	asm volatile(
+		"   sacf 256\n"
+		"0: cs   %1,%4,0(%5)\n"
+		"1: la   %0,0\n"
+		"2: sacf 768\n"
+		EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+		: "=d" (ret), "+d" (oldval), "=m" (*uaddr)
+		: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
+		: "cc", "memory");
+	*uval = oldval;
+	return ret;
+}
+
 #endif /* _ASM_S390_FUTEX_H */
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 71a258839039cb1536955ce7b51551698d9a35a0..71be346d0e3c8074d7be6542815610567ff66606 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -30,27 +30,33 @@ static inline int init_new_context(struct task_struct *tsk,
 
 #define destroy_context(mm)             do { } while (0)
 
-#ifndef CONFIG_64BIT
-#define LCTL_OPCODE "lctl"
-#else
-#define LCTL_OPCODE "lctlg"
-#endif
-
-static inline void update_user_asce(struct mm_struct *mm)
+static inline void update_user_asce(struct mm_struct *mm, int load_primary)
 {
 	pgd_t *pgd = mm->pgd;
 
 	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-	/* Load primary space page table origin. */
-	asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
+	if (load_primary)
+		__ctl_load(S390_lowcore.user_asce, 1, 1);
 	set_fs(current->thread.mm_segment);
 }
 
-static inline void clear_user_asce(struct mm_struct *mm)
+static inline void clear_user_asce(struct mm_struct *mm, int load_primary)
 {
 	S390_lowcore.user_asce = S390_lowcore.kernel_asce;
-	asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
-	asm volatile(LCTL_OPCODE" 7,7,%0\n" : : "m" (S390_lowcore.user_asce));
+
+	if (load_primary)
+		__ctl_load(S390_lowcore.user_asce, 1, 1);
+	__ctl_load(S390_lowcore.user_asce, 7, 7);
+}
+
+static inline void update_primary_asce(struct task_struct *tsk)
+{
+	unsigned long asce;
+
+	__ctl_store(asce, 1, 1);
+	if (asce != S390_lowcore.kernel_asce)
+		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
+	set_tsk_thread_flag(tsk, TIF_ASCE);
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
@@ -58,6 +64,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
 	int cpu = smp_processor_id();
 
+	update_primary_asce(tsk);
 	if (prev == next)
 		return;
 	if (MACHINE_HAS_TLB_LC)
@@ -66,10 +73,10 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		/* Delay update_user_asce until all TLB flushes are done. */
 		set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
 		/* Clear old ASCE by loading the kernel ASCE. */
-		clear_user_asce(next);
+		clear_user_asce(next, 0);
 	} else {
 		cpumask_set_cpu(cpu, mm_cpumask(next));
-		update_user_asce(next);
+		update_user_asce(next, 0);
 		if (next->context.flush_mm)
 			/* Flush pending TLBs */
 			__tlb_flush_mm(next);
@@ -94,7 +101,7 @@ static inline void finish_arch_post_lock_switch(void)
 		cpu_relax();
 
 	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-	update_user_asce(mm);
+	update_user_asce(mm, 0);
 	if (mm->context.flush_mm)
 		__tlb_flush_mm(mm);
 	preempt_enable();
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 29c81f82705e139dc53a9af3f72b0db3d9e14695..e759181357fc5823c490696c23d047878c4ec753 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -132,6 +132,7 @@ static inline void restore_access_regs(unsigned int *acrs)
 		update_cr_regs(next);					\
 	}								\
 	prev = __switch_to(prev,next);					\
+	update_primary_asce(current);					\
 } while (0)
 
 #define finish_arch_switch(prev) do {					     \
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 3ccd71b903454a667ec116a21fa0678a6f80dafe..50630e6a35de394688ac59207a5c523370493388 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -82,6 +82,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_TLB_WAIT		4	/* wait for TLB flush completion */
+#define TIF_ASCE		5	/* primary asce needs fixup / uaccess */
 #define TIF_PER_TRAP		6	/* deliver sigtrap on return to user */
 #define TIF_MCCK_PENDING	7	/* machine check handling is pending */
 #define TIF_SYSCALL_TRACE	8	/* syscall trace active */
@@ -99,6 +100,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_TLB_WAIT		(1<<TIF_TLB_WAIT)
+#define _TIF_ASCE		(1<<TIF_ASCE)
 #define _TIF_PER_TRAP		(1<<TIF_PER_TRAP)
 #define _TIF_MCCK_PENDING	(1<<TIF_MCCK_PENDING)
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 4133b3f72fb09a04c9f640cd214ef4a21a69c9db..1be64a1506d0164593bafc6fdc847570813b1934 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -92,8 +92,6 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
 #define ARCH_HAS_SORT_EXTABLE
 #define ARCH_HAS_SEARCH_EXTABLE
 
-int __handle_fault(unsigned long, unsigned long, int);
-
 /**
  * __copy_from_user: - Copy a block of data from user space, with less checking.
  * @to:   Destination address, in kernel space.
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index e4c99a1836511b16b90de5b4591bea5dc9fc8e69..cc10cdd4d6a24ccbb9d02d12154da7eae25cb076 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -136,6 +136,7 @@ int main(void)
 	DEFINE(__LC_RESTART_FN, offsetof(struct _lowcore, restart_fn));
 	DEFINE(__LC_RESTART_DATA, offsetof(struct _lowcore, restart_data));
 	DEFINE(__LC_RESTART_SOURCE, offsetof(struct _lowcore, restart_source));
+	DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce));
 	DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
 	DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
 	DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 526d3735ed29050d317ef1327039397a4594d71f..1662038516c0db29d59a4a87dce89f374428cd51 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -38,9 +38,9 @@ __PT_R14     =	__PT_GPRS + 56
 __PT_R15     =	__PT_GPRS + 60
 
 _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
-		 _TIF_MCCK_PENDING | _TIF_PER_TRAP )
+		 _TIF_MCCK_PENDING | _TIF_PER_TRAP | _TIF_ASCE)
 _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
-		 _TIF_MCCK_PENDING)
+		 _TIF_MCCK_PENDING | _TIF_ASCE)
 _TIF_TRACE    = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
 		 _TIF_SYSCALL_TRACEPOINT)
 _TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT)
@@ -241,6 +241,8 @@ sysc_work:
 	jo	sysc_sigpending
 	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
 	jo	sysc_notify_resume
+	tm	__TI_flags+3(%r12),_TIF_ASCE
+	jo	sysc_uaccess
 	j	sysc_return		# beware of critical section cleanup
 
 #
@@ -259,6 +261,14 @@ sysc_mcck_pending:
 	la	%r14,BASED(sysc_return)
 	br	%r1			# TIF bit will be cleared by handler
 
+#
+# _TIF_ASCE is set, load user space asce
+#
+sysc_uaccess:
+	ni	__TI_flags+3(%r12),255-_TIF_ASCE
+	lctl	%c1,%c1,__LC_USER_ASCE	# load primary asce
+	j	sysc_return
+
 #
 # _TIF_SIGPENDING is set, call do_signal
 #
@@ -522,6 +532,8 @@ io_work_tif:
 	jo	io_sigpending
 	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
 	jo	io_notify_resume
+	tm	__TI_flags+3(%r12),_TIF_ASCE
+	jo	io_uaccess
 	j	io_return		# beware of critical section cleanup
 
 #
@@ -534,6 +546,14 @@ io_mcck_pending:
 	TRACE_IRQS_OFF
 	j	io_return
 
+#
+# _TIF_ASCE is set, load user space asce
+#
+io_uaccess:
+	ni	__TI_flags+3(%r12),255-_TIF_ASCE
+	lctl	%c1,%c1,__LC_USER_ASCE	# load primary asce
+	j	io_return
+
 #
 # _TIF_NEED_RESCHED is set, call schedule
 #
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index e09dbe5f29015a9fc794cddb2f7ff57053594cdc..5963e43618bb0df3ca790ffa1ce65a9fefd86b0b 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -43,9 +43,9 @@ STACK_SIZE  = 1 << STACK_SHIFT
 STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
 
 _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
-		 _TIF_MCCK_PENDING | _TIF_PER_TRAP )
+		 _TIF_MCCK_PENDING | _TIF_PER_TRAP | _TIF_ASCE)
 _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
-		 _TIF_MCCK_PENDING)
+		 _TIF_MCCK_PENDING | _TIF_ASCE)
 _TIF_TRACE    = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
 		 _TIF_SYSCALL_TRACEPOINT)
 _TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT)
@@ -275,6 +275,8 @@ sysc_work:
 	jo	sysc_sigpending
 	tm	__TI_flags+7(%r12),_TIF_NOTIFY_RESUME
 	jo	sysc_notify_resume
+	tm	__TI_flags+7(%r12),_TIF_ASCE
+	jo	sysc_uaccess
 	j	sysc_return		# beware of critical section cleanup
 
 #
@@ -291,6 +293,14 @@ sysc_mcck_pending:
 	larl	%r14,sysc_return
 	jg	s390_handle_mcck	# TIF bit will be cleared by handler
 
+#
+# _TIF_ASCE is set, load user space asce
+#
+sysc_uaccess:
+	ni	__TI_flags+7(%r12),255-_TIF_ASCE
+	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
+	j	sysc_return
+
 #
 # _TIF_SIGPENDING is set, call do_signal
 #
@@ -559,6 +569,8 @@ io_work_tif:
 	jo	io_sigpending
 	tm	__TI_flags+7(%r12),_TIF_NOTIFY_RESUME
 	jo	io_notify_resume
+	tm	__TI_flags+7(%r12),_TIF_ASCE
+	jo	io_uaccess
 	j	io_return		# beware of critical section cleanup
 
 #
@@ -570,6 +582,14 @@ io_mcck_pending:
 	TRACE_IRQS_OFF
 	j	io_return
 
+#
+# _TIF_ASCE is set, load user space asce
+#
+io_uaccess:
+	ni	__TI_flags+7(%r12),255-_TIF_ASCE
+	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
+	j	io_return
+
 #
 # _TIF_NEED_RESCHED is set, call schedule
 #
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index e3fffe1dff513a05ec2839114b1952c65801721c..c6d752e8bf28e2dbee2839915b46d10107eb047b 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -2,7 +2,7 @@
 # Makefile for s390-specific library files..
 #
 
-lib-y += delay.o string.o uaccess_pt.o uaccess_mvcos.o find.o
+lib-y += delay.o string.o uaccess.o find.o
 obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
 obj-$(CONFIG_64BIT) += mem64.o
 lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
new file mode 100644
index 0000000000000000000000000000000000000000..23f866b4c7f1f3564747c8e1251ea3a05bae420d
--- /dev/null
+++ b/arch/s390/lib/uaccess.c
@@ -0,0 +1,407 @@
+/*
+ *  Standard user space access functions based on mvcp/mvcs and doing
+ *  interesting things in the secondary space mode.
+ *
+ *    Copyright IBM Corp. 2006,2014
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *		 Gerald Schaefer (gerald.schaefer@de.ibm.com)
+ */
+
+#include <linux/jump_label.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <asm/mmu_context.h>
+#include <asm/facility.h>
+
+#ifndef CONFIG_64BIT
+#define AHI	"ahi"
+#define ALR	"alr"
+#define CLR	"clr"
+#define LHI	"lhi"
+#define SLR	"slr"
+#else
+#define AHI	"aghi"
+#define ALR	"algr"
+#define CLR	"clgr"
+#define LHI	"lghi"
+#define SLR	"slgr"
+#endif
+
+static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE;
+
+static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
+						 unsigned long size)
+{
+	register unsigned long reg0 asm("0") = 0x81UL;
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -4096UL;
+	asm volatile(
+		"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
+		"9: jz    7f\n"
+		"1:"ALR"  %0,%3\n"
+		"  "SLR"  %1,%3\n"
+		"  "SLR"  %2,%3\n"
+		"   j     0b\n"
+		"2: la    %4,4095(%1)\n"/* %4 = ptr + 4095 */
+		"   nr    %4,%3\n"	/* %4 = (ptr + 4095) & -4096 */
+		"  "SLR"  %4,%1\n"
+		"  "CLR"  %0,%4\n"	/* copy crosses next page boundary? */
+		"   jnh   4f\n"
+		"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
+		"10:"SLR"  %0,%4\n"
+		"  "ALR"  %2,%4\n"
+		"4:"LHI"  %4,-1\n"
+		"  "ALR"  %4,%0\n"	/* copy remaining size, subtract 1 */
+		"   bras  %3,6f\n"	/* memset loop */
+		"   xc    0(1,%2),0(%2)\n"
+		"5: xc    0(256,%2),0(%2)\n"
+		"   la    %2,256(%2)\n"
+		"6:"AHI"  %4,-256\n"
+		"   jnm   5b\n"
+		"   ex    %4,0(%3)\n"
+		"   j     8f\n"
+		"7:"SLR"  %0,%0\n"
+		"8:\n"
+		EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
+						unsigned long size)
+{
+	unsigned long tmp1, tmp2;
+
+	update_primary_asce(current);
+	tmp1 = -256UL;
+	asm volatile(
+		"   sacf  0\n"
+		"0: mvcp  0(%0,%2),0(%1),%3\n"
+		"10:jz    8f\n"
+		"1:"ALR"  %0,%3\n"
+		"   la    %1,256(%1)\n"
+		"   la    %2,256(%2)\n"
+		"2: mvcp  0(%0,%2),0(%1),%3\n"
+		"11:jnz   1b\n"
+		"   j     8f\n"
+		"3: la    %4,255(%1)\n"	/* %4 = ptr + 255 */
+		"  "LHI"  %3,-4096\n"
+		"   nr    %4,%3\n"	/* %4 = (ptr + 255) & -4096 */
+		"  "SLR"  %4,%1\n"
+		"  "CLR"  %0,%4\n"	/* copy crosses next page boundary? */
+		"   jnh   5f\n"
+		"4: mvcp  0(%4,%2),0(%1),%3\n"
+		"12:"SLR"  %0,%4\n"
+		"  "ALR"  %2,%4\n"
+		"5:"LHI"  %4,-1\n"
+		"  "ALR"  %4,%0\n"	/* copy remaining size, subtract 1 */
+		"   bras  %3,7f\n"	/* memset loop */
+		"   xc    0(1,%2),0(%2)\n"
+		"6: xc    0(256,%2),0(%2)\n"
+		"   la    %2,256(%2)\n"
+		"7:"AHI"  %4,-256\n"
+		"   jnm   6b\n"
+		"   ex    %4,0(%3)\n"
+		"   j     9f\n"
+		"8:"SLR"  %0,%0\n"
+		"9: sacf  768\n"
+		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
+		EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	if (static_key_false(&have_mvcos))
+		return copy_from_user_mvcos(to, from, n);
+	return copy_from_user_mvcp(to, from, n);
+}
+EXPORT_SYMBOL(__copy_from_user);
+
+static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
+					       unsigned long size)
+{
+	register unsigned long reg0 asm("0") = 0x810000UL;
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -4096UL;
+	asm volatile(
+		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
+		"6: jz    4f\n"
+		"1:"ALR"  %0,%3\n"
+		"  "SLR"  %1,%3\n"
+		"  "SLR"  %2,%3\n"
+		"   j     0b\n"
+		"2: la    %4,4095(%1)\n"/* %4 = ptr + 4095 */
+		"   nr    %4,%3\n"	/* %4 = (ptr + 4095) & -4096 */
+		"  "SLR"  %4,%1\n"
+		"  "CLR"  %0,%4\n"	/* copy crosses next page boundary? */
+		"   jnh   5f\n"
+		"3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
+		"7:"SLR"  %0,%4\n"
+		"   j     5f\n"
+		"4:"SLR"  %0,%0\n"
+		"5:\n"
+		EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
+					      unsigned long size)
+{
+	unsigned long tmp1, tmp2;
+
+	update_primary_asce(current);
+	tmp1 = -256UL;
+	asm volatile(
+		"   sacf  0\n"
+		"0: mvcs  0(%0,%1),0(%2),%3\n"
+		"7: jz    5f\n"
+		"1:"ALR"  %0,%3\n"
+		"   la    %1,256(%1)\n"
+		"   la    %2,256(%2)\n"
+		"2: mvcs  0(%0,%1),0(%2),%3\n"
+		"8: jnz   1b\n"
+		"   j     5f\n"
+		"3: la    %4,255(%1)\n" /* %4 = ptr + 255 */
+		"  "LHI"  %3,-4096\n"
+		"   nr    %4,%3\n"	/* %4 = (ptr + 255) & -4096 */
+		"  "SLR"  %4,%1\n"
+		"  "CLR"  %0,%4\n"	/* copy crosses next page boundary? */
+		"   jnh   6f\n"
+		"4: mvcs  0(%4,%1),0(%2),%3\n"
+		"9:"SLR"  %0,%4\n"
+		"   j     6f\n"
+		"5:"SLR"  %0,%0\n"
+		"6: sacf  768\n"
+		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
+		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	if (static_key_false(&have_mvcos))
+		return copy_to_user_mvcos(to, from, n);
+	return copy_to_user_mvcs(to, from, n);
+}
+EXPORT_SYMBOL(__copy_to_user);
+
+static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
+					       unsigned long size)
+{
+	register unsigned long reg0 asm("0") = 0x810081UL;
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -4096UL;
+	/* FIXME: copy with reduced length. */
+	asm volatile(
+		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
+		"   jz	  2f\n"
+		"1:"ALR"  %0,%3\n"
+		"  "SLR"  %1,%3\n"
+		"  "SLR"  %2,%3\n"
+		"   j	  0b\n"
+		"2:"SLR"  %0,%0\n"
+		"3: \n"
+		EX_TABLE(0b,3b)
+		: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from,
+					     unsigned long size)
+{
+	unsigned long tmp1;
+
+	update_primary_asce(current);
+	asm volatile(
+		"   sacf  256\n"
+		"  "AHI"  %0,-1\n"
+		"   jo	  5f\n"
+		"   bras  %3,3f\n"
+		"0:"AHI"  %0,257\n"
+		"1: mvc	  0(1,%1),0(%2)\n"
+		"   la	  %1,1(%1)\n"
+		"   la	  %2,1(%2)\n"
+		"  "AHI"  %0,-1\n"
+		"   jnz	  1b\n"
+		"   j	  5f\n"
+		"2: mvc	  0(256,%1),0(%2)\n"
+		"   la	  %1,256(%1)\n"
+		"   la	  %2,256(%2)\n"
+		"3:"AHI"  %0,-256\n"
+		"   jnm	  2b\n"
+		"4: ex	  %0,1b-0b(%3)\n"
+		"5: "SLR"  %0,%0\n"
+		"6: sacf  768\n"
+		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
+		: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
+		: : "cc", "memory");
+	return size;
+}
+
+unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
+{
+	if (static_key_false(&have_mvcos))
+		return copy_in_user_mvcos(to, from, n);
+	return copy_in_user_mvc(to, from, n);
+}
+EXPORT_SYMBOL(__copy_in_user);
+
+static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
+{
+	register unsigned long reg0 asm("0") = 0x810000UL;
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -4096UL;
+	asm volatile(
+		"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
+		"   jz	  4f\n"
+		"1:"ALR"  %0,%2\n"
+		"  "SLR"  %1,%2\n"
+		"   j	  0b\n"
+		"2: la	  %3,4095(%1)\n"/* %3 = to + 4095 */
+		"   nr	  %3,%2\n"	/* %3 = (to + 4095) & -4096 */
+		"  "SLR"  %3,%1\n"
+		"  "CLR"  %0,%3\n"	/* copy crosses next page boundary? */
+		"   jnh	  5f\n"
+		"3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
+		"  "SLR"  %0,%3\n"
+		"   j	  5f\n"
+		"4:"SLR"  %0,%0\n"
+		"5:\n"
+		EX_TABLE(0b,2b) EX_TABLE(3b,5b)
+		: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
+		: "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
+{
+	unsigned long tmp1, tmp2;
+
+	update_primary_asce(current);
+	asm volatile(
+		"   sacf  256\n"
+		"  "AHI"  %0,-1\n"
+		"   jo    5f\n"
+		"   bras  %3,3f\n"
+		"   xc    0(1,%1),0(%1)\n"
+		"0:"AHI"  %0,257\n"
+		"   la    %2,255(%1)\n" /* %2 = ptr + 255 */
+		"   srl   %2,12\n"
+		"   sll   %2,12\n"	/* %2 = (ptr + 255) & -4096 */
+		"  "SLR"  %2,%1\n"
+		"  "CLR"  %0,%2\n"	/* clear crosses next page boundary? */
+		"   jnh   5f\n"
+		"  "AHI"  %2,-1\n"
+		"1: ex    %2,0(%3)\n"
+		"  "AHI"  %2,1\n"
+		"  "SLR"  %0,%2\n"
+		"   j     5f\n"
+		"2: xc    0(256,%1),0(%1)\n"
+		"   la    %1,256(%1)\n"
+		"3:"AHI"  %0,-256\n"
+		"   jnm   2b\n"
+		"4: ex    %0,0(%3)\n"
+		"5: "SLR"  %0,%0\n"
+		"6: sacf  768\n"
+		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
+		: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+unsigned long __clear_user(void __user *to, unsigned long size)
+{
+	if (static_key_false(&have_mvcos))
+		return clear_user_mvcos(to, size);
+	return clear_user_xc(to, size);
+}
+EXPORT_SYMBOL(__clear_user);
+
+static inline unsigned long strnlen_user_srst(const char __user *src,
+					      unsigned long size)
+{
+	register unsigned long reg0 asm("0") = 0;
+	unsigned long tmp1, tmp2;
+
+	if (unlikely(!size))
+		return 0;
+	update_primary_asce(current);
+	asm volatile(
+		"   la    %2,0(%1)\n"
+		"   la    %3,0(%0,%1)\n"
+		"  "SLR"  %0,%0\n"
+		"   sacf  256\n"
+		"0: srst  %3,%2\n"
+		"   jo    0b\n"
+		"   la    %0,1(%3)\n"	/* strnlen_user result includes \0 */
+		"  "SLR"  %0,%1\n"
+		"1: sacf  768\n"
+		EX_TABLE(0b,1b)
+		: "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+unsigned long __strnlen_user(const char __user *src, unsigned long size)
+{
+	update_primary_asce(current);
+	return strnlen_user_srst(src, size);
+}
+EXPORT_SYMBOL(__strnlen_user);
+
+long __strncpy_from_user(char *dst, const char __user *src, long size)
+{
+	size_t done, len, offset, len_str;
+
+	if (unlikely(size <= 0))
+		return 0;
+	done = 0;
+	do {
+		offset = (size_t)src & ~PAGE_MASK;
+		len = min(size - done, PAGE_SIZE - offset);
+		if (copy_from_user(dst, src, len))
+			return -EFAULT;
+		len_str = strnlen(dst, len);
+		done += len_str;
+		src += len_str;
+		dst += len_str;
+	} while ((len_str == len) && (done < size));
+	return done;
+}
+EXPORT_SYMBOL(__strncpy_from_user);
+
+/*
+ * The "old" uaccess variant without mvcos can be enforced with the
+ * uaccess_primary kernel parameter. This is mainly for debugging purposes.
+ */
+static int uaccess_primary __initdata;
+
+static int __init parse_uaccess_pt(char *__unused)
+{
+	uaccess_primary = 1;
+	return 0;
+}
+early_param("uaccess_primary", parse_uaccess_pt);
+
+static int __init uaccess_init(void)
+{
+	if (IS_ENABLED(CONFIG_64BIT) && !uaccess_primary && test_facility(27))
+		static_key_slow_inc(&have_mvcos);
+	return 0;
+}
+early_initcall(uaccess_init);
diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h
deleted file mode 100644
index c7e0e81f4b4ebf73cf15664f048301faaffccabb..0000000000000000000000000000000000000000
--- a/arch/s390/lib/uaccess.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- *    Copyright IBM Corp. 2007
- *
- */
-
-#ifndef __ARCH_S390_LIB_UACCESS_H
-#define __ARCH_S390_LIB_UACCESS_H
-
-unsigned long copy_from_user_pt(void *to, const void __user *from, unsigned long n);
-unsigned long copy_to_user_pt(void __user *to, const void *from, unsigned long n);
-unsigned long copy_in_user_pt(void __user *to, const void __user *from, unsigned long n);
-unsigned long clear_user_pt(void __user *to, unsigned long n);
-unsigned long strnlen_user_pt(const char __user *src, unsigned long count);
-long strncpy_from_user_pt(char *dst, const char __user *src, long count);
-
-#endif /* __ARCH_S390_LIB_UACCESS_H */
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
deleted file mode 100644
index ae97b8df11aa12e154e3403fc080a603395d5346..0000000000000000000000000000000000000000
--- a/arch/s390/lib/uaccess_mvcos.c
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- *  Optimized user space space access functions based on mvcos.
- *
- *    Copyright IBM Corp. 2006
- *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- *		 Gerald Schaefer (gerald.schaefer@de.ibm.com)
- */
-
-#include <linux/jump_label.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <asm/facility.h>
-#include <asm/uaccess.h>
-#include <asm/futex.h>
-#include "uaccess.h"
-
-#ifndef CONFIG_64BIT
-#define AHI	"ahi"
-#define ALR	"alr"
-#define CLR	"clr"
-#define LHI	"lhi"
-#define SLR	"slr"
-#else
-#define AHI	"aghi"
-#define ALR	"algr"
-#define CLR	"clgr"
-#define LHI	"lghi"
-#define SLR	"slgr"
-#endif
-
-static struct static_key have_mvcos = STATIC_KEY_INIT_TRUE;
-
-static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
-						 unsigned long size)
-{
-	register unsigned long reg0 asm("0") = 0x81UL;
-	unsigned long tmp1, tmp2;
-
-	tmp1 = -4096UL;
-	asm volatile(
-		"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
-		"9: jz    7f\n"
-		"1:"ALR"  %0,%3\n"
-		"  "SLR"  %1,%3\n"
-		"  "SLR"  %2,%3\n"
-		"   j     0b\n"
-		"2: la    %4,4095(%1)\n"/* %4 = ptr + 4095 */
-		"   nr    %4,%3\n"	/* %4 = (ptr + 4095) & -4096 */
-		"  "SLR"  %4,%1\n"
-		"  "CLR"  %0,%4\n"	/* copy crosses next page boundary? */
-		"   jnh   4f\n"
-		"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
-		"10:"SLR"  %0,%4\n"
-		"  "ALR"  %2,%4\n"
-		"4:"LHI"  %4,-1\n"
-		"  "ALR"  %4,%0\n"	/* copy remaining size, subtract 1 */
-		"   bras  %3,6f\n"	/* memset loop */
-		"   xc    0(1,%2),0(%2)\n"
-		"5: xc    0(256,%2),0(%2)\n"
-		"   la    %2,256(%2)\n"
-		"6:"AHI"  %4,-256\n"
-		"   jnm   5b\n"
-		"   ex    %4,0(%3)\n"
-		"   j     8f\n"
-		"7:"SLR"  %0,%0\n"
-		"8: \n"
-		EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
-		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
-		: "d" (reg0) : "cc", "memory");
-	return size;
-}
-
-unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	if (static_key_true(&have_mvcos))
-		return copy_from_user_mvcos(to, from, n);
-	return copy_from_user_pt(to, from, n);
-}
-EXPORT_SYMBOL(__copy_from_user);
-
-static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
-					       unsigned long size)
-{
-	register unsigned long reg0 asm("0") = 0x810000UL;
-	unsigned long tmp1, tmp2;
-
-	tmp1 = -4096UL;
-	asm volatile(
-		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
-		"6: jz    4f\n"
-		"1:"ALR"  %0,%3\n"
-		"  "SLR"  %1,%3\n"
-		"  "SLR"  %2,%3\n"
-		"   j     0b\n"
-		"2: la    %4,4095(%1)\n"/* %4 = ptr + 4095 */
-		"   nr    %4,%3\n"	/* %4 = (ptr + 4095) & -4096 */
-		"  "SLR"  %4,%1\n"
-		"  "CLR"  %0,%4\n"	/* copy crosses next page boundary? */
-		"   jnh   5f\n"
-		"3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
-		"7:"SLR"  %0,%4\n"
-		"   j     5f\n"
-		"4:"SLR"  %0,%0\n"
-		"5: \n"
-		EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
-		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
-		: "d" (reg0) : "cc", "memory");
-	return size;
-}
-
-unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	if (static_key_true(&have_mvcos))
-		return copy_to_user_mvcos(to, from, n);
-	return copy_to_user_pt(to, from, n);
-}
-EXPORT_SYMBOL(__copy_to_user);
-
-static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
-					       unsigned long size)
-{
-	register unsigned long reg0 asm("0") = 0x810081UL;
-	unsigned long tmp1, tmp2;
-
-	tmp1 = -4096UL;
-	/* FIXME: copy with reduced length. */
-	asm volatile(
-		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
-		"   jz    2f\n"
-		"1:"ALR"  %0,%3\n"
-		"  "SLR"  %1,%3\n"
-		"  "SLR"  %2,%3\n"
-		"   j     0b\n"
-		"2:"SLR"  %0,%0\n"
-		"3: \n"
-		EX_TABLE(0b,3b)
-		: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
-		: "d" (reg0) : "cc", "memory");
-	return size;
-}
-
-unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
-	if (static_key_true(&have_mvcos))
-		return copy_in_user_mvcos(to, from, n);
-	return copy_in_user_pt(to, from, n);
-}
-EXPORT_SYMBOL(__copy_in_user);
-
-static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
-{
-	register unsigned long reg0 asm("0") = 0x810000UL;
-	unsigned long tmp1, tmp2;
-
-	tmp1 = -4096UL;
-	asm volatile(
-		"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
-		"   jz    4f\n"
-		"1:"ALR"  %0,%2\n"
-		"  "SLR"  %1,%2\n"
-		"   j     0b\n"
-		"2: la    %3,4095(%1)\n"/* %4 = to + 4095 */
-		"   nr    %3,%2\n"	/* %4 = (to + 4095) & -4096 */
-		"  "SLR"  %3,%1\n"
-		"  "CLR"  %0,%3\n"	/* copy crosses next page boundary? */
-		"   jnh   5f\n"
-		"3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
-		"  "SLR"  %0,%3\n"
-		"   j     5f\n"
-		"4:"SLR"  %0,%0\n"
-		"5: \n"
-		EX_TABLE(0b,2b) EX_TABLE(3b,5b)
-		: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
-		: "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
-	return size;
-}
-
-unsigned long __clear_user(void __user *to, unsigned long size)
-{
-	if (static_key_true(&have_mvcos))
-		return clear_user_mvcos(to, size);
-	return clear_user_pt(to, size);
-}
-EXPORT_SYMBOL(__clear_user);
-
-static inline unsigned long strnlen_user_mvcos(const char __user *src,
-					       unsigned long count)
-{
-	unsigned long done, len, offset, len_str;
-	char buf[256];
-
-	done = 0;
-	do {
-		offset = (unsigned long)src & ~PAGE_MASK;
-		len = min(256UL, PAGE_SIZE - offset);
-		len = min(count - done, len);
-		if (copy_from_user_mvcos(buf, src, len))
-			return 0;
-		len_str = strnlen(buf, len);
-		done += len_str;
-		src += len_str;
-	} while ((len_str == len) && (done < count));
-	return done + 1;
-}
-
-unsigned long __strnlen_user(const char __user *src, unsigned long count)
-{
-	if (static_key_true(&have_mvcos))
-		return strnlen_user_mvcos(src, count);
-	return strnlen_user_pt(src, count);
-}
-EXPORT_SYMBOL(__strnlen_user);
-
-static inline long strncpy_from_user_mvcos(char *dst, const char __user *src,
-					   long count)
-{
-	unsigned long done, len, offset, len_str;
-
-	if (unlikely(count <= 0))
-		return 0;
-	done = 0;
-	do {
-		offset = (unsigned long)src & ~PAGE_MASK;
-		len = min(count - done, PAGE_SIZE - offset);
-		if (copy_from_user_mvcos(dst, src, len))
-			return -EFAULT;
-		len_str = strnlen(dst, len);
-		done += len_str;
-		src += len_str;
-		dst += len_str;
-	} while ((len_str == len) && (done < count));
-	return done;
-}
-
-long __strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	if (static_key_true(&have_mvcos))
-		return strncpy_from_user_mvcos(dst, src, count);
-	return strncpy_from_user_pt(dst, src, count);
-}
-EXPORT_SYMBOL(__strncpy_from_user);
-
-/*
- * The uaccess page tabe walk variant can be enforced with the "uaccesspt"
- * kernel parameter. This is mainly for debugging purposes.
- */
-static int force_uaccess_pt __initdata;
-
-static int __init parse_uaccess_pt(char *__unused)
-{
-	force_uaccess_pt = 1;
-	return 0;
-}
-early_param("uaccesspt", parse_uaccess_pt);
-
-static int __init uaccess_init(void)
-{
-	if (IS_ENABLED(CONFIG_32BIT) || force_uaccess_pt || !test_facility(27))
-		static_key_slow_dec(&have_mvcos);
-	return 0;
-}
-early_initcall(uaccess_init);
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
deleted file mode 100644
index 8d39760bae68f9f4e65bbbf4d42d5db25ef4958c..0000000000000000000000000000000000000000
--- a/arch/s390/lib/uaccess_pt.c
+++ /dev/null
@@ -1,471 +0,0 @@
-/*
- *  User access functions based on page table walks for enhanced
- *  system layout without hardware support.
- *
- *    Copyright IBM Corp. 2006, 2012
- *    Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
- */
-
-#include <linux/errno.h>
-#include <linux/hardirq.h>
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-#include <asm/uaccess.h>
-#include <asm/futex.h>
-#include "uaccess.h"
-
-#ifndef CONFIG_64BIT
-#define AHI	"ahi"
-#define SLR	"slr"
-#else
-#define AHI	"aghi"
-#define SLR	"slgr"
-#endif
-
-static unsigned long strnlen_kernel(const char __user *src, unsigned long count)
-{
-	register unsigned long reg0 asm("0") = 0UL;
-	unsigned long tmp1, tmp2;
-
-	asm volatile(
-		"   la	  %2,0(%1)\n"
-		"   la	  %3,0(%0,%1)\n"
-		"  "SLR"  %0,%0\n"
-		"0: srst  %3,%2\n"
-		"   jo	  0b\n"
-		"   la	  %0,1(%3)\n"	/* strnlen_kernel results includes \0 */
-		"  "SLR"  %0,%1\n"
-		"1:\n"
-		EX_TABLE(0b,1b)
-		: "+a" (count), "+a" (src), "=a" (tmp1), "=a" (tmp2)
-		: "d" (reg0) : "cc", "memory");
-	return count;
-}
-
-static unsigned long copy_in_kernel(void __user *to, const void __user *from,
-				    unsigned long count)
-{
-	unsigned long tmp1;
-
-	asm volatile(
-		"  "AHI"  %0,-1\n"
-		"   jo	  5f\n"
-		"   bras  %3,3f\n"
-		"0:"AHI"  %0,257\n"
-		"1: mvc	  0(1,%1),0(%2)\n"
-		"   la	  %1,1(%1)\n"
-		"   la	  %2,1(%2)\n"
-		"  "AHI"  %0,-1\n"
-		"   jnz	  1b\n"
-		"   j	  5f\n"
-		"2: mvc	  0(256,%1),0(%2)\n"
-		"   la	  %1,256(%1)\n"
-		"   la	  %2,256(%2)\n"
-		"3:"AHI"  %0,-256\n"
-		"   jnm	  2b\n"
-		"4: ex	  %0,1b-0b(%3)\n"
-		"5:"SLR"  %0,%0\n"
-		"6:\n"
-		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
-		: "+a" (count), "+a" (to), "+a" (from), "=a" (tmp1)
-		: : "cc", "memory");
-	return count;
-}
-
-/*
- * Returns kernel address for user virtual address. If the returned address is
- * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the
- * address contains the (negative) exception code.
- */
-#ifdef CONFIG_64BIT
-
-static unsigned long follow_table(struct mm_struct *mm,
-				  unsigned long address, int write)
-{
-	unsigned long *table = (unsigned long *)__pa(mm->pgd);
-
-	if (unlikely(address > mm->context.asce_limit - 1))
-		return -0x38UL;
-	switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
-	case _ASCE_TYPE_REGION1:
-		table = table + ((address >> 53) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INVALID))
-			return -0x39UL;
-		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-		/* fallthrough */
-	case _ASCE_TYPE_REGION2:
-		table = table + ((address >> 42) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INVALID))
-			return -0x3aUL;
-		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-		/* fallthrough */
-	case _ASCE_TYPE_REGION3:
-		table = table + ((address >> 31) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INVALID))
-			return -0x3bUL;
-		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-		/* fallthrough */
-	case _ASCE_TYPE_SEGMENT:
-		table = table + ((address >> 20) & 0x7ff);
-		if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
-			return -0x10UL;
-		if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
-			if (write && (*table & _SEGMENT_ENTRY_PROTECT))
-				return -0x04UL;
-			return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
-				(address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
-		}
-		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
-	}
-	table = table + ((address >> 12) & 0xff);
-	if (unlikely(*table & _PAGE_INVALID))
-		return -0x11UL;
-	if (write && (*table & _PAGE_PROTECT))
-		return -0x04UL;
-	return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
-}
-
-#else /* CONFIG_64BIT */
-
-static unsigned long follow_table(struct mm_struct *mm,
-				  unsigned long address, int write)
-{
-	unsigned long *table = (unsigned long *)__pa(mm->pgd);
-
-	table = table + ((address >> 20) & 0x7ff);
-	if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
-		return -0x10UL;
-	table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
-	table = table + ((address >> 12) & 0xff);
-	if (unlikely(*table & _PAGE_INVALID))
-		return -0x11UL;
-	if (write && (*table & _PAGE_PROTECT))
-		return -0x04UL;
-	return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
-}
-
-#endif /* CONFIG_64BIT */
-
-static inline unsigned long __user_copy_pt(unsigned long uaddr, void *kptr,
-					   unsigned long n, int write_user)
-{
-	struct mm_struct *mm = current->mm;
-	unsigned long offset, done, size, kaddr;
-	void *from, *to;
-
-	if (!mm)
-		return n;
-	done = 0;
-retry:
-	spin_lock(&mm->page_table_lock);
-	do {
-		kaddr = follow_table(mm, uaddr, write_user);
-		if (IS_ERR_VALUE(kaddr))
-			goto fault;
-
-		offset = uaddr & ~PAGE_MASK;
-		size = min(n - done, PAGE_SIZE - offset);
-		if (write_user) {
-			to = (void *) kaddr;
-			from = kptr + done;
-		} else {
-			from = (void *) kaddr;
-			to = kptr + done;
-		}
-		memcpy(to, from, size);
-		done += size;
-		uaddr += size;
-	} while (done < n);
-	spin_unlock(&mm->page_table_lock);
-	return n - done;
-fault:
-	spin_unlock(&mm->page_table_lock);
-	if (__handle_fault(uaddr, -kaddr, write_user))
-		return n - done;
-	goto retry;
-}
-
-/*
- * Do DAT for user address by page table walk, return kernel address.
- * This function needs to be called with current->mm->page_table_lock held.
- */
-static inline unsigned long __dat_user_addr(unsigned long uaddr, int write)
-{
-	struct mm_struct *mm = current->mm;
-	unsigned long kaddr;
-	int rc;
-
-retry:
-	kaddr = follow_table(mm, uaddr, write);
-	if (IS_ERR_VALUE(kaddr))
-		goto fault;
-
-	return kaddr;
-fault:
-	spin_unlock(&mm->page_table_lock);
-	rc = __handle_fault(uaddr, -kaddr, write);
-	spin_lock(&mm->page_table_lock);
-	if (!rc)
-		goto retry;
-	return 0;
-}
-
-unsigned long copy_from_user_pt(void *to, const void __user *from, unsigned long n)
-{
-	unsigned long rc;
-
-	if (segment_eq(get_fs(), KERNEL_DS))
-		return copy_in_kernel((void __user *) to, from, n);
-	rc = __user_copy_pt((unsigned long) from, to, n, 0);
-	if (unlikely(rc))
-		memset(to + n - rc, 0, rc);
-	return rc;
-}
-
-unsigned long copy_to_user_pt(void __user *to, const void *from, unsigned long n)
-{
-	if (segment_eq(get_fs(), KERNEL_DS))
-		return copy_in_kernel(to, (void __user *) from, n);
-	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
-}
-
-unsigned long clear_user_pt(void __user *to, unsigned long n)
-{
-	void *zpage = (void *) empty_zero_page;
-	unsigned long done, size, ret;
-
-	done = 0;
-	do {
-		if (n - done > PAGE_SIZE)
-			size = PAGE_SIZE;
-		else
-			size = n - done;
-		if (segment_eq(get_fs(), KERNEL_DS))
-			ret = copy_in_kernel(to, (void __user *) zpage, n);
-		else
-			ret = __user_copy_pt((unsigned long) to, zpage, size, 1);
-		done += size;
-		to += size;
-		if (ret)
-			return ret + n - done;
-	} while (done < n);
-	return 0;
-}
-
-unsigned long strnlen_user_pt(const char __user *src, unsigned long count)
-{
-	unsigned long uaddr = (unsigned long) src;
-	struct mm_struct *mm = current->mm;
-	unsigned long offset, done, len, kaddr;
-	unsigned long len_str;
-
-	if (unlikely(!count))
-		return 0;
-	if (segment_eq(get_fs(), KERNEL_DS))
-		return strnlen_kernel(src, count);
-	if (!mm)
-		return 0;
-	done = 0;
-retry:
-	spin_lock(&mm->page_table_lock);
-	do {
-		kaddr = follow_table(mm, uaddr, 0);
-		if (IS_ERR_VALUE(kaddr))
-			goto fault;
-
-		offset = uaddr & ~PAGE_MASK;
-		len = min(count - done, PAGE_SIZE - offset);
-		len_str = strnlen((char *) kaddr, len);
-		done += len_str;
-		uaddr += len_str;
-	} while ((len_str == len) && (done < count));
-	spin_unlock(&mm->page_table_lock);
-	return done + 1;
-fault:
-	spin_unlock(&mm->page_table_lock);
-	if (__handle_fault(uaddr, -kaddr, 0))
-		return 0;
-	goto retry;
-}
-
-long strncpy_from_user_pt(char *dst, const char __user *src, long count)
-{
-	unsigned long done, len, offset, len_str;
-
-	if (unlikely(count <= 0))
-		return 0;
-	done = 0;
-	do {
-		offset = (unsigned long)src & ~PAGE_MASK;
-		len = min(count - done, PAGE_SIZE - offset);
-		if (segment_eq(get_fs(), KERNEL_DS)) {
-			if (copy_in_kernel((void __user *) dst, src, len))
-				return -EFAULT;
-		} else {
-			if (__user_copy_pt((unsigned long) src, dst, len, 0))
-				return -EFAULT;
-		}
-		len_str = strnlen(dst, len);
-		done += len_str;
-		src += len_str;
-		dst += len_str;
-	} while ((len_str == len) && (done < count));
-	return done;
-}
-
-unsigned long copy_in_user_pt(void __user *to, const void __user *from,
-			      unsigned long n)
-{
-	struct mm_struct *mm = current->mm;
-	unsigned long offset_max, uaddr, done, size, error_code;
-	unsigned long uaddr_from = (unsigned long) from;
-	unsigned long uaddr_to = (unsigned long) to;
-	unsigned long kaddr_to, kaddr_from;
-	int write_user;
-
-	if (segment_eq(get_fs(), KERNEL_DS))
-		return copy_in_kernel(to, from, n);
-	if (!mm)
-		return n;
-	done = 0;
-retry:
-	spin_lock(&mm->page_table_lock);
-	do {
-		write_user = 0;
-		uaddr = uaddr_from;
-		kaddr_from = follow_table(mm, uaddr_from, 0);
-		error_code = kaddr_from;
-		if (IS_ERR_VALUE(error_code))
-			goto fault;
-
-		write_user = 1;
-		uaddr = uaddr_to;
-		kaddr_to = follow_table(mm, uaddr_to, 1);
-		error_code = (unsigned long) kaddr_to;
-		if (IS_ERR_VALUE(error_code))
-			goto fault;
-
-		offset_max = max(uaddr_from & ~PAGE_MASK,
-				 uaddr_to & ~PAGE_MASK);
-		size = min(n - done, PAGE_SIZE - offset_max);
-
-		memcpy((void *) kaddr_to, (void *) kaddr_from, size);
-		done += size;
-		uaddr_from += size;
-		uaddr_to += size;
-	} while (done < n);
-	spin_unlock(&mm->page_table_lock);
-	return n - done;
-fault:
-	spin_unlock(&mm->page_table_lock);
-	if (__handle_fault(uaddr, -error_code, write_user))
-		return n - done;
-	goto retry;
-}
-
-#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
-	asm volatile("0: l   %1,0(%6)\n"				\
-		     "1: " insn						\
-		     "2: cs  %1,%2,0(%6)\n"				\
-		     "3: jl  1b\n"					\
-		     "   lhi %0,0\n"					\
-		     "4:\n"						\
-		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
-		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
-		       "=m" (*uaddr)					\
-		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
-		       "m" (*uaddr) : "cc" );
-
-static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
-{
-	int oldval = 0, newval, ret;
-
-	switch (op) {
-	case FUTEX_OP_SET:
-		__futex_atomic_op("lr %2,%5\n",
-				  ret, oldval, newval, uaddr, oparg);
-		break;
-	case FUTEX_OP_ADD:
-		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
-				  ret, oldval, newval, uaddr, oparg);
-		break;
-	case FUTEX_OP_OR:
-		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
-				  ret, oldval, newval, uaddr, oparg);
-		break;
-	case FUTEX_OP_ANDN:
-		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
-				  ret, oldval, newval, uaddr, oparg);
-		break;
-	case FUTEX_OP_XOR:
-		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
-				  ret, oldval, newval, uaddr, oparg);
-		break;
-	default:
-		ret = -ENOSYS;
-	}
-	if (ret == 0)
-		*old = oldval;
-	return ret;
-}
-
-int __futex_atomic_op_inuser(int op, u32 __user *uaddr, int oparg, int *old)
-{
-	int ret;
-
-	if (segment_eq(get_fs(), KERNEL_DS))
-		return __futex_atomic_op_pt(op, uaddr, oparg, old);
-	if (unlikely(!current->mm))
-		return -EFAULT;
-	spin_lock(&current->mm->page_table_lock);
-	uaddr = (u32 __force __user *)
-		__dat_user_addr((__force unsigned long) uaddr, 1);
-	if (!uaddr) {
-		spin_unlock(&current->mm->page_table_lock);
-		return -EFAULT;
-	}
-	get_page(virt_to_page(uaddr));
-	spin_unlock(&current->mm->page_table_lock);
-	ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
-	put_page(virt_to_page(uaddr));
-	return ret;
-}
-
-static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
-				     u32 oldval, u32 newval)
-{
-	int ret;
-
-	asm volatile("0: cs   %1,%4,0(%5)\n"
-		     "1: la   %0,0\n"
-		     "2:\n"
-		     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
-		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
-		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
-		     : "cc", "memory" );
-	*uval = oldval;
-	return ret;
-}
-
-int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
-				  u32 oldval, u32 newval)
-{
-	int ret;
-
-	if (segment_eq(get_fs(), KERNEL_DS))
-		return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
-	if (unlikely(!current->mm))
-		return -EFAULT;
-	spin_lock(&current->mm->page_table_lock);
-	uaddr = (u32 __force __user *)
-		__dat_user_addr((__force unsigned long) uaddr, 1);
-	if (!uaddr) {
-		spin_unlock(&current->mm->page_table_lock);
-		return -EFAULT;
-	}
-	get_page(virt_to_page(uaddr));
-	spin_unlock(&current->mm->page_table_lock);
-	ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
-	put_page(virt_to_page(uaddr));
-	return ret;
-}
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 750565f72e0638ef6f3ea0c09070e09a84ab2cfb..f93e6c2d4ba5e15117c79e732ca827d57d7b79c9 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -105,21 +105,24 @@ void bust_spinlocks(int yes)
  * Returns the address space associated with the fault.
  * Returns 0 for kernel space and 1 for user space.
  */
-static inline int user_space_fault(unsigned long trans_exc_code)
+static inline int user_space_fault(struct pt_regs *regs)
 {
+	unsigned long trans_exc_code;
+
 	/*
 	 * The lowest two bits of the translation exception
 	 * identification indicate which paging table was used.
 	 */
-	trans_exc_code &= 3;
-	if (trans_exc_code == 2)
-		/* Access via secondary space, set_fs setting decides */
+	trans_exc_code = regs->int_parm_long & 3;
+	if (trans_exc_code == 3) /* home space -> kernel */
+		return 0;
+	if (user_mode(regs))
+		return 1;
+	if (trans_exc_code == 2) /* secondary space -> set_fs */
 		return current->thread.mm_segment.ar4;
-	/*
-	 * Access via primary space or access register is from user space
-	 * and access via home space is from the kernel.
-	 */
-	return trans_exc_code != 3;
+	if (current->flags & PF_VCPU)
+		return 1;
+	return 0;
 }
 
 static inline void report_user_fault(struct pt_regs *regs, long signr)
@@ -171,7 +174,7 @@ static noinline void do_no_context(struct pt_regs *regs)
 	 * terminate things with extreme prejudice.
 	 */
 	address = regs->int_parm_long & __FAIL_ADDR_MASK;
-	if (!user_space_fault(regs->int_parm_long))
+	if (!user_space_fault(regs))
 		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
 		       " at virtual kernel address %p\n", (void *)address);
 	else
@@ -291,7 +294,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
 	 * user context.
 	 */
 	fault = VM_FAULT_BADCONTEXT;
-	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
+	if (unlikely(!user_space_fault(regs) || in_atomic() || !mm))
 		goto out;
 
 	address = trans_exc_code & __FAIL_ADDR_MASK;
@@ -423,30 +426,6 @@ void __kprobes do_dat_exception(struct pt_regs *regs)
 		do_fault_error(regs, fault);
 }
 
-int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
-{
-	struct pt_regs regs;
-	int access, fault;
-
-	/* Emulate a uaccess fault from kernel mode. */
-	regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK;
-	if (!irqs_disabled())
-		regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
-	regs.psw.addr = (unsigned long) __builtin_return_address(0);
-	regs.psw.addr |= PSW_ADDR_AMODE;
-	regs.int_code = pgm_int_code;
-	regs.int_parm_long = (uaddr & PAGE_MASK) | 2;
-	access = write ? VM_WRITE : VM_READ;
-	fault = do_exception(&regs, access);
-	/*
-	 * Since the fault happened in kernel mode while performing a uaccess
-	 * all we need to do now is emulating a fixup in case "fault" is not
-	 * zero.
-	 * For the calling uaccess functions this results always in -EFAULT.
-	 */
-	return fault ? -EFAULT : 0;
-}
-
 #ifdef CONFIG_PFAULT 
 /*
  * 'pfault' pseudo page faults routines.
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index c57c6338018459bf80ba1cb7fb1f9f1f510441d9..b5745dc9c6b506fb951ba6bf0de33e261c770297 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -54,7 +54,7 @@ static void __crst_table_upgrade(void *arg)
 	struct mm_struct *mm = arg;
 
 	if (current->active_mm == mm)
-		update_user_asce(mm);
+		update_user_asce(mm, 1);
 	__tlb_flush_local();
 }
 
@@ -108,7 +108,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 	pgd_t *pgd;
 
 	if (current->active_mm == mm) {
-		clear_user_asce(mm);
+		clear_user_asce(mm, 1);
 		__tlb_flush_mm(mm);
 	}
 	while (mm->context.asce_limit > limit) {
@@ -134,7 +134,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 		crst_table_free(mm, (unsigned long *) pgd);
 	}
 	if (current->active_mm == mm)
-		update_user_asce(mm);
+		update_user_asce(mm, 1);
 }
 #endif