Commit 9b27efe0 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:

 - further Spectre variant 1 fixes for user accessors.

 - kbuild cleanups (Masahiro Yamada)

 - hook up sync core functionality (Will Deacon)

 - nommu updates for hypervisor mode booting (Vladimir Murzin)

 - use compiler built-ins for fls and ffs (Nicolas Pitre)

* 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: spectre-v1: mitigate user accesses
  ARM: spectre-v1: use get_user() for __get_user()
  ARM: use __inttype() in get_user()
  ARM: oabi-compat: copy semops using __copy_from_user()
  ARM: vfp: use __copy_from_user() when restoring VFP state
  ARM: 8785/1: use compiler built-ins for ffs and fls
  ARM: 8784/1: NOMMU: Allow enter in Hyp mode
  ARM: 8783/1: NOMMU: Extend check for VBAR support
  ARM: 8782/1: vfp: clean up arch/arm/vfp/Makefile
  ARM: signal: copy registers using __copy_from_user()
  ARM: tcm: ensure inline stub functions are marked static
  ARM: 8779/1: add endianness option to LDFLAGS instead of LD
  ARM: 8777/1: Hook up SYNC_CORE functionality for sys_membarrier()
parents 85a0b791 c61b466d
@@ -5,10 +5,10 @@
 #
 # Architecture requirements
 #
-# * arm64
+# * arm/arm64
 #
-# Rely on eret context synchronization when returning from IPI handler, and
-# when returning to user-space.
+# Rely on implicit context synchronization as a result of exception return
+# when returning from IPI handler, and when returning to user-space.
 #
 # * x86
 #
@@ -31,7 +31,7 @@
     -----------------------
     |       alpha: | TODO |
     |         arc: | TODO |
-    |         arm: | TODO |
+    |         arm: |  ok  |
     |       arm64: |  ok  |
     |         c6x: | TODO |
     |       h8300: | TODO |
...
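The table above records support for the sync_core-flavoured membarrier() commands, which this series enables for 32-bit ARM via ARCH_HAS_MEMBARRIER_SYNC_CORE. For orientation only, a minimal userspace sketch of how the feature is typically consumed (not part of this commit; the wrapper function and error handling are illustrative):

/* Illustrative sketch: force every thread of this process through a
 * core-serializing instruction, e.g. after cross-modifying code.
 * Assumes a kernel providing membarrier SYNC_CORE support, as selected
 * for ARM by this series.
 */
#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>

static int membarrier(int cmd, unsigned int flags)
{
        return syscall(__NR_membarrier, cmd, flags);
}

int main(void)
{
        /* Register the process for private expedited sync-core barriers. */
        if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE, 0))
                return 1;

        /* ... patch code here, then issue the core-serializing barrier
         * before any thread may execute the new instructions. */
        if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0))
                return 1;

        return 0;
}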
@@ -9,6 +9,7 @@ config ARM
         select ARCH_HAS_ELF_RANDOMIZE
         select ARCH_HAS_FORTIFY_SOURCE
         select ARCH_HAS_KCOV
+        select ARCH_HAS_MEMBARRIER_SYNC_CORE
         select ARCH_HAS_PTE_SPECIAL if ARM_LPAE
         select ARCH_HAS_PHYS_TO_DMA
         select ARCH_HAS_SET_MEMORY
...
@@ -46,12 +46,12 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
 KBUILD_CPPFLAGS += -mbig-endian
 CHECKFLAGS      += -D__ARMEB__
 AS              += -EB
-LD              += -EB
+LDFLAGS         += -EB
 else
 KBUILD_CPPFLAGS += -mlittle-endian
 CHECKFLAGS      += -D__ARMEL__
 AS              += -EL
-LD              += -EL
+LDFLAGS         += -EL
 endif
 #
...
@@ -460,6 +460,10 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
         adds    \tmp, \addr, #\size - 1
         sbcccs  \tmp, \tmp, \limit
         bcs     \bad
+#ifdef CONFIG_CPU_SPECTRE
+        movcs   \addr, #0
+        csdb
+#endif
 #endif
         .endm
...
@@ -215,7 +215,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 #if __LINUX_ARM_ARCH__ < 5
-#include <asm-generic/bitops/ffz.h>
 #include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/__ffs.h>
 #include <asm-generic/bitops/fls.h>
@@ -223,93 +222,20 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 #else
-static inline int constant_fls(int x)
-{
-        int r = 32;
-        if (!x)
-                return 0;
-        if (!(x & 0xffff0000u)) {
-                x <<= 16;
-                r -= 16;
-        }
-        if (!(x & 0xff000000u)) {
-                x <<= 8;
-                r -= 8;
-        }
-        if (!(x & 0xf0000000u)) {
-                x <<= 4;
-                r -= 4;
-        }
-        if (!(x & 0xc0000000u)) {
-                x <<= 2;
-                r -= 2;
-        }
-        if (!(x & 0x80000000u)) {
-                x <<= 1;
-                r -= 1;
-        }
-        return r;
-}
-/*
- * On ARMv5 and above those functions can be implemented around the
- * clz instruction for much better code efficiency. __clz returns
- * the number of leading zeros, zero input will return 32, and
- * 0x80000000 will return 0.
- */
-static inline unsigned int __clz(unsigned int x)
-{
-        unsigned int ret;
-        asm("clz\t%0, %1" : "=r" (ret) : "r" (x));
-        return ret;
-}
-/*
- * fls() returns zero if the input is zero, otherwise returns the bit
- * position of the last set bit, where the LSB is 1 and MSB is 32.
- */
-static inline int fls(int x)
-{
-        if (__builtin_constant_p(x))
-                return constant_fls(x);
-        return 32 - __clz(x);
-}
-/*
- * __fls() returns the bit position of the last bit set, where the
- * LSB is 0 and MSB is 31. Zero input is undefined.
- */
-static inline unsigned long __fls(unsigned long x)
-{
-        return fls(x) - 1;
-}
-/*
- * ffs() returns zero if the input was zero, otherwise returns the bit
- * position of the first set bit, where the LSB is 1 and MSB is 32.
- */
-static inline int ffs(int x)
-{
-        return fls(x & -x);
-}
-/*
- * __ffs() returns the bit position of the first bit set, where the
- * LSB is 0 and MSB is 31. Zero input is undefined.
- */
-static inline unsigned long __ffs(unsigned long x)
-{
-        return ffs(x) - 1;
-}
-#define ffz(x) __ffs( ~(x) )
+/*
+ * On ARMv5 and above, the gcc built-ins may rely on the clz instruction
+ * and produce optimal inlined code in all cases. On ARMv7 it is even
+ * better by also using the rbit instruction.
+ */
+#include <asm-generic/bitops/builtin-__fls.h>
+#include <asm-generic/bitops/builtin-__ffs.h>
+#include <asm-generic/bitops/builtin-fls.h>
+#include <asm-generic/bitops/builtin-ffs.h>
 #endif
+#include <asm-generic/bitops/ffz.h>
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/sched.h>
...
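For reference, the builtin-based asm-generic headers pulled in above reduce to roughly the following (a paraphrased sketch, not the verbatim kernel sources); the compiler lowers the builtins to clz, and additionally rbit on ARMv7, as the new comment notes:

/* Rough equivalents of asm-generic/bitops/builtin-fls.h and builtin-ffs.h. */
static inline int fls(unsigned int x)
{
        /* fls(): 1-based index of the most significant set bit, 0 for x == 0 */
        return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
}

static inline int ffs(int x)
{
        /* ffs(): 1-based index of the least significant set bit, 0 for x == 0 */
        return __builtin_ffs(x);
}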
@@ -123,8 +123,8 @@ struct user_vfp_exc;
 extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
                                            struct user_vfp_exc __user *);
-extern int vfp_restore_user_hwstate(struct user_vfp __user *,
-                                    struct user_vfp_exc __user *);
+extern int vfp_restore_user_hwstate(struct user_vfp *,
+                                    struct user_vfp_exc *);
 #endif
 /*
...
@@ -84,6 +84,13 @@ static inline void set_fs(mm_segment_t fs)
                 : "cc"); \
         flag; })
+/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __inttype(x) \
+        __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
 /*
  * Single-value transfer routines. They automatically use the right
  * size if we just have the right pointer type. Note that the functions
@@ -153,7 +160,7 @@ extern int __get_user_64t_4(void *);
         ({ \
                 unsigned long __limit = current_thread_info()->addr_limit - 1; \
                 register typeof(*(p)) __user *__p asm("r0") = (p); \
-                register typeof(x) __r2 asm("r2"); \
+                register __inttype(x) __r2 asm("r2"); \
                 register unsigned long __l asm("r1") = __limit; \
                 register int __e asm("r0"); \
                 unsigned int __ua_flags = uaccess_save_and_enable(); \
@@ -243,6 +250,16 @@ static inline void set_fs(mm_segment_t fs)
 #define user_addr_max() \
         (uaccess_kernel() ? ~0UL : get_fs())
+#ifdef CONFIG_CPU_SPECTRE
+/*
+ * When mitigating Spectre variant 1, it is not worth fixing the non-
+ * verifying accessors, because we need to add verification of the
+ * address space there. Force these to use the standard get_user()
+ * version instead.
+ */
+#define __get_user(x, ptr) get_user(x, ptr)
+#else
 /*
  * The "__xxx" versions of the user access functions do not verify the
  * address space - it must have been done previously with a separate
@@ -259,12 +276,6 @@ static inline void set_fs(mm_segment_t fs)
         __gu_err; \
 })
-#define __get_user_error(x, ptr, err) \
-({ \
-        __get_user_err((x), (ptr), err); \
-        (void) 0; \
-})
 #define __get_user_err(x, ptr, err) \
 do { \
         unsigned long __gu_addr = (unsigned long)(ptr); \
@@ -324,6 +335,7 @@ do { \
 #define __get_user_asm_word(x, addr, err) \
         __get_user_asm(x, addr, err, ldr)
+#endif
 #define __put_user_switch(x, ptr, __err, __fn) \
...
@@ -53,7 +53,11 @@ ENTRY(stext)
 THUMB(1: )
 #endif
-        setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
+#ifdef CONFIG_ARM_VIRT_EXT
+        bl      __hyp_stub_install
+#endif
+        @ ensure svc mode and all interrupts masked
+        safe_svcmode_maskall r9
                                                 @ and irqs disabled
 #if defined(CONFIG_CPU_CP15)
         mrc     p15, 0, r9, c0, c0              @ get processor id
@@ -89,7 +93,11 @@ ENTRY(secondary_startup)
  * the processor type - there is no need to check the machine type
  * as it has already been validated by the primary processor.
  */
-        setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
+#ifdef CONFIG_ARM_VIRT_EXT
+        bl      __hyp_stub_install_secondary
+#endif
+        safe_svcmode_maskall r9
 #ifndef CONFIG_CPU_CP15
         ldr     r9, =CONFIG_PROCESSOR_ID
 #else
...
@@ -150,22 +150,18 @@ static int preserve_vfp_context(struct vfp_sigframe __user *frame)
 static int restore_vfp_context(char __user **auxp)
 {
-        struct vfp_sigframe __user *frame =
-                (struct vfp_sigframe __user *)*auxp;
-        unsigned long magic;
-        unsigned long size;
-        int err = 0;
-        __get_user_error(magic, &frame->magic, err);
-        __get_user_error(size, &frame->size, err);
+        struct vfp_sigframe frame;
+        int err;
+        err = __copy_from_user(&frame, *auxp, sizeof(frame));
         if (err)
-                return -EFAULT;
+                return err;
-        if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
+        if (frame.magic != VFP_MAGIC || frame.size != VFP_STORAGE_SIZE)
                 return -EINVAL;
-        *auxp += size;
-        return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
+        *auxp += sizeof(frame);
+        return vfp_restore_user_hwstate(&frame.ufp, &frame.ufp_exc);
 }
 #endif
@@ -176,6 +172,7 @@ static int restore_vfp_context(char __user **auxp)
 static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
 {
+        struct sigcontext context;
         char __user *aux;
         sigset_t set;
         int err;
@@ -184,23 +181,26 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
         if (err == 0)
                 set_current_blocked(&set);
-        __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
-        __get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
-        __get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
-        __get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
-        __get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
-        __get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
-        __get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
-        __get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
-        __get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
-        __get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
-        __get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
-        __get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
-        __get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
-        __get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
-        __get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
-        __get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
-        __get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
+        err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context));
+        if (err == 0) {
+                regs->ARM_r0 = context.arm_r0;
+                regs->ARM_r1 = context.arm_r1;
+                regs->ARM_r2 = context.arm_r2;
+                regs->ARM_r3 = context.arm_r3;
+                regs->ARM_r4 = context.arm_r4;
+                regs->ARM_r5 = context.arm_r5;
+                regs->ARM_r6 = context.arm_r6;
+                regs->ARM_r7 = context.arm_r7;
+                regs->ARM_r8 = context.arm_r8;
+                regs->ARM_r9 = context.arm_r9;
+                regs->ARM_r10 = context.arm_r10;
+                regs->ARM_fp = context.arm_fp;
+                regs->ARM_ip = context.arm_ip;
+                regs->ARM_sp = context.arm_sp;
+                regs->ARM_lr = context.arm_lr;
+                regs->ARM_pc = context.arm_pc;
+                regs->ARM_cpsr = context.arm_cpsr;
+        }
         err |= !valid_user_regs(regs);
...
@@ -329,9 +329,11 @@ asmlinkage long sys_oabi_semtimedop(int semid,
                 return -ENOMEM;
         err = 0;
         for (i = 0; i < nsops; i++) {
-                __get_user_error(sops[i].sem_num, &tsops->sem_num, err);
-                __get_user_error(sops[i].sem_op, &tsops->sem_op, err);
-                __get_user_error(sops[i].sem_flg, &tsops->sem_flg, err);
+                struct oabi_sembuf osb;
+                err |= __copy_from_user(&osb, tsops, sizeof(osb));
+                sops[i].sem_num = osb.sem_num;
+                sops[i].sem_op = osb.sem_op;
+                sops[i].sem_flg = osb.sem_flg;
                 tsops++;
         }
         if (timeout) {
...
@@ -90,6 +90,15 @@
         .text
 ENTRY(arm_copy_from_user)
+#ifdef CONFIG_CPU_SPECTRE
+        get_thread_info r3
+        ldr     r3, [r3, #TI_ADDR_LIMIT]
+        adds    ip, r1, r2      @ ip=addr+size
+        sub     r3, r3, #1      @ addr_limit - 1
+        cmpcc   ip, r3          @ if (addr+size > addr_limit - 1)
+        movcs   r1, #0          @ addr = NULL
+        csdb
+#endif
 #include "copy_template.S"
...
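In C terms, the check inserted at the entry of arm_copy_from_user() behaves roughly like the sketch below (an illustrative model only; the helper name is hypothetical and csdb() is shown as a comment where the assembly issues the real barrier):

/* Model of the Spectre-v1 hardening above: if the user range could run past
 * addr_limit (including arithmetic wrap-around), the source pointer is forced
 * to NULL so a mispredicted bounds check cannot be used to read kernel memory. */
static const void *mask_user_range(const void *addr, unsigned long size,
                                   unsigned long addr_limit)
{
        unsigned long end = (unsigned long)addr + size;

        if (end < (unsigned long)addr || end >= addr_limit - 1)
                addr = NULL;
        /* csdb(): speculation barrier issued here in the assembly version */
        return addr;
}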
@@ -702,7 +702,6 @@ config ARM_THUMBEE
 config ARM_VIRT_EXT
         bool
-        depends on MMU
         default y if CPU_V7
         help
           Enable the kernel to make use of the ARM Virtualization
...
@@ -53,7 +53,8 @@ static inline bool security_extensions_enabled(void)
 {
         /* Check CPUID Identification Scheme before ID_PFR1 read */
         if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
-                return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
+                return cpuid_feature_extract(CPUID_EXT_PFR1, 4) ||
+                       cpuid_feature_extract(CPUID_EXT_PFR1, 20);
         return 0;
 }
...
@@ -11,7 +11,7 @@
 void __init tcm_init(void);
 #else
 /* No TCM support, just blank inlines to be optimized out */
-inline void tcm_init(void)
+static inline void tcm_init(void)
 {
 }
 #endif
@@ -8,8 +8,5 @@
 # asflags-y := -DDEBUG
 KBUILD_AFLAGS   :=$(KBUILD_AFLAGS:-msoft-float=-Wa,-mfpu=softvfp+vfp -mfloat-abi=soft)
-LDFLAGS         +=--no-warn-mismatch
-obj-y           += vfp.o
-vfp-$(CONFIG_VFP) += vfpmodule.o entry.o vfphw.o vfpsingle.o vfpdouble.o
+obj-y           += vfpmodule.o entry.o vfphw.o vfpsingle.o vfpdouble.o
@@ -596,13 +596,11 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
 }
 /* Sanitise and restore the current VFP state from the provided structures. */
-int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
-                             struct user_vfp_exc __user *ufp_exc)
+int vfp_restore_user_hwstate(struct user_vfp *ufp, struct user_vfp_exc *ufp_exc)
 {
         struct thread_info *thread = current_thread_info();
         struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
         unsigned long fpexc;
-        int err = 0;
         /* Disable VFP to avoid corrupting the new thread state. */
         vfp_flush_hwstate(thread);
@@ -611,17 +609,16 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
          * Copy the floating point registers. There can be unused
          * registers see asm/hwcap.h for details.
          */
-        err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
-                                sizeof(hwstate->fpregs));
+        memcpy(&hwstate->fpregs, &ufp->fpregs, sizeof(hwstate->fpregs));
         /*
          * Copy the status and control register.
          */
-        __get_user_error(hwstate->fpscr, &ufp->fpscr, err);
+        hwstate->fpscr = ufp->fpscr;
         /*
          * Sanitise and restore the exception registers.
          */
-        __get_user_error(fpexc, &ufp_exc->fpexc, err);
+        fpexc = ufp_exc->fpexc;
         /* Ensure the VFP is enabled. */
         fpexc |= FPEXC_EN;
@@ -630,10 +627,10 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
         fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
         hwstate->fpexc = fpexc;
-        __get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
-        __get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+        hwstate->fpinst = ufp_exc->fpinst;
+        hwstate->fpinst2 = ufp_exc->fpinst2;
-        return err ? -EFAULT : 0;
+        return 0;
 }
 /*
...