Commit 805de8f4 authored by Peter Zijlstra, committed by Thomas Gleixner

atomic: Replace atomic_{set,clear}_mask() usage

Replace the deprecated atomic_{set,clear}_mask() usage with the now
ubiquitous atomic_{or,andnot}() functions.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent de9e432c
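
For context, the conversion is purely mechanical: atomic_or() and atomic_andnot() take the mask first and the atomic_t pointer second, just like the helpers they replace. A minimal sketch of the pattern follows; the set_flag()/clear_flag() wrappers are hypothetical illustrations, not part of this patch:

#include <linux/atomic.h>

/* Set bits in *flags: formerly atomic_set_mask(mask, flags). */
static inline void set_flag(atomic_t *flags, int mask)
{
	atomic_or(mask, flags);
}

/* Clear bits in *flags: formerly atomic_clear_mask(mask, flags). */
static inline void clear_flag(atomic_t *flags, int mask)
{
	atomic_andnot(mask, flags);
}

Casts such as (atomic_t *)&flushcache_cpumask in the m32r and mn10300 hunks below are carried over from the original code; only the helper name changes.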
@@ -195,7 +195,7 @@ void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
 	local_irq_save(flags);
 	for_each_cpu(cpu, cpumask) {
 		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
-		atomic_set_mask((1 << msg), &bfin_ipi_data->bits);
+		atomic_or((1 << msg), &bfin_ipi_data->bits);
 		atomic_inc(&bfin_ipi_data->count);
 	}
 	local_irq_restore(flags);
...
@@ -156,7 +156,7 @@ void smp_flush_cache_all(void)
 	cpumask_clear_cpu(smp_processor_id(), &cpumask);
 	spin_lock(&flushcache_lock);
 	mask=cpumask_bits(&cpumask);
-	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
+	atomic_or(*mask, (atomic_t *)&flushcache_cpumask);
 	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
 	_flush_cache_copyback_all();
 	while (flushcache_cpumask)
@@ -407,7 +407,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	flush_vma = vma;
 	flush_va = va;
 	mask=cpumask_bits(&cpumask);
-	atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
+	atomic_or(*mask, (atomic_t *)&flush_cpumask);
 	/*
 	 * We have to send the IPI only to
...
@@ -119,7 +119,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	flush_mm = mm;
 	flush_va = va;
 #if NR_CPUS <= BITS_PER_LONG
-	atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
+	atomic_or(cpumask.bits[0], (atomic_t *)&flush_cpumask.bits[0]);
 #else
 #error Not supported.
 #endif
...
@@ -381,7 +381,7 @@ static void disable_sync_clock(void *dummy)
 	 * increase the "sequence" counter to avoid the race of an
 	 * etr event and the complete recovery against get_sync_clock.
 	 */
-	atomic_clear_mask(0x80000000, sw_ptr);
+	atomic_andnot(0x80000000, sw_ptr);
 	atomic_inc(sw_ptr);
 }
@@ -392,7 +392,7 @@ static void disable_sync_clock(void *dummy)
 static void enable_sync_clock(void)
 {
 	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
-	atomic_set_mask(0x80000000, sw_ptr);
+	atomic_or(0x80000000, sw_ptr);
 }
 
 /*
...
@@ -170,20 +170,20 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
-			  &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
+		      &vcpu->arch.sie_block->cpuflags);
 	vcpu->arch.sie_block->lctl = 0x0000;
 	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
@@ -196,7 +196,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
 {
-	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
 }
 
 static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
@@ -919,7 +919,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
 	spin_unlock(&li->lock);
 	/* clear pending external calls set by sigp interpretation facility */
-	atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
+	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
 	vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
 }
@@ -1020,7 +1020,7 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	li->irq.ext = irq->u.ext;
 	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
@@ -1035,7 +1035,7 @@ static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
 		/* another external call is pending */
 		return -EBUSY;
 	}
-	atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
 	return 0;
 }
@@ -1061,7 +1061,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
 		return -EBUSY;
 	*extcall = irq->u.extcall;
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
@@ -1133,7 +1133,7 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
 	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
 	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
@@ -1177,7 +1177,7 @@ static int __inject_ckc(struct kvm_vcpu *vcpu)
 				   0, 0, 2);
 	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
@@ -1190,7 +1190,7 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
 				   0, 0, 2);
 	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
@@ -1369,13 +1369,13 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
 	spin_lock(&li->lock);
 	switch (type) {
 	case KVM_S390_MCHK:
-		atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+		atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
 		break;
 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-		atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
+		atomic_or(CPUSTAT_IO_INT, li->cpuflags);
 		break;
 	default:
-		atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+		atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 		break;
 	}
 	spin_unlock(&li->lock);
...
@@ -1215,12 +1215,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	}
 	restore_access_regs(vcpu->run->s.regs.acrs);
 	gmap_enable(vcpu->arch.gmap);
-	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	gmap_disable(vcpu->arch.gmap);
 	if (test_kvm_facility(vcpu->kvm, 129)) {
 		save_fp_ctl(&vcpu->run->s.regs.fpc);
@@ -1320,9 +1320,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 						    CPUSTAT_STOPPED);
 	if (test_kvm_facility(vcpu->kvm, 78))
-		atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
 	else if (test_kvm_facility(vcpu->kvm, 8))
-		atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
 	kvm_s390_vcpu_setup_model(vcpu);
@@ -1422,24 +1422,24 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 	exit_sie(vcpu);
 }
 
 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 }
 
 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
 	exit_sie(vcpu);
 }
 
 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
 }
 
 /*
@@ -1448,7 +1448,7 @@ static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
  * return immediately. */
 void exit_sie(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
 		cpu_relax();
 }
@@ -1672,19 +1672,19 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
 		vcpu->guest_debug = dbg->control;
 		/* enforce guest PER */
-		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
 			rc = kvm_s390_import_bp_data(vcpu, dbg);
 	} else {
-		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 		vcpu->arch.guestdbg.last_bp = 0;
 	}
 	if (rc) {
 		vcpu->guest_debug = 0;
 		kvm_s390_clear_bp_data(vcpu);
-		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 	}
 	return rc;
@@ -1771,7 +1771,7 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
 		if (!ibs_enabled(vcpu)) {
 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
-			atomic_set_mask(CPUSTAT_IBS,
-					&vcpu->arch.sie_block->cpuflags);
+			atomic_or(CPUSTAT_IBS,
+				  &vcpu->arch.sie_block->cpuflags);
 		}
 		goto retry;
@@ -1780,7 +1780,7 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
 		if (ibs_enabled(vcpu)) {
 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
-			atomic_clear_mask(CPUSTAT_IBS,
-					  &vcpu->arch.sie_block->cpuflags);
+			atomic_andnot(CPUSTAT_IBS,
+				      &vcpu->arch.sie_block->cpuflags);
 		}
 		goto retry;
@@ -2280,7 +2280,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
 		__disable_ibs_on_all_vcpus(vcpu->kvm);
 	}
-	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	/*
 	 * Another VCPU might have used IBS while we were offline.
 	 * Let's play safe and flush the VCPU at startup.
@@ -2306,7 +2306,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
 	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
 	kvm_s390_clear_stop_irq(vcpu);
-	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	__disable_ibs_on_vcpu(vcpu);
 	for (i = 0; i < online_vcpus; i++) {
...
@@ -748,7 +748,7 @@ static int i915_drm_resume(struct drm_device *dev)
 	mutex_lock(&dev->struct_mutex);
 	if (i915_gem_init_hw(dev)) {
 		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
-		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
 	}
 	mutex_unlock(&dev->struct_mutex);
...
@@ -5091,7 +5091,7 @@ int i915_gem_init(struct drm_device *dev)
 		 * for all other failure, such as an allocation failure, bail.
 		 */
 		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
-		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
 		ret = 0;
 	}
...
@@ -2446,7 +2446,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
 		kobject_uevent_env(&dev->primary->kdev->kobj,
 				   KOBJ_CHANGE, reset_done_event);
 	} else {
-		atomic_set_mask(I915_WEDGED, &error->reset_counter);
+		atomic_or(I915_WEDGED, &error->reset_counter);
 	}
 	/*
@@ -2574,7 +2574,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
 	i915_report_and_clear_eir(dev);
 	if (wedged) {
-		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
-				&dev_priv->gpu_error.reset_counter);
+		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
+			  &dev_priv->gpu_error.reset_counter);
 		/*
...
@@ -529,7 +529,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
 	list_add_tail(&port->list, &adapter->port_list);
 	write_unlock_irq(&adapter->port_list_lock);
-	atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
+	atomic_or(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
 	return port;
...
This diff is collapsed.
@@ -508,7 +508,7 @@ static void zfcp_fc_adisc_handler(void *data)
 	/* port is good, unblock rport without going through erp */
 	zfcp_scsi_schedule_rport_register(port);
 out:
-	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+	atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
 	put_device(&port->dev);
 	kmem_cache_free(zfcp_fc_req_cache, fc_req);
 }
@@ -564,14 +564,14 @@ void zfcp_fc_link_test_work(struct work_struct *work)
 	if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
 		goto out;
-	atomic_set_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+	atomic_or(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
 	retval = zfcp_fc_adisc(port);
 	if (retval == 0)
 		return;
 	/* send of ADISC was not possible */
-	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+	atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
 	zfcp_erp_port_forced_reopen(port, 0, "fcltwk1");
 out:
@@ -640,7 +640,7 @@ static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
 	if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
 		return;
-	atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
+	atomic_andnot(ZFCP_STATUS_COMMON_NOESC, &port->status);
 	if ((port->supported_classes != 0) ||
 	    !list_empty(&port->unit_list))
...
@@ -114,7 +114,7 @@ static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
 	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
 		return;
-	atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
+	atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
 	zfcp_scsi_schedule_rports_block(adapter);
@@ -345,7 +345,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
 		break;
 	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
-				&adapter->status);
+		atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+			  &adapter->status);
 		break;
 	case FSF_PROT_DUPLICATE_REQUEST_ID:
@@ -554,7 +554,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
 			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
 			return;
 		}
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
-				&adapter->status);
+		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
+			  &adapter->status);
 		break;
 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
@@ -567,7 +567,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
 		/* avoids adapter shutdown to be able to recognize
 		 * events such as LINK UP */
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
-				&adapter->status);
+		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
+			  &adapter->status);
 		zfcp_fsf_link_down_info_eval(req,
 			&qtcb->header.fsf_status_qual.link_down_info);
@@ -1394,9 +1394,9 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
 		break;
 	case FSF_GOOD:
 		port->handle = header->port_handle;
-		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
-				ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
-		atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED,
-				  &port->status);
+		atomic_or(ZFCP_STATUS_COMMON_OPEN |
+			  ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
+		atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
+			      &port->status);
 		/* check whether D_ID has changed during open */
 		/*
@@ -1677,10 +1677,10 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
 	case FSF_PORT_BOXED:
 		/* can't use generic zfcp_erp_modify_port_status because
 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
-		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
+		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
 		shost_for_each_device(sdev, port->adapter->scsi_host)
 			if (sdev_to_zfcp(sdev)->port == port)
-				atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
-						  &sdev_to_zfcp(sdev)->status);
+				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
+					      &sdev_to_zfcp(sdev)->status);
 		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
@@ -1700,10 +1700,10 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
 		/* can't use generic zfcp_erp_modify_port_status because
 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
 		 */
-		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
+		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
 		shost_for_each_device(sdev, port->adapter->scsi_host)
 			if (sdev_to_zfcp(sdev)->port == port)
-				atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
-						  &sdev_to_zfcp(sdev)->status);
+				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
+					      &sdev_to_zfcp(sdev)->status);
 		break;
 	}
@@ -1766,7 +1766,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
 	zfcp_sdev = sdev_to_zfcp(sdev);
-	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
-			  ZFCP_STATUS_COMMON_ACCESS_BOXED,
-			  &zfcp_sdev->status);
+	atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED |
+		      ZFCP_STATUS_COMMON_ACCESS_BOXED,
+		      &zfcp_sdev->status);
@@ -1822,7 +1822,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
 	case FSF_GOOD:
 		zfcp_sdev->lun_handle = header->lun_handle;
-		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
+		atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
 		break;
 	}
 }
@@ -1913,7 +1913,7 @@ static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
 		}
 		break;
 	case FSF_GOOD:
-		atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
+		atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
 		break;
 	}
 }
...
@@ -349,7 +349,7 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)
 	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
 	spin_lock_irq(&qdio->req_q_lock);
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
+	atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
 	spin_unlock_irq(&qdio->req_q_lock);
 	wake_up(&qdio->req_q_wq);
@@ -384,7 +384,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
 	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
 		return -EIO;
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
-			  &qdio->adapter->status);
+	atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
+		      &qdio->adapter->status);
 	zfcp_qdio_setup_init_data(&init_data, qdio);
@@ -396,14 +396,14 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
 		goto failed_qdio;
 	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
-				&qdio->adapter->status);
+		atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
+			  &qdio->adapter->status);
 	if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
+		atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
 		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
 	} else {
-		atomic_clear_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
+		atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
 		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
 	}
@@ -427,7 +427,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
 	/* set index of first available SBALS / number of available SBALS */
 	qdio->req_q_idx = 0;
 	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
-	atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
+	atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
 	if (adapter->scsi_host) {
 		adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
@@ -499,6 +499,6 @@ void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
 	rc = ccw_device_siosl(adapter->ccw_device);
 	if (!rc)
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
-				&adapter->status);
+		atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
+			  &adapter->status);
 }