Commit 0f1959f5 authored by KOSAKI Motohiro, committed by Martin Schwidefsky

[S390] convert old cpumask API into new one

Adapt to the new API.
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 7dd8fe1f
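
The diff below is a mechanical translation from the old cpumask primitives, which named cpumask_t objects directly, to the pointer-based helpers. As orientation, here is a minimal kernel-context sketch of the substitutions this patch makes; it is not part of the patch, the function name is made up, and the snippet only builds inside a kernel tree:

#include <linux/cpumask.h>

/* Orientation sketch only: each new call is preceded by the old form. */
static void cpumask_conversion_sketch(unsigned int cpu)
{
    cpumask_t mask;                     /* a mask this code owns itself */

    /* old: cpus_clear(mask); */
    cpumask_clear(&mask);
    /* old: cpu_set(cpu, mask); */
    cpumask_set_cpu(cpu, &mask);
    /* old: mask = cpumask_of_cpu(cpu); */
    cpumask_copy(&mask, cpumask_of(cpu));
    /* old: if (cpus_empty(mask)) */
    if (cpumask_empty(&mask))
        return;

    /*
     * The global maps are no longer written directly; the accessors
     * update them, and readers use the const *_mask pointers.
     * Clearing a bit maps to the same accessors with "false".
     */
    /* old: cpu_set(cpu, cpu_present_map); */
    set_cpu_present(cpu, true);
    /* old: cpu_set(cpu, cpu_online_map); */
    set_cpu_online(cpu, true);
}
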
@@ -50,7 +50,7 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
     /*
      * If the process only ran on the local cpu, do a local flush.
      */
-    local_cpumask = cpumask_of_cpu(smp_processor_id());
+    cpumask_copy(&local_cpumask, cpumask_of(smp_processor_id()));
     if (cpumask_equal(mm_cpumask(mm), &local_cpumask))
         __tlb_flush_local();
     else
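
The only non-obvious replacement in the hunk above: cpumask_of_cpu() produced a cpumask_t by value that could be assigned, while cpumask_of() returns a const struct cpumask * into a shared table of single-bit masks, so a writable local copy now needs an explicit cpumask_copy(). A minimal sketch, assuming kernel context; the helper name is hypothetical:

#include <linux/cpumask.h>
#include <linux/smp.h>

/*
 * Hypothetical helper, illustration only.  The caller must have
 * preemption disabled so smp_processor_id() stays stable.
 */
static void copy_local_cpumask(cpumask_t *dst)
{
    /*
     * cpumask_of() hands back a const pointer; copy it when a
     * writable or comparable local mask is needed, as above.
     */
    cpumask_copy(dst, cpumask_of(smp_processor_id()));
}
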
@@ -335,7 +335,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail)
         smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
         if (!cpu_stopped(logical_cpu))
             continue;
-        cpu_set(logical_cpu, cpu_present_map);
+        set_cpu_present(logical_cpu, true);
         smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
         logical_cpu = cpumask_next(logical_cpu, &avail);
         if (logical_cpu >= nr_cpu_ids)
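
In this hunk and several below, direct bit operations on the global cpu_present_map / cpu_online_map are replaced by the set_cpu_present() / set_cpu_online() accessors, and readers switch to the const cpu_present_mask / cpu_online_mask pointers. A small sketch of the idiom (kernel context assumed; the function and message are illustrative only):

#include <linux/kernel.h>
#include <linux/cpumask.h>

/* Illustrative only: mark a freshly detected cpu as present. */
static void mark_cpu_present(unsigned int cpu)
{
    /* old form: cpu_set(cpu, cpu_present_map); */
    set_cpu_present(cpu, true);

    /* readers now use the const pointer, not &cpu_present_map */
    if (!cpumask_test_cpu(cpu, cpu_present_mask))
        pr_warn("cpu %u did not become present\n", cpu);
}
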
@@ -367,7 +367,7 @@ static int smp_rescan_cpus_sclp(cpumask_t avail)
             continue;
         __cpu_logical_map[logical_cpu] = cpu_id;
         smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
-        cpu_set(logical_cpu, cpu_present_map);
+        set_cpu_present(logical_cpu, true);
         if (cpu >= info->configured)
             smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
         else
@@ -385,7 +385,7 @@ static int __smp_rescan_cpus(void)
 {
     cpumask_t avail;
-    cpus_xor(avail, cpu_possible_map, cpu_present_map);
+    cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
     if (smp_use_sigp_detection)
         return smp_rescan_cpus_sigp(avail);
     else
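
The set operations change shape the same way: cpus_xor() took cpumask_t operands by name, cpumask_xor() takes struct cpumask pointers, and the possible/present sets are read through their const *_mask pointers. A sketch of the "possible but not yet present" computation used in __smp_rescan_cpus() above (kernel context; the function name is made up):

#include <linux/cpumask.h>

/* Illustrative only: how many possible CPUs are not present yet? */
static unsigned int count_absent_cpus(void)
{
    cpumask_t avail;

    /* old form: cpus_xor(avail, cpu_possible_map, cpu_present_map); */
    cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
    return cpumask_weight(&avail);
}
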
@@ -467,7 +467,7 @@ int __cpuinit start_secondary(void *cpuvoid)
     notify_cpu_starting(smp_processor_id());
     /* Mark this cpu as online */
     ipi_call_lock();
-    cpu_set(smp_processor_id(), cpu_online_map);
+    set_cpu_online(smp_processor_id(), true);
     ipi_call_unlock();
     /* Switch on interrupts */
     local_irq_enable();
@@ -644,7 +644,7 @@ int __cpu_disable(void)
     struct ec_creg_mask_parms cr_parms;
     int cpu = smp_processor_id();
-    cpu_clear(cpu, cpu_online_map);
+    set_cpu_online(cpu, false);
     /* Disable pfault pseudo page faults on this cpu. */
     pfault_fini();
@@ -738,8 +738,8 @@ void __init smp_prepare_boot_cpu(void)
     BUG_ON(smp_processor_id() != 0);
     current_thread_info()->cpu = 0;
-    cpu_set(0, cpu_present_map);
-    cpu_set(0, cpu_online_map);
+    set_cpu_present(0, true);
+    set_cpu_online(0, true);
     S390_lowcore.percpu_offset = __per_cpu_offset[0];
     current_set[0] = current;
     smp_cpu_state[0] = CPU_STATE_CONFIGURED;
@@ -1016,21 +1016,21 @@ int __ref smp_rescan_cpus(void)
     get_online_cpus();
     mutex_lock(&smp_cpu_state_mutex);
-    newcpus = cpu_present_map;
+    cpumask_copy(&newcpus, cpu_present_mask);
     rc = __smp_rescan_cpus();
     if (rc)
         goto out;
-    cpus_andnot(newcpus, cpu_present_map, newcpus);
-    for_each_cpu_mask(cpu, newcpus) {
+    cpumask_andnot(&newcpus, cpu_present_mask, &newcpus);
+    for_each_cpu(cpu, &newcpus) {
         rc = smp_add_present_cpu(cpu);
         if (rc)
-            cpu_clear(cpu, cpu_present_map);
+            set_cpu_present(cpu, false);
     }
     rc = 0;
 out:
     mutex_unlock(&smp_cpu_state_mutex);
     put_online_cpus();
-    if (!cpus_empty(newcpus))
+    if (!cpumask_empty(&newcpus))
         topology_schedule_update();
     return rc;
 }
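
smp_rescan_cpus() keeps its logic: snapshot the present mask, rescan, andnot the snapshot against the updated mask so only newly detected CPUs remain, then walk those with for_each_cpu(). A condensed sketch of that flow with the new helpers (kernel context; everything except the cpumask calls is illustrative):

#include <linux/kernel.h>
#include <linux/cpumask.h>

/* Illustrative only: report CPUs present now but absent in @before. */
static void report_new_cpus(const struct cpumask *before)
{
    cpumask_t newcpus;
    int cpu;

    /* keep only bits that are present now but were not before */
    cpumask_andnot(&newcpus, cpu_present_mask, before);

    /* old form: for_each_cpu_mask(cpu, newcpus) */
    for_each_cpu(cpu, &newcpus)
        pr_info("cpu %d is newly present\n", cpu);
}
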
@@ -810,7 +810,7 @@ static int etr_sync_clock_stop(struct etr_aib *aib, int port)
     etr_sync.etr_port = port;
     get_online_cpus();
     atomic_set(&etr_sync.cpus, num_online_cpus() - 1);
-    rc = stop_machine(etr_sync_clock, &etr_sync, &cpu_online_map);
+    rc = stop_machine(etr_sync_clock, &etr_sync, cpu_online_mask);
     put_online_cpus();
     return rc;
 }
@@ -1579,7 +1579,7 @@ static void stp_work_fn(struct work_struct *work)
     memset(&stp_sync, 0, sizeof(stp_sync));
     get_online_cpus();
     atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
-    stop_machine(stp_sync_clock, &stp_sync, &cpu_online_map);
+    stop_machine(stp_sync_clock, &stp_sync, cpu_online_mask);
     put_online_cpus();
     if (!check_sync_clock())
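
In the two time.c hunks above, stop_machine() takes a const struct cpumask * for the set of CPUs to run on; the old code passed the address of the cpu_online_map global, the new code passes the cpu_online_mask pointer directly. A minimal sketch (kernel context; the callback and function names are stand-ins):

#include <linux/cpumask.h>
#include <linux/stop_machine.h>

/* Stand-in callback; the real callers synchronize ETR/STP clocks. */
static int sync_noop(void *data)
{
    return 0;
}

/* Illustrative only: run the callback on every online CPU. */
static int run_on_online_cpus(void)
{
    /* old form: stop_machine(sync_noop, NULL, &cpu_online_map); */
    return stop_machine(sync_noop, NULL, cpu_online_mask);
}
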
@@ -52,20 +52,20 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
     cpumask_t mask;
-    cpus_clear(mask);
+    cpumask_clear(&mask);
     if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
         cpumask_copy(&mask, cpumask_of(cpu));
         return mask;
     }
     while (info) {
-        if (cpu_isset(cpu, info->mask)) {
+        if (cpumask_test_cpu(cpu, &info->mask)) {
             mask = info->mask;
             break;
         }
         info = info->next;
     }
-    if (cpus_empty(mask))
-        mask = cpumask_of_cpu(cpu);
+    if (cpumask_empty(&mask))
+        cpumask_copy(&mask, cpumask_of(cpu));
     return mask;
 }
@@ -85,10 +85,10 @@ static void add_cpus_to_mask(struct topology_cpu *tl_cpu,
         if (cpu_logical_map(lcpu) != rcpu)
             continue;
 #ifdef CONFIG_SCHED_BOOK
-        cpu_set(lcpu, book->mask);
+        cpumask_set_cpu(lcpu, &book->mask);
         cpu_book_id[lcpu] = book->id;
 #endif
-        cpu_set(lcpu, core->mask);
+        cpumask_set_cpu(lcpu, &core->mask);
         cpu_core_id[lcpu] = core->id;
         smp_cpu_polarization[lcpu] = tl_cpu->pp;
     }
@@ -101,13 +101,13 @@ static void clear_masks(void)
     info = &core_info;
     while (info) {
-        cpus_clear(info->mask);
+        cpumask_clear(&info->mask);
         info = info->next;
     }
 #ifdef CONFIG_SCHED_BOOK
     info = &book_info;
     while (info) {
-        cpus_clear(info->mask);
+        cpumask_clear(&info->mask);
         info = info->next;
     }
 #endif
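
The topology hunks above show the other half of the split: the global present/online maps go through set_cpu_present()/set_cpu_online(), while masks this code owns itself (core->mask, book->mask, info->mask) are manipulated directly with cpumask_set_cpu() and cpumask_clear(). A brief sketch of the second case (kernel context; the struct is trimmed to what the example needs):

#include <linux/cpumask.h>

/* Trimmed illustration of a mask this code owns itself. */
struct group_info_sketch {
    cpumask_t mask;
};

/* Illustrative only: reset the group mask and add a single CPU. */
static void group_reset_to_cpu(struct group_info_sketch *grp, unsigned int cpu)
{
    cpumask_clear(&grp->mask);          /* old: cpus_clear(grp->mask)   */
    cpumask_set_cpu(cpu, &grp->mask);   /* old: cpu_set(cpu, grp->mask) */
}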