Commit bb3ca38e authored by Michael Kelley, committed by Wei Liu

hv_balloon: Use kernel macros to simplify open coded sequences

Code sequences equivalent to ALIGN(), ALIGN_DOWN(), and umin() are
currently open coded. Change these to use the kernel macros to
improve code clarity. ALIGN() and ALIGN_DOWN() require the
alignment value to be a power of 2, which is the case here.
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Michael Kelley <mhklinux@outlook.com>
Link: https://lore.kernel.org/r/20240503154312.142466-1-mhklinux@outlook.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
Message-ID: <20240503154312.142466-1-mhklinux@outlook.com>
parent 1613e604
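
For reference, the three macros behave as follows. This is a minimal userspace sketch of their semantics, simplified from the kernel's definitions in include/linux/align.h and include/linux/minmax.h (which add type checking); the CHUNK value is illustrative only and merely stands in for HA_CHUNK:

#include <assert.h>

/*
 * Simplified stand-ins for the kernel macros.  The mask trick in
 * ALIGN()/ALIGN_DOWN() is only correct when 'a' is a power of 2,
 * which is why the commit message calls that requirement out.
 */
#define ALIGN(x, a)		(((x) + ((a) - 1)) & ~((a) - 1))	/* round up */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))			/* round down */
#define umin(x, y)		((x) < (y) ? (x) : (y))			/* unsigned minimum */

#define CHUNK 512UL	/* illustrative power of 2, standing in for HA_CHUNK */

int main(void)
{
	/* ALIGN() replaces "(x / c) * c" followed by "if (x % c) result += c" */
	assert(ALIGN(1000UL, CHUNK) == 1024);
	assert(ALIGN(1024UL, CHUNK) == 1024);	/* already aligned: unchanged */

	/* ALIGN_DOWN() replaces "(x / c) * c" */
	assert(ALIGN_DOWN(1000UL, CHUNK) == 512);

	/* umin() replaces the open-coded "if (a > b) ... else ..." clamp */
	assert(umin(100UL, CHUNK) == 100);
	assert(umin(1000UL, CHUNK) == 512);

	return 0;
}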
@@ -729,15 +729,8 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
 		scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
 			has->ha_end_pfn += HA_CHUNK;
-
-			if (total_pfn > HA_CHUNK) {
-				processed_pfn = HA_CHUNK;
-				total_pfn -= HA_CHUNK;
-			} else {
-				processed_pfn = total_pfn;
-				total_pfn = 0;
-			}
-
+			processed_pfn = umin(total_pfn, HA_CHUNK);
+			total_pfn -= processed_pfn;
 			has->covered_end_pfn += processed_pfn;
 		}
@@ -800,7 +793,7 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
 {
 	struct hv_hotadd_state *has;
 	struct hv_hotadd_gap *gap;
-	unsigned long residual, new_inc;
+	unsigned long residual;
 	int ret = 0;
 
 	guard(spinlock_irqsave)(&dm_device.ha_lock);
@@ -836,15 +829,9 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
 	 * our current limit; extend it.
 	 */
 	if ((start_pfn + pfn_cnt) > has->end_pfn) {
+		/* Extend the region by multiples of HA_CHUNK */
 		residual = (start_pfn + pfn_cnt - has->end_pfn);
-		/*
-		 * Extend the region by multiples of HA_CHUNK.
-		 */
-		new_inc = (residual / HA_CHUNK) * HA_CHUNK;
-		if (residual % HA_CHUNK)
-			new_inc += HA_CHUNK;
-
-		has->end_pfn += new_inc;
+		has->end_pfn += ALIGN(residual, HA_CHUNK);
 	}
 
 	ret = 1;
@@ -915,9 +902,7 @@ static unsigned long handle_pg_range(unsigned long pg_start,
 		 */
 		size = (has->end_pfn - has->ha_end_pfn);
 		if (pfn_cnt <= size) {
-			size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
-			if (pfn_cnt % HA_CHUNK)
-				size += HA_CHUNK;
+			size = ALIGN(pfn_cnt, HA_CHUNK);
 		} else {
 			pfn_cnt = size;
 		}
@@ -1011,9 +996,6 @@ static void hot_add_req(struct work_struct *dummy)
 	rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;
 
 	if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
-		unsigned long region_size;
-		unsigned long region_start;
-
 		/*
 		 * The host has not specified the hot-add region.
 		 * Based on the hot-add page range being specified,
@@ -1021,14 +1003,8 @@ static void hot_add_req(struct work_struct *dummy)
 		 * that need to be hot-added while ensuring the alignment
 		 * and size requirements of Linux as it relates to hot-add.
 		 */
-		region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
-		if (pfn_cnt % HA_CHUNK)
-			region_size += HA_CHUNK;
-
-		region_start = (pg_start / HA_CHUNK) * HA_CHUNK;
-
-		rg_start = region_start;
-		rg_sz = region_size;
+		rg_start = ALIGN_DOWN(pg_start, HA_CHUNK);
+		rg_sz = ALIGN(pfn_cnt, HA_CHUNK);
 	}
 
 	if (do_hot_add)
...
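
As a sanity check on the first hunk, the following standalone sketch exhaustively verifies that the umin() rewrite is behavior-preserving. The helpers old_step()/new_step() are hypothetical names introduced here for illustration, and CHUNK again stands in for HA_CHUNK with a small illustrative value:

#include <assert.h>

#define CHUNK 8UL	/* illustrative stand-in for HA_CHUNK */

/* The removed open-coded form from hv_mem_hot_add() */
static unsigned long old_step(unsigned long *total_pfn)
{
	unsigned long processed_pfn;

	if (*total_pfn > CHUNK) {
		processed_pfn = CHUNK;
		*total_pfn -= CHUNK;
	} else {
		processed_pfn = *total_pfn;
		*total_pfn = 0;
	}
	return processed_pfn;
}

/* The replacement: clamp with a min(), then subtract what was processed */
static unsigned long new_step(unsigned long *total_pfn)
{
	unsigned long processed_pfn = (*total_pfn < CHUNK) ? *total_pfn : CHUNK;

	*total_pfn -= processed_pfn;
	return processed_pfn;
}

int main(void)
{
	/* Check every total_pfn value around the CHUNK boundary */
	for (unsigned long t = 0; t <= 4 * CHUNK; t++) {
		unsigned long a = t, b = t;

		assert(old_step(&a) == new_step(&b));	/* same processed_pfn */
		assert(a == b);				/* same remaining total */
	}
	return 0;
}

The ALIGN() hunks rest on the same kind of equivalence: for a power-of-2 chunk size, the divide/multiply plus conditional add of one chunk is exactly a round-up to the next chunk multiple.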