Commit 7c116e02 authored by Arnd Bergmann, committed by David S. Miller

qed: reduce maximum stack frame size

clang warns about an overly large stack frame in one function
when it decides to inline all __qed_get_vport_*() functions into
__qed_get_vport_stats():

drivers/net/ethernet/qlogic/qed/qed_l2.c:1889:13: error: stack frame size of 1128 bytes in function '_qed_get_vport_stats' [-Werror,-Wframe-larger-than=]

Use a noinline_for_stack annotation to prevent clang from inlining
these, which keeps the maximum stack usage at around half of that
in the worst case, similar to what we get with gcc.

Fixes: 86622ee7 ("qed: Move statistics to L2 code")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 81cd17a4
@@ -1631,10 +1631,9 @@ static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
 	}
 }
 
-static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
-				   struct qed_ptt *p_ptt,
-				   struct qed_eth_stats *p_stats,
-				   u16 statistics_bin)
+static noinline_for_stack void
+__qed_get_vport_pstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+		       struct qed_eth_stats *p_stats, u16 statistics_bin)
 {
 	struct eth_pstorm_per_queue_stat pstats;
 	u32 pstats_addr = 0, pstats_len = 0;
@@ -1661,10 +1660,9 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
 		HILO_64_REGPAIR(pstats.error_drop_pkts);
 }
 
-static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
-				   struct qed_ptt *p_ptt,
-				   struct qed_eth_stats *p_stats,
-				   u16 statistics_bin)
+static noinline_for_stack void
+__qed_get_vport_tstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+		       struct qed_eth_stats *p_stats, u16 statistics_bin)
 {
 	struct tstorm_per_port_stat tstats;
 	u32 tstats_addr, tstats_len;
@@ -1709,10 +1707,9 @@ static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
 	}
 }
 
-static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
-				   struct qed_ptt *p_ptt,
-				   struct qed_eth_stats *p_stats,
-				   u16 statistics_bin)
+static noinline_for_stack
+void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+			    struct qed_eth_stats *p_stats, u16 statistics_bin)
 {
 	struct eth_ustorm_per_queue_stat ustats;
 	u32 ustats_addr = 0, ustats_len = 0;
@@ -1751,10 +1748,9 @@ static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
 	}
 }
 
-static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
-				   struct qed_ptt *p_ptt,
-				   struct qed_eth_stats *p_stats,
-				   u16 statistics_bin)
+static noinline_for_stack void
+__qed_get_vport_mstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+		       struct qed_eth_stats *p_stats, u16 statistics_bin)
 {
 	struct eth_mstorm_per_queue_stat mstats;
 	u32 mstats_addr = 0, mstats_len = 0;
@@ -1780,8 +1776,8 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
 		HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
 }
 
-static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
-				       struct qed_ptt *p_ptt,
-				       struct qed_eth_stats *p_stats)
+static noinline_for_stack void
+__qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+			   struct qed_eth_stats *p_stats)
 {
 	struct qed_eth_stats_common *p_common = &p_stats->common;
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment