Commit ef1fe6b7 authored by Wambui Karuga, committed by Greg Kroah-Hartman

staging: octeon: remove typedef declaration for cvmx_wqe

Remove the typedef declaration from struct cvmx_wqe and replace its
previous uses with the plain struct declaration.
Issue found by checkpatch.pl.
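As an illustration (not part of the patch), this follows the usual kernel cleanup of dropping a struct typedef and spelling the type out at every use. A minimal sketch of the pattern, using hypothetical names:

    /* before: checkpatch.pl warns "do not add new typedefs" */
    typedef struct {
            int len;
    } example_wqe_t;

    static void handle(example_wqe_t *w);

    /* after: the struct is named and referenced directly */
    struct example_wqe {
            int len;
    };

    static void handle(struct example_wqe *w);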
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Acked-by: Julia Lawall <julia.lawall@lip6.fr>
Link: https://lore.kernel.org/r/fa82104ea8d7ff54dc66bfbfedb6cca541701991.1570821661.git.wambui.karugax@gmail.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 27d6e47f
@@ -60,7 +60,7 @@ static irqreturn_t cvm_oct_do_interrupt(int irq, void *napi_id)
  *
  * Returns Non-zero if the packet can be dropped, zero otherwise.
  */
-static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
+static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
 {
         int port;

@@ -135,7 +135,7 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
         return 0;
 }

-static void copy_segments_to_skb(cvmx_wqe_t *work, struct sk_buff *skb)
+static void copy_segments_to_skb(struct cvmx_wqe *work, struct sk_buff *skb)
 {
         int segments = work->word2.s.bufs;
         union cvmx_buf_ptr segment_ptr = work->packet_ptr;
@@ -215,7 +215,7 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
                 struct sk_buff *skb = NULL;
                 struct sk_buff **pskb = NULL;
                 int skb_in_hw;
-                cvmx_wqe_t *work;
+                struct cvmx_wqe *work;
                 int port;

                 if (USE_ASYNC_IOBDMA && did_work_request)
@@ -514,7 +514,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
         void *copy_location;

         /* Get a work queue entry */
-        cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
+        struct cvmx_wqe *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);

         if (unlikely(!work)) {
                 printk_ratelimited("%s: Failed to allocate a work queue entry\n",
@@ -172,7 +172,7 @@ static void cvm_oct_configure_common_hw(void)
  */
 int cvm_oct_free_work(void *work_queue_entry)
 {
-        cvmx_wqe_t *work = work_queue_entry;
+        struct cvmx_wqe *work = work_queue_entry;
         int segments = work->word2.s.bufs;
         union cvmx_buf_ptr segment_ptr = work->packet_ptr;

@@ -183,13 +183,13 @@ union cvmx_buf_ptr {
         } s;
 };

-typedef struct {
+struct cvmx_wqe {
         union cvmx_wqe_word0 word0;
         union cvmx_wqe_word1 word1;
         union cvmx_pip_wqe_word2 word2;
         union cvmx_buf_ptr packet_ptr;
         uint8_t packet_data[96];
-} cvmx_wqe_t;
+};

 typedef union {
         uint64_t u64;
@@ -1198,7 +1198,7 @@ static inline uint64_t cvmx_scratch_read64(uint64_t address)
 static inline void cvmx_scratch_write64(uint64_t address, uint64_t value)
 { }

-static inline int cvmx_wqe_get_grp(cvmx_wqe_t *work)
+static inline int cvmx_wqe_get_grp(struct cvmx_wqe *work)
 {
         return 0;
 }
@@ -1345,14 +1345,14 @@ static inline void cvmx_pow_work_request_async(int scr_addr,
                                                cvmx_pow_wait_t wait)
 { }

-static inline cvmx_wqe_t *cvmx_pow_work_response_async(int scr_addr)
+static inline struct cvmx_wqe *cvmx_pow_work_response_async(int scr_addr)
 {
-        cvmx_wqe_t *wqe = (void *)(unsigned long)scr_addr;
+        struct cvmx_wqe *wqe = (void *)(unsigned long)scr_addr;

         return wqe;
 }

-static inline cvmx_wqe_t *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
+static inline struct cvmx_wqe *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
 {
         return (void *)(unsigned long)wait;
 }
@@ -1390,21 +1390,21 @@ static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(uint64_t port,
         return ret;
 }

-static inline void cvmx_wqe_set_port(cvmx_wqe_t *work, int port)
+static inline void cvmx_wqe_set_port(struct cvmx_wqe *work, int port)
 { }

-static inline void cvmx_wqe_set_qos(cvmx_wqe_t *work, int qos)
+static inline void cvmx_wqe_set_qos(struct cvmx_wqe *work, int qos)
 { }

-static inline int cvmx_wqe_get_qos(cvmx_wqe_t *work)
+static inline int cvmx_wqe_get_qos(struct cvmx_wqe *work)
 {
         return 0;
 }

-static inline void cvmx_wqe_set_grp(cvmx_wqe_t *work, int grp)
+static inline void cvmx_wqe_set_grp(struct cvmx_wqe *work, int grp)
 { }

-static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag,
+static inline void cvmx_pow_work_submit(struct cvmx_wqe *wqp, uint32_t tag,
                                         enum cvmx_pow_tag_type tag_type,
                                         uint64_t qos, uint64_t grp)
 { }