Commit 92cf1f23 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jesse/openvswitch

Jesse Gross says:

====================
A number of improvements for net-next/3.10.

Highlights include:

 * Properly exposing linux/openvswitch.h to userspace after the uapi
   changes.

 * Simplification of locking. It immediately makes things simpler to
   reason about and avoids holding RTNL mutex for longer than
   necessary. In the near future it will also enable tunnel
   registration and more fine-grained locking.

 * Miscellaneous cleanups and simplifications.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 98d2f0e6 e0f0ecf3
This diff is collapsed.
...@@ -285,6 +285,7 @@ header-y += nvram.h ...@@ -285,6 +285,7 @@ header-y += nvram.h
header-y += omap3isp.h header-y += omap3isp.h
header-y += omapfb.h header-y += omapfb.h
header-y += oom.h header-y += oom.h
header-y += openvswitch.h
header-y += packet_diag.h header-y += packet_diag.h
header-y += param.h header-y += param.h
header-y += parport.h header-y += parport.h
......
This diff is collapsed.
This diff is collapsed.
...@@ -57,10 +57,9 @@ struct dp_stats_percpu { ...@@ -57,10 +57,9 @@ struct dp_stats_percpu {
* struct datapath - datapath for flow-based packet switching * struct datapath - datapath for flow-based packet switching
* @rcu: RCU callback head for deferred destruction. * @rcu: RCU callback head for deferred destruction.
* @list_node: Element in global 'dps' list. * @list_node: Element in global 'dps' list.
* @n_flows: Number of flows currently in flow table. * @table: Current flow table. Protected by ovs_mutex and RCU.
* @table: Current flow table. Protected by genl_lock and RCU.
* @ports: Hash table for ports. %OVSP_LOCAL port always exists. Protected by * @ports: Hash table for ports. %OVSP_LOCAL port always exists. Protected by
* RTNL and RCU. * ovs_mutex and RCU.
* @stats_percpu: Per-CPU datapath statistics. * @stats_percpu: Per-CPU datapath statistics.
* @net: Reference to net namespace. * @net: Reference to net namespace.
* *
...@@ -86,26 +85,6 @@ struct datapath { ...@@ -86,26 +85,6 @@ struct datapath {
#endif #endif
}; };
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no);

/* Look up a vport; caller must hold rcu_read_lock (checked via lockdep). */
static inline struct vport *ovs_vport_rcu(const struct datapath *dp, int port_no)
{
WARN_ON_ONCE(!rcu_read_lock_held());
return ovs_lookup_vport(dp, port_no);
}

/* Look up a vport; caller must hold either rcu_read_lock or RTNL.
 * NOTE(review): these RTNL-based variants are the ones removed by this
 * commit, replaced by the ovs_mutex-based helpers further down.
 */
static inline struct vport *ovs_vport_rtnl_rcu(const struct datapath *dp, int port_no)
{
WARN_ON_ONCE(!rcu_read_lock_held() && !rtnl_is_locked());
return ovs_lookup_vport(dp, port_no);
}

/* Look up a vport; caller must hold RTNL (asserted). */
static inline struct vport *ovs_vport_rtnl(const struct datapath *dp, int port_no)
{
ASSERT_RTNL();
return ovs_lookup_vport(dp, port_no);
}
/** /**
* struct ovs_skb_cb - OVS data in skb CB * struct ovs_skb_cb - OVS data in skb CB
* @flow: The flow associated with this packet. May be %NULL if no flow. * @flow: The flow associated with this packet. May be %NULL if no flow.
...@@ -132,6 +111,30 @@ struct dp_upcall_info { ...@@ -132,6 +111,30 @@ struct dp_upcall_info {
u32 portid; u32 portid;
}; };
/**
 * struct ovs_net - Per net-namespace data for ovs.
 * @dps: List of datapaths to enable dumping them all out.
 * Protected by genl_mutex.
 * @dp_notify_work: Deferred work queued from the netdev notifier to detach
 * vports whose underlying net_device is unregistering; runs under ovs_lock()
 * (see ovs_dp_notify_wq()).
 *
 * NOTE(review): the @dps "Protected by genl_mutex" comment looks stale --
 * this series replaces genl/RTNL locking with ovs_mutex, and the list is
 * walked under ovs_lock() in ovs_dp_notify_wq(). TODO confirm and update
 * the comment to ovs_mutex.
 */
struct ovs_net {
struct list_head dps;
struct work_struct dp_notify_work;
};
/* Per-net-namespace id for looking up struct ovs_net via net_generic(). */
extern int ovs_net_id;
/* Acquire/release ovs_mutex, the new top-level OVS lock that replaces the
 * genl/RTNL locking for datapath and vport modifications. */
void ovs_lock(void);
void ovs_unlock(void);

#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void);
#else
/* Without lockdep we cannot check lock ownership; assume it is held. */
#define lockdep_ovsl_is_held() 1
#endif

/* Warn (once per hit) if ovs_mutex is not held where it is required. */
#define ASSERT_OVSL() WARN_ON(unlikely(!lockdep_ovsl_is_held()))
/* Dereference an RCU pointer that is write-protected by ovs_mutex. */
#define ovsl_dereference(p) \
rcu_dereference_protected(p, lockdep_ovsl_is_held())
static inline struct net *ovs_dp_get_net(struct datapath *dp) static inline struct net *ovs_dp_get_net(struct datapath *dp)
{ {
return read_pnet(&dp->net); return read_pnet(&dp->net);
...@@ -142,6 +145,26 @@ static inline void ovs_dp_set_net(struct datapath *dp, struct net *net) ...@@ -142,6 +145,26 @@ static inline void ovs_dp_set_net(struct datapath *dp, struct net *net)
write_pnet(&dp->net, net); write_pnet(&dp->net, net);
} }
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no);

/* Look up a vport; caller must hold rcu_read_lock (checked at runtime). */
static inline struct vport *ovs_vport_rcu(const struct datapath *dp, int port_no)
{
WARN_ON_ONCE(!rcu_read_lock_held());
return ovs_lookup_vport(dp, port_no);
}

/* Look up a vport; caller must hold either rcu_read_lock or ovs_mutex.
 * Replaces the former ovs_vport_rtnl_rcu() which checked RTNL instead. */
static inline struct vport *ovs_vport_ovsl_rcu(const struct datapath *dp, int port_no)
{
WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
return ovs_lookup_vport(dp, port_no);
}

/* Look up a vport; caller must hold ovs_mutex (asserted via ASSERT_OVSL). */
static inline struct vport *ovs_vport_ovsl(const struct datapath *dp, int port_no)
{
ASSERT_OVSL();
return ovs_lookup_vport(dp, port_no);
}
extern struct notifier_block ovs_dp_device_notifier; extern struct notifier_block ovs_dp_device_notifier;
extern struct genl_multicast_group ovs_dp_vport_multicast_group; extern struct genl_multicast_group ovs_dp_vport_multicast_group;
...@@ -155,4 +178,5 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq, ...@@ -155,4 +178,5 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq,
u8 cmd); u8 cmd);
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb); int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb);
void ovs_dp_notify_wq(struct work_struct *work);
#endif /* datapath.h */ #endif /* datapath.h */
...@@ -18,46 +18,78 @@ ...@@ -18,46 +18,78 @@
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <net/genetlink.h> #include <net/genetlink.h>
#include <net/netns/generic.h>
#include "datapath.h" #include "datapath.h"
#include "vport-internal_dev.h" #include "vport-internal_dev.h"
#include "vport-netdev.h" #include "vport-netdev.h"
/* Detach @vport from its datapath and multicast an OVS_VPORT_CMD_DEL
 * notification to the vport multicast group.
 *
 * The notification skb is built *before* ovs_dp_detach_port() because the
 * vport's state is needed to fill it in; dp is saved first for the same
 * reason. If building the skb failed, the error is still reported to
 * listeners via netlink_set_err() after the port has been detached.
 *
 * NOTE(review): presumably called with ovs_mutex held (its only caller,
 * ovs_dp_notify_wq(), takes ovs_lock()) -- confirm before adding callers.
 */
static void dp_detach_port_notify(struct vport *vport)
{
struct sk_buff *notify;
struct datapath *dp;

dp = vport->dp;
notify = ovs_vport_cmd_build_info(vport, 0, 0,
OVS_VPORT_CMD_DEL);
ovs_dp_detach_port(vport);
if (IS_ERR(notify)) {
netlink_set_err(ovs_dp_get_net(dp)->genl_sock, 0,
ovs_dp_vport_multicast_group.id,
PTR_ERR(notify));
return;
}

genlmsg_multicast_netns(ovs_dp_get_net(dp), notify, 0,
ovs_dp_vport_multicast_group.id,
GFP_KERNEL);
}
/* Workqueue handler: detach every netdev-backed vport whose underlying
 * net_device is unregistering or already unregistered.
 *
 * Queued from the netdev notifier on NETDEV_UNREGISTER (see
 * dp_device_event()); doing the detach here, under ovs_lock() instead of
 * inside the notifier, avoids taking ovs_mutex in RTNL context.
 *
 * Walks all datapaths in this namespace's ovs_net and every vport hash
 * bucket; hlist_for_each_entry_safe is required because
 * dp_detach_port_notify() removes the current entry from the list.
 */
void ovs_dp_notify_wq(struct work_struct *work)
{
struct ovs_net *ovs_net = container_of(work, struct ovs_net, dp_notify_work);
struct datapath *dp;

ovs_lock();
list_for_each_entry(dp, &ovs_net->dps, list_node) {
int i;

for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
struct vport *vport;
struct hlist_node *n;

hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) {
struct netdev_vport *netdev_vport;

/* Only netdev-backed vports track an external device
 * whose unregistration we must react to. */
if (vport->ops->type != OVS_VPORT_TYPE_NETDEV)
continue;

netdev_vport = netdev_vport_priv(vport);
if (netdev_vport->dev->reg_state == NETREG_UNREGISTERED ||
netdev_vport->dev->reg_state == NETREG_UNREGISTERING)
dp_detach_port_notify(vport);
}
}
}
ovs_unlock();
}
static int dp_device_event(struct notifier_block *unused, unsigned long event, static int dp_device_event(struct notifier_block *unused, unsigned long event,
void *ptr) void *ptr)
{ {
struct ovs_net *ovs_net;
struct net_device *dev = ptr; struct net_device *dev = ptr;
struct vport *vport; struct vport *vport = NULL;
if (ovs_is_internal_dev(dev)) if (!ovs_is_internal_dev(dev))
vport = ovs_internal_dev_get_vport(dev);
else
vport = ovs_netdev_get_vport(dev); vport = ovs_netdev_get_vport(dev);
if (!vport) if (!vport)
return NOTIFY_DONE; return NOTIFY_DONE;
switch (event) { if (event == NETDEV_UNREGISTER) {
case NETDEV_UNREGISTER: ovs_net = net_generic(dev_net(dev), ovs_net_id);
if (!ovs_is_internal_dev(dev)) { queue_work(system_wq, &ovs_net->dp_notify_work);
struct sk_buff *notify;
struct datapath *dp = vport->dp;
notify = ovs_vport_cmd_build_info(vport, 0, 0,
OVS_VPORT_CMD_DEL);
ovs_dp_detach_port(vport);
if (IS_ERR(notify)) {
netlink_set_err(ovs_dp_get_net(dp)->genl_sock, 0,
ovs_dp_vport_multicast_group.id,
PTR_ERR(notify));
break;
}
genlmsg_multicast_netns(ovs_dp_get_net(dp), notify, 0,
ovs_dp_vport_multicast_group.id,
GFP_KERNEL);
}
break;
} }
return NOTIFY_DONE; return NOTIFY_DONE;
......
...@@ -211,7 +211,7 @@ struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions) ...@@ -211,7 +211,7 @@ struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
sfa->actions_len = actions_len; sfa->actions_len = actions_len;
memcpy(sfa->actions, nla_data(actions), actions_len); nla_memcpy(sfa->actions, actions, actions_len);
return sfa; return sfa;
} }
......
...@@ -138,27 +138,6 @@ int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *, ...@@ -138,27 +138,6 @@ int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *,
void ovs_flow_used(struct sw_flow *, struct sk_buff *); void ovs_flow_used(struct sw_flow *, struct sk_buff *);
u64 ovs_flow_used_time(unsigned long flow_jiffies); u64 ovs_flow_used_time(unsigned long flow_jiffies);
/* Upper bound on the length of a nlattr-formatted flow key. The longest
* nlattr-formatted flow key would be:
*
* struct pad nl hdr total
* ------ --- ------ -----
* OVS_KEY_ATTR_PRIORITY 4 -- 4 8
* OVS_KEY_ATTR_IN_PORT 4 -- 4 8
* OVS_KEY_ATTR_SKB_MARK 4 -- 4 8
* OVS_KEY_ATTR_ETHERNET 12 -- 4 16
* OVS_KEY_ATTR_ETHERTYPE 2 2 4 8 (outer VLAN ethertype)
* OVS_KEY_ATTR_8021Q 4 -- 4 8
* OVS_KEY_ATTR_ENCAP 0 -- 4 4 (VLAN encapsulation)
* OVS_KEY_ATTR_ETHERTYPE 2 2 4 8 (inner VLAN ethertype)
* OVS_KEY_ATTR_IPV6 40 -- 4 44
* OVS_KEY_ATTR_ICMPV6 2 2 4 8
* OVS_KEY_ATTR_ND 28 -- 4 32
* -------------------------------------------------
* total 152
*/
#define FLOW_BUFSIZE 152
int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *); int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *);
int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
const struct nlattr *); const struct nlattr *);
......
...@@ -173,16 +173,19 @@ static struct vport *internal_dev_create(const struct vport_parms *parms) ...@@ -173,16 +173,19 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
if (vport->port_no == OVSP_LOCAL) if (vport->port_no == OVSP_LOCAL)
netdev_vport->dev->features |= NETIF_F_NETNS_LOCAL; netdev_vport->dev->features |= NETIF_F_NETNS_LOCAL;
rtnl_lock();
err = register_netdevice(netdev_vport->dev); err = register_netdevice(netdev_vport->dev);
if (err) if (err)
goto error_free_netdev; goto error_free_netdev;
dev_set_promiscuity(netdev_vport->dev, 1); dev_set_promiscuity(netdev_vport->dev, 1);
rtnl_unlock();
netif_start_queue(netdev_vport->dev); netif_start_queue(netdev_vport->dev);
return vport; return vport;
error_free_netdev: error_free_netdev:
rtnl_unlock();
free_netdev(netdev_vport->dev); free_netdev(netdev_vport->dev);
error_free_vport: error_free_vport:
ovs_vport_free(vport); ovs_vport_free(vport);
...@@ -195,10 +198,13 @@ static void internal_dev_destroy(struct vport *vport) ...@@ -195,10 +198,13 @@ static void internal_dev_destroy(struct vport *vport)
struct netdev_vport *netdev_vport = netdev_vport_priv(vport); struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
netif_stop_queue(netdev_vport->dev); netif_stop_queue(netdev_vport->dev);
rtnl_lock();
dev_set_promiscuity(netdev_vport->dev, -1); dev_set_promiscuity(netdev_vport->dev, -1);
/* unregister_netdevice() waits for an RCU grace period. */ /* unregister_netdevice() waits for an RCU grace period. */
unregister_netdevice(netdev_vport->dev); unregister_netdevice(netdev_vport->dev);
rtnl_unlock();
} }
static int internal_dev_recv(struct vport *vport, struct sk_buff *skb) static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
......
...@@ -100,16 +100,20 @@ static struct vport *netdev_create(const struct vport_parms *parms) ...@@ -100,16 +100,20 @@ static struct vport *netdev_create(const struct vport_parms *parms)
goto error_put; goto error_put;
} }
rtnl_lock();
err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook, err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook,
vport); vport);
if (err) if (err)
goto error_put; goto error_unlock;
dev_set_promiscuity(netdev_vport->dev, 1); dev_set_promiscuity(netdev_vport->dev, 1);
netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH; netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
rtnl_unlock();
return vport; return vport;
error_unlock:
rtnl_unlock();
error_put: error_put:
dev_put(netdev_vport->dev); dev_put(netdev_vport->dev);
error_free_vport: error_free_vport:
...@@ -131,9 +135,11 @@ static void netdev_destroy(struct vport *vport) ...@@ -131,9 +135,11 @@ static void netdev_destroy(struct vport *vport)
{ {
struct netdev_vport *netdev_vport = netdev_vport_priv(vport); struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
rtnl_lock();
netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH; netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
netdev_rx_handler_unregister(netdev_vport->dev); netdev_rx_handler_unregister(netdev_vport->dev);
dev_set_promiscuity(netdev_vport->dev, -1); dev_set_promiscuity(netdev_vport->dev, -1);
rtnl_unlock();
call_rcu(&netdev_vport->rcu, free_port_rcu); call_rcu(&netdev_vport->rcu, free_port_rcu);
} }
......
...@@ -40,7 +40,7 @@ static const struct vport_ops *vport_ops_list[] = { ...@@ -40,7 +40,7 @@ static const struct vport_ops *vport_ops_list[] = {
&ovs_internal_vport_ops, &ovs_internal_vport_ops,
}; };
/* Protected by RCU read lock for reading, RTNL lock for writing. */ /* Protected by RCU read lock for reading, ovs_mutex for writing. */
static struct hlist_head *dev_table; static struct hlist_head *dev_table;
#define VPORT_HASH_BUCKETS 1024 #define VPORT_HASH_BUCKETS 1024
...@@ -80,7 +80,7 @@ static struct hlist_head *hash_bucket(struct net *net, const char *name) ...@@ -80,7 +80,7 @@ static struct hlist_head *hash_bucket(struct net *net, const char *name)
* *
* @name: name of port to find * @name: name of port to find
* *
* Must be called with RTNL or RCU read lock. * Must be called with ovs or RCU read lock.
*/ */
struct vport *ovs_vport_locate(struct net *net, const char *name) struct vport *ovs_vport_locate(struct net *net, const char *name)
{ {
...@@ -128,7 +128,7 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops, ...@@ -128,7 +128,7 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
vport->ops = ops; vport->ops = ops;
INIT_HLIST_NODE(&vport->dp_hash_node); INIT_HLIST_NODE(&vport->dp_hash_node);
vport->percpu_stats = alloc_percpu(struct vport_percpu_stats); vport->percpu_stats = alloc_percpu(struct pcpu_tstats);
if (!vport->percpu_stats) { if (!vport->percpu_stats) {
kfree(vport); kfree(vport);
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
...@@ -161,7 +161,7 @@ void ovs_vport_free(struct vport *vport) ...@@ -161,7 +161,7 @@ void ovs_vport_free(struct vport *vport)
* @parms: Information about new vport. * @parms: Information about new vport.
* *
* Creates a new vport with the specified configuration (which is dependent on * Creates a new vport with the specified configuration (which is dependent on
* device type). RTNL lock must be held. * device type). ovs_mutex must be held.
*/ */
struct vport *ovs_vport_add(const struct vport_parms *parms) struct vport *ovs_vport_add(const struct vport_parms *parms)
{ {
...@@ -169,8 +169,6 @@ struct vport *ovs_vport_add(const struct vport_parms *parms) ...@@ -169,8 +169,6 @@ struct vport *ovs_vport_add(const struct vport_parms *parms)
int err = 0; int err = 0;
int i; int i;
ASSERT_RTNL();
for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) { for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) {
if (vport_ops_list[i]->type == parms->type) { if (vport_ops_list[i]->type == parms->type) {
struct hlist_head *bucket; struct hlist_head *bucket;
...@@ -201,12 +199,10 @@ struct vport *ovs_vport_add(const struct vport_parms *parms) ...@@ -201,12 +199,10 @@ struct vport *ovs_vport_add(const struct vport_parms *parms)
* @port: New configuration. * @port: New configuration.
* *
* Modifies an existing device with the specified configuration (which is * Modifies an existing device with the specified configuration (which is
* dependent on device type). RTNL lock must be held. * dependent on device type). ovs_mutex must be held.
*/ */
int ovs_vport_set_options(struct vport *vport, struct nlattr *options) int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
{ {
ASSERT_RTNL();
if (!vport->ops->set_options) if (!vport->ops->set_options)
return -EOPNOTSUPP; return -EOPNOTSUPP;
return vport->ops->set_options(vport, options); return vport->ops->set_options(vport, options);
...@@ -218,11 +214,11 @@ int ovs_vport_set_options(struct vport *vport, struct nlattr *options) ...@@ -218,11 +214,11 @@ int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
* @vport: vport to delete. * @vport: vport to delete.
* *
* Detaches @vport from its datapath and destroys it. It is possible to fail * Detaches @vport from its datapath and destroys it. It is possible to fail
* for reasons such as lack of memory. RTNL lock must be held. * for reasons such as lack of memory. ovs_mutex must be held.
*/ */
void ovs_vport_del(struct vport *vport) void ovs_vport_del(struct vport *vport)
{ {
ASSERT_RTNL(); ASSERT_OVSL();
hlist_del_rcu(&vport->hash_node); hlist_del_rcu(&vport->hash_node);
...@@ -237,7 +233,7 @@ void ovs_vport_del(struct vport *vport) ...@@ -237,7 +233,7 @@ void ovs_vport_del(struct vport *vport)
* *
* Retrieves transmit, receive, and error stats for the given device. * Retrieves transmit, receive, and error stats for the given device.
* *
* Must be called with RTNL lock or rcu_read_lock. * Must be called with ovs_mutex or rcu_read_lock.
*/ */
void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats) void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
{ {
...@@ -264,16 +260,16 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats) ...@@ -264,16 +260,16 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
spin_unlock_bh(&vport->stats_lock); spin_unlock_bh(&vport->stats_lock);
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
const struct vport_percpu_stats *percpu_stats; const struct pcpu_tstats *percpu_stats;
struct vport_percpu_stats local_stats; struct pcpu_tstats local_stats;
unsigned int start; unsigned int start;
percpu_stats = per_cpu_ptr(vport->percpu_stats, i); percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
do { do {
start = u64_stats_fetch_begin_bh(&percpu_stats->sync); start = u64_stats_fetch_begin_bh(&percpu_stats->syncp);
local_stats = *percpu_stats; local_stats = *percpu_stats;
} while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start)); } while (u64_stats_fetch_retry_bh(&percpu_stats->syncp, start));
stats->rx_bytes += local_stats.rx_bytes; stats->rx_bytes += local_stats.rx_bytes;
stats->rx_packets += local_stats.rx_packets; stats->rx_packets += local_stats.rx_packets;
...@@ -296,22 +292,24 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats) ...@@ -296,22 +292,24 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
* negative error code if a real error occurred. If an error occurs, @skb is * negative error code if a real error occurred. If an error occurs, @skb is
* left unmodified. * left unmodified.
* *
* Must be called with RTNL lock or rcu_read_lock. * Must be called with ovs_mutex or rcu_read_lock.
*/ */
int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb) int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
{ {
struct nlattr *nla; struct nlattr *nla;
int err;
if (!vport->ops->get_options)
return 0;
nla = nla_nest_start(skb, OVS_VPORT_ATTR_OPTIONS); nla = nla_nest_start(skb, OVS_VPORT_ATTR_OPTIONS);
if (!nla) if (!nla)
return -EMSGSIZE; return -EMSGSIZE;
if (vport->ops->get_options) { err = vport->ops->get_options(vport, skb);
int err = vport->ops->get_options(vport, skb); if (err) {
if (err) { nla_nest_cancel(skb, nla);
nla_nest_cancel(skb, nla); return err;
return err;
}
} }
nla_nest_end(skb, nla); nla_nest_end(skb, nla);
...@@ -329,13 +327,13 @@ int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb) ...@@ -329,13 +327,13 @@ int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
*/ */
void ovs_vport_receive(struct vport *vport, struct sk_buff *skb) void ovs_vport_receive(struct vport *vport, struct sk_buff *skb)
{ {
struct vport_percpu_stats *stats; struct pcpu_tstats *stats;
stats = this_cpu_ptr(vport->percpu_stats); stats = this_cpu_ptr(vport->percpu_stats);
u64_stats_update_begin(&stats->sync); u64_stats_update_begin(&stats->syncp);
stats->rx_packets++; stats->rx_packets++;
stats->rx_bytes += skb->len; stats->rx_bytes += skb->len;
u64_stats_update_end(&stats->sync); u64_stats_update_end(&stats->syncp);
ovs_dp_process_received_packet(vport, skb); ovs_dp_process_received_packet(vport, skb);
} }
...@@ -346,7 +344,7 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb) ...@@ -346,7 +344,7 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb)
* @vport: vport on which to send the packet * @vport: vport on which to send the packet
* @skb: skb to send * @skb: skb to send
* *
* Sends the given packet and returns the length of data sent. Either RTNL * Sends the given packet and returns the length of data sent. Either ovs
* lock or rcu_read_lock must be held. * lock or rcu_read_lock must be held.
*/ */
int ovs_vport_send(struct vport *vport, struct sk_buff *skb) int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
...@@ -354,14 +352,14 @@ int ovs_vport_send(struct vport *vport, struct sk_buff *skb) ...@@ -354,14 +352,14 @@ int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
int sent = vport->ops->send(vport, skb); int sent = vport->ops->send(vport, skb);
if (likely(sent)) { if (likely(sent)) {
struct vport_percpu_stats *stats; struct pcpu_tstats *stats;
stats = this_cpu_ptr(vport->percpu_stats); stats = this_cpu_ptr(vport->percpu_stats);
u64_stats_update_begin(&stats->sync); u64_stats_update_begin(&stats->syncp);
stats->tx_packets++; stats->tx_packets++;
stats->tx_bytes += sent; stats->tx_bytes += sent;
u64_stats_update_end(&stats->sync); u64_stats_update_end(&stats->syncp);
} }
return sent; return sent;
} }
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#ifndef VPORT_H #ifndef VPORT_H
#define VPORT_H 1 #define VPORT_H 1
#include <linux/if_tunnel.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/netlink.h> #include <linux/netlink.h>
#include <linux/openvswitch.h> #include <linux/openvswitch.h>
...@@ -50,14 +51,6 @@ int ovs_vport_send(struct vport *, struct sk_buff *); ...@@ -50,14 +51,6 @@ int ovs_vport_send(struct vport *, struct sk_buff *);
/* The following definitions are for implementers of vport devices: */ /* The following definitions are for implementers of vport devices: */
struct vport_percpu_stats {
u64 rx_bytes;
u64 rx_packets;
u64 tx_bytes;
u64 tx_packets;
struct u64_stats_sync sync;
};
struct vport_err_stats { struct vport_err_stats {
u64 rx_dropped; u64 rx_dropped;
u64 rx_errors; u64 rx_errors;
...@@ -89,7 +82,7 @@ struct vport { ...@@ -89,7 +82,7 @@ struct vport {
struct hlist_node dp_hash_node; struct hlist_node dp_hash_node;
const struct vport_ops *ops; const struct vport_ops *ops;
struct vport_percpu_stats __percpu *percpu_stats; struct pcpu_tstats __percpu *percpu_stats;
spinlock_t stats_lock; spinlock_t stats_lock;
struct vport_err_stats err_stats; struct vport_err_stats err_stats;
...@@ -138,14 +131,14 @@ struct vport_parms { ...@@ -138,14 +131,14 @@ struct vport_parms {
struct vport_ops { struct vport_ops {
enum ovs_vport_type type; enum ovs_vport_type type;
/* Called with RTNL lock. */ /* Called with ovs_mutex. */
struct vport *(*create)(const struct vport_parms *); struct vport *(*create)(const struct vport_parms *);
void (*destroy)(struct vport *); void (*destroy)(struct vport *);
int (*set_options)(struct vport *, struct nlattr *); int (*set_options)(struct vport *, struct nlattr *);
int (*get_options)(const struct vport *, struct sk_buff *); int (*get_options)(const struct vport *, struct sk_buff *);
/* Called with rcu_read_lock or RTNL lock. */ /* Called with rcu_read_lock or ovs_mutex. */
const char *(*get_name)(const struct vport *); const char *(*get_name)(const struct vport *);
void (*get_config)(const struct vport *, void *); void (*get_config)(const struct vport *, void *);
int (*get_ifindex)(const struct vport *); int (*get_ifindex)(const struct vport *);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment