From: Pravin B Shelar Date: Mon, 7 Nov 2011 23:53:01 +0000 (-0800) Subject: datapath: Fix coding style issues. X-Git-Tag: v1.4.0~204 X-Git-Url: http://git.onelab.eu/?a=commitdiff_plain;h=6455100f38e9312346f4d58511595f695d813537;p=sliver-openvswitch.git datapath: Fix coding style issues. Most of issues are reported by checkpatch.pl Signed-off-by: Pravin B Shelar Acked-by: Jesse Gross Bug #7771 --- diff --git a/datapath/Makefile.am b/datapath/Makefile.am index fb1453c14..1c9e53b9d 100644 --- a/datapath/Makefile.am +++ b/datapath/Makefile.am @@ -1,4 +1,4 @@ -SUBDIRS = +SUBDIRS = if LINUX_ENABLED SUBDIRS += linux endif diff --git a/datapath/brcompat.c b/datapath/brcompat.c index f9d008399..718e48978 100644 --- a/datapath/brcompat.c +++ b/datapath/brcompat.c @@ -9,7 +9,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include -#include +#include #include #include #include @@ -39,7 +39,8 @@ static DECLARE_COMPLETION(brc_done); /* Userspace signaled operation done? */ static struct sk_buff *brc_reply; /* Reply from userspace. */ static u32 brc_seq; /* Sequence number for current op. */ -static struct sk_buff *brc_send_command(struct sk_buff *, struct nlattr **attrs); +static struct sk_buff *brc_send_command(struct sk_buff *, + struct nlattr **attrs); static int brc_send_simple_command(struct sk_buff *); static struct sk_buff *brc_make_request(int op, const char *bridge, @@ -342,18 +343,18 @@ static int brc_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) int err; switch (cmd) { - case SIOCDEVPRIVATE: - err = old_dev_ioctl(dev, rq, cmd); - break; - - case SIOCBRADDIF: - return brc_add_del_port(dev, rq->ifr_ifindex, 1); - case SIOCBRDELIF: - return brc_add_del_port(dev, rq->ifr_ifindex, 0); - - default: - err = -EOPNOTSUPP; - break; + case SIOCDEVPRIVATE: + err = old_dev_ioctl(dev, rq, cmd); + break; + + case SIOCBRADDIF: + return brc_add_del_port(dev, rq->ifr_ifindex, 1); + case SIOCBRDELIF: + return brc_add_del_port(dev, rq->ifr_ifindex, 0); + + default: + err = -EOPNOTSUPP; + break; } return err; @@ -472,7 +473,7 @@ static struct sk_buff *brc_send_command(struct sk_buff *request, if (!wait_for_completion_timeout(&brc_done, BRC_TIMEOUT)) { pr_warn("timed out waiting for userspace\n"); goto error; - } + } /* Grab reply. */ spin_lock_irqsave(&brc_lock, flags); @@ -499,7 +500,7 @@ static int __init brc_init(void) { int err; - printk("Open vSwitch Bridge Compatibility, built "__DATE__" "__TIME__"\n"); + pr_info("Open vSwitch Bridge Compatibility, built "__DATE__" "__TIME__"\n"); /* Set the bridge ioctl handler */ brioctl_set(brc_ioctl_deviceless_stub); diff --git a/datapath/checksum.c b/datapath/checksum.c index 3a131f4e2..1d21c4ed5 100644 --- a/datapath/checksum.c +++ b/datapath/checksum.c @@ -149,8 +149,8 @@ int compute_ip_summed(struct sk_buff *skb, bool xmit) /* In theory this could be either CHECKSUM_PARTIAL or CHECKSUM_COMPLETE. * However, on the receive side we should only get CHECKSUM_PARTIAL * packets from Xen, which uses some special fields to represent this - * (see vswitch_skb_checksum_setup()). Since we can only make one type work, - * pick the one that actually happens in practice. + * (see vswitch_skb_checksum_setup()). Since we can only make one type + * work, pick the one that actually happens in practice. 
* * On the transmit side (basically after skb_checksum_setup() * has been run or on internal dev transmit), packets with @@ -178,13 +178,14 @@ int compute_ip_summed(struct sk_buff *skb, bool xmit) } /* - * forward_ip_summed - map internal checksum state back onto native kernel fields + * forward_ip_summed - map internal checksum state back onto native + * kernel fields. * * @skb: Packet to manipulate. - * @xmit: Whether we are about send on the transmit path the network stack. This - * follows the same logic as the @xmit field in compute_ip_summed(). - * Generally, a given vport will have opposite values for @xmit passed to these - * two functions. + * @xmit: Whether we are about send on the transmit path the network stack. + * This follows the same logic as the @xmit field in compute_ip_summed(). + * Generally, a given vport will have opposite values for @xmit passed to + * these two functions. * * When a packet is about to egress from OVS take our internal fields (including * any modifications we have made) and recreate the correct representation for @@ -192,7 +193,7 @@ int compute_ip_summed(struct sk_buff *skb, bool xmit) */ void forward_ip_summed(struct sk_buff *skb, bool xmit) { - switch(get_ip_summed(skb)) { + switch (get_ip_summed(skb)) { case OVS_CSUM_NONE: skb->ip_summed = CHECKSUM_NONE; break; @@ -230,7 +231,8 @@ void forward_ip_summed(struct sk_buff *skb, bool xmit) } if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) - skb_set_transport_header(skb, OVS_CB(skb)->csum_start - skb_headroom(skb)); + skb_set_transport_header(skb, OVS_CB(skb)->csum_start - + skb_headroom(skb)); } u8 get_ip_summed(struct sk_buff *skb) @@ -250,7 +252,8 @@ void get_skb_csum_pointers(const struct sk_buff *skb, u16 *csum_start, *csum_offset = skb->csum; } -void set_skb_csum_pointers(struct sk_buff *skb, u16 csum_start, u16 csum_offset) +void set_skb_csum_pointers(struct sk_buff *skb, u16 csum_start, + u16 csum_offset) { OVS_CB(skb)->csum_start = csum_start; skb->csum = csum_offset; diff --git a/datapath/checksum.h b/datapath/checksum.h index 4f85104a7..0e2db4ceb 100644 --- a/datapath/checksum.h +++ b/datapath/checksum.h @@ -34,7 +34,8 @@ u8 get_ip_summed(struct sk_buff *skb); void set_ip_summed(struct sk_buff *skb, u8 ip_summed); void get_skb_csum_pointers(const struct sk_buff *skb, u16 *csum_start, u16 *csum_offset); -void set_skb_csum_pointers(struct sk_buff *skb, u16 csum_start, u16 csum_offset); +void set_skb_csum_pointers(struct sk_buff *skb, u16 csum_start, + u16 csum_offset); #else static inline int compute_ip_summed(struct sk_buff *skb, bool xmit) { @@ -114,7 +115,7 @@ static inline int rpl_pskb_expand_head(struct sk_buff *skb, int nhead, update_csum_start(skb, skb_headroom(skb) - old_headroom); - return 0; + return 0; } #define pskb_expand_head rpl_pskb_expand_head diff --git a/datapath/datapath.c b/datapath/datapath.c index 87056cf8c..059ef85c4 100644 --- a/datapath/datapath.c +++ b/datapath/datapath.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include #include @@ -144,7 +143,7 @@ static int get_dpifindex(struct datapath *dp) return ifindex; } -static inline size_t br_nlmsg_size(void) +static size_t br_nlmsg_size(void) { return NLMSG_ALIGN(sizeof(struct ifinfomsg)) + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ @@ -300,7 +299,8 @@ void dp_process_received_packet(struct vport *p, struct sk_buff *skb) } /* Look up flow. 
*/ - flow = flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len); + flow = flow_tbl_lookup(rcu_dereference(dp->table), + &key, key_len); if (unlikely(!flow)) { struct dp_upcall_info upcall; @@ -518,7 +518,7 @@ static int validate_sample(const struct nlattr *attr, int rem; memset(attrs, 0, sizeof(attrs)); - nla_for_each_nested (a, attr, rem) { + nla_for_each_nested(a, attr, rem) { int type = nla_type(a); if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type]) return -EINVAL; @@ -554,7 +554,7 @@ static int validate_action_key(const struct nlattr *a, #define ACTION(act, key) (((act) << 8) | (key)) - switch(ACTION(act_type, key_type)) { + switch (ACTION(act_type, key_type)) { const struct ovs_key_ipv4 *ipv4_key; const struct ovs_key_8021q *q_key; @@ -618,19 +618,20 @@ static int validate_action_key(const struct nlattr *a, static int validate_userspace(const struct nlattr *attr) { - static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = - { + static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = { [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 }, [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_U64 }, }; struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1]; int error; - error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX, attr, userspace_policy); + error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX, + attr, userspace_policy); if (error) return error; - if (!a[OVS_USERSPACE_ATTR_PID] || !nla_get_u32(a[OVS_USERSPACE_ATTR_PID])) + if (!a[OVS_USERSPACE_ATTR_PID] || + !nla_get_u32(a[OVS_USERSPACE_ATTR_PID])) return -EINVAL; return 0; @@ -874,7 +875,8 @@ static struct genl_multicast_group dp_flow_multicast_group = { /* Called with genl_lock. */ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp, - struct sk_buff *skb, u32 pid, u32 seq, u32 flags, u8 cmd) + struct sk_buff *skb, u32 pid, + u32 seq, u32 flags, u8 cmd) { const int skb_orig_len = skb->len; const struct sw_flow_actions *sf_acts; @@ -913,7 +915,8 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp, NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, flow_used_time(used)); if (stats.n_packets) - NLA_PUT(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats); + NLA_PUT(skb, OVS_FLOW_ATTR_STATS, + sizeof(struct ovs_flow_stats), &stats); if (tcp_flags) NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags); @@ -950,15 +953,24 @@ static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow) sf_acts = rcu_dereference_protected(flow->sf_acts, lockdep_genl_is_held()); - len = nla_total_size(FLOW_BUFSIZE); /* OVS_FLOW_ATTR_KEY */ - len += nla_total_size(sf_acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */ - len += nla_total_size(sizeof(struct ovs_flow_stats)); /* OVS_FLOW_ATTR_STATS */ - len += nla_total_size(1); /* OVS_FLOW_ATTR_TCP_FLAGS */ - len += nla_total_size(8); /* OVS_FLOW_ATTR_USED */ - return genlmsg_new(NLMSG_ALIGN(sizeof(struct ovs_header)) + len, GFP_KERNEL); + /* OVS_FLOW_ATTR_KEY */ + len = nla_total_size(FLOW_BUFSIZE); + /* OVS_FLOW_ATTR_ACTIONS */ + len += nla_total_size(sf_acts->actions_len); + /* OVS_FLOW_ATTR_STATS */ + len += nla_total_size(sizeof(struct ovs_flow_stats)); + /* OVS_FLOW_ATTR_TCP_FLAGS */ + len += nla_total_size(1); + /* OVS_FLOW_ATTR_USED */ + len += nla_total_size(8); + + len += NLMSG_ALIGN(sizeof(struct ovs_header)); + + return genlmsg_new(len, GFP_KERNEL); } -static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow, struct datapath *dp, +static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow, + struct 
datapath *dp, u32 pid, u32 seq, u8 cmd) { struct sk_buff *skb; @@ -1051,10 +1063,12 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) flow_tbl_insert(table, flow); reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, - info->snd_seq, OVS_FLOW_CMD_NEW); + info->snd_seq, + OVS_FLOW_CMD_NEW); } else { /* We found a matching flow. */ struct sw_flow_actions *old_acts; + struct nlattr *acts_attrs; /* Bail out if we're not allowed to modify an existing flow. * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL @@ -1070,13 +1084,14 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) /* Update actions. */ old_acts = rcu_dereference_protected(flow->sf_acts, lockdep_genl_is_held()); - if (a[OVS_FLOW_ATTR_ACTIONS] && - (old_acts->actions_len != nla_len(a[OVS_FLOW_ATTR_ACTIONS]) || - memcmp(old_acts->actions, nla_data(a[OVS_FLOW_ATTR_ACTIONS]), - old_acts->actions_len))) { + acts_attrs = a[OVS_FLOW_ATTR_ACTIONS]; + if (acts_attrs && + (old_acts->actions_len != nla_len(acts_attrs) || + memcmp(old_acts->actions, nla_data(acts_attrs), + old_acts->actions_len))) { struct sw_flow_actions *new_acts; - new_acts = flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]); + new_acts = flow_actions_alloc(acts_attrs); error = PTR_ERR(new_acts); if (IS_ERR(new_acts)) goto error; @@ -1086,7 +1101,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) } reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, - info->snd_seq, OVS_FLOW_CMD_NEW); + info->snd_seq, OVS_FLOW_CMD_NEW); /* Clear stats. */ if (a[OVS_FLOW_ATTR_CLEAR]) { @@ -1098,7 +1113,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) if (!IS_ERR(reply)) genl_notify(reply, genl_info_net(info), info->snd_pid, - dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL); + dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL); else netlink_set_err(INIT_NET_GENL_SOCK, 0, dp_flow_multicast_group.id, PTR_ERR(reply)); @@ -1137,7 +1152,8 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info) if (!flow) return -ENOENT; - reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, info->snd_seq, OVS_FLOW_CMD_NEW); + reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, + info->snd_seq, OVS_FLOW_CMD_NEW); if (IS_ERR(reply)) return PTR_ERR(reply); @@ -1164,7 +1180,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) dp = get_dp(ovs_header->dp_ifindex); if (!dp) - return -ENODEV; + return -ENODEV; table = get_table_protected(dp); flow = flow_tbl_lookup(table, &key, key_len); @@ -1207,7 +1223,8 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) if (!flow) break; - if (ovs_flow_cmd_fill_info(flow, dp, skb, NETLINK_CB(cb->skb).pid, + if (ovs_flow_cmd_fill_info(flow, dp, skb, + NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, OVS_FLOW_CMD_NEW) < 0) break; @@ -1318,7 +1335,8 @@ static int ovs_dp_cmd_validate(struct nlattr *a[OVS_DP_ATTR_MAX + 1]) } /* Called with genl_mutex and optionally with RTNL lock also. 
*/ -static struct datapath *lookup_datapath(struct ovs_header *ovs_header, struct nlattr *a[OVS_DP_ATTR_MAX + 1]) +static struct datapath *lookup_datapath(struct ovs_header *ovs_header, + struct nlattr *a[OVS_DP_ATTR_MAX + 1]) { struct datapath *dp; @@ -1397,7 +1415,8 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) goto err_destroy_percpu; } - reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW); + reply = ovs_dp_cmd_build_info(dp, info->snd_pid, + info->snd_seq, OVS_DP_CMD_NEW); err = PTR_ERR(reply); if (IS_ERR(reply)) goto err_destroy_local_port; @@ -1444,12 +1463,13 @@ static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info) if (IS_ERR(dp)) goto exit_unlock; - reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_DEL); + reply = ovs_dp_cmd_build_info(dp, info->snd_pid, + info->snd_seq, OVS_DP_CMD_DEL); err = PTR_ERR(reply); if (IS_ERR(reply)) goto exit_unlock; - list_for_each_entry_safe (vport, next_vport, &dp->port_list, node) + list_for_each_entry_safe(vport, next_vport, &dp->port_list, node) if (vport->port_no != OVSP_LOCAL) dp_detach_port(vport); @@ -1492,7 +1512,8 @@ static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info) if (IS_ERR(dp)) return PTR_ERR(dp); - reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW); + reply = ovs_dp_cmd_build_info(dp, info->snd_pid, + info->snd_seq, OVS_DP_CMD_NEW); if (IS_ERR(reply)) { err = PTR_ERR(reply); netlink_set_err(INIT_NET_GENL_SOCK, 0, @@ -1519,7 +1540,8 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info) if (IS_ERR(dp)) return PTR_ERR(dp); - reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW); + reply = ovs_dp_cmd_build_info(dp, info->snd_pid, + info->snd_seq, OVS_DP_CMD_NEW); if (IS_ERR(reply)) return PTR_ERR(reply); @@ -1532,7 +1554,7 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) int skip = cb->args[0]; int i = 0; - list_for_each_entry (dp, &dps, list_node) { + list_for_each_entry(dp, &dps, list_node) { if (i < skip) continue; if (ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid, @@ -1618,7 +1640,8 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb, NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport_get_name(vport)); NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid); - nla = nla_reserve(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats)); + nla = nla_reserve(skb, OVS_VPORT_ATTR_STATS, + sizeof(struct ovs_vport_stats)); if (!nla) goto nla_put_failure; @@ -1694,7 +1717,8 @@ static struct vport *lookup_vport(struct ovs_header *ovs_header, } /* Called with RTNL lock. 
*/ -static int change_vport(struct vport *vport, struct nlattr *a[OVS_VPORT_ATTR_MAX + 1]) +static int change_vport(struct vport *vport, + struct nlattr *a[OVS_VPORT_ATTR_MAX + 1]) { int err = 0; @@ -1768,12 +1792,13 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) if (IS_ERR(vport)) goto exit_unlock; - dp_sysfs_add_if(vport); + dp_sysfs_add_if(vport); err = change_vport(vport, a); if (!err) { reply = ovs_vport_cmd_build_info(vport, info->snd_pid, - info->snd_seq, OVS_VPORT_CMD_NEW); + info->snd_seq, + OVS_VPORT_CMD_NEW); if (IS_ERR(reply)) err = PTR_ERR(reply); } @@ -1809,8 +1834,10 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info) goto exit_unlock; err = 0; - if (a[OVS_VPORT_ATTR_TYPE] && nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport_get_type(vport)) + if (a[OVS_VPORT_ATTR_TYPE] && + nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport_get_type(vport)) err = -EINVAL; + if (!err && a[OVS_VPORT_ATTR_OPTIONS]) err = vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]); if (!err) @@ -2032,7 +2059,8 @@ static int __init dp_init(void) BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb)); - printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR); + pr_info("Open vSwitch %s, built "__DATE__" "__TIME__"\n", + VERSION BUILDNR); err = tnl_init(); if (err) diff --git a/datapath/datapath.h b/datapath/datapath.h index 4964a51c6..3d7ee629d 100644 --- a/datapath/datapath.h +++ b/datapath/datapath.h @@ -131,7 +131,8 @@ extern int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd); void dp_process_received_packet(struct vport *, struct sk_buff *); void dp_detach_port(struct vport *); -int dp_upcall(struct datapath *, struct sk_buff *, const struct dp_upcall_info *); +int dp_upcall(struct datapath *, struct sk_buff *, + const struct dp_upcall_info *); struct datapath *get_dp(int dp_idx); const char *dp_name(const struct datapath *dp); diff --git a/datapath/dp_sysfs.h b/datapath/dp_sysfs.h index 49af58a22..20a5548ab 100644 --- a/datapath/dp_sysfs.h +++ b/datapath/dp_sysfs.h @@ -21,7 +21,7 @@ int dp_sysfs_add_if(struct vport *p); int dp_sysfs_del_if(struct vport *p); #ifdef CONFIG_SYSFS -extern struct sysfs_ops brport_sysfs_ops; +extern const struct sysfs_ops brport_sysfs_ops; #endif #endif /* dp_sysfs.h */ diff --git a/datapath/dp_sysfs_dp.c b/datapath/dp_sysfs_dp.c index 16aa78790..fbe3ca1eb 100644 --- a/datapath/dp_sysfs_dp.c +++ b/datapath/dp_sysfs_dp.c @@ -77,7 +77,7 @@ static ssize_t store_bridge_parm(DEVICE_PARAMS, dp = sysfs_get_dp(to_net_dev(d)); if (dp) - printk("%s: xxx writing dp parms not supported yet!\n", + pr_warning("%s: xxx writing dp parms not supported yet!\n", dp_name(dp)); else result = -ENODEV; @@ -96,7 +96,7 @@ static ssize_t show_forward_delay(DEVICE_PARAMS, char *buf) static void set_forward_delay(struct datapath *dp, unsigned long val) { - printk("%s: xxx attempt to set_forward_delay()\n", dp_name(dp)); + pr_info("%s: xxx attempt to set_forward_delay()\n", dp_name(dp)); } static ssize_t store_forward_delay(DEVICE_PARAMS, @@ -114,7 +114,7 @@ static ssize_t show_hello_time(DEVICE_PARAMS, char *buf) static void set_hello_time(struct datapath *dp, unsigned long val) { - printk("%s: xxx attempt to set_hello_time()\n", dp_name(dp)); + pr_info("%s: xxx attempt to set_hello_time()\n", dp_name(dp)); } static ssize_t store_hello_time(DEVICE_PARAMS, @@ -133,7 +133,7 @@ static ssize_t show_max_age(DEVICE_PARAMS, char *buf) static void set_max_age(struct datapath *dp, unsigned long val) { - printk("%s: 
xxx attempt to set_max_age()\n", dp_name(dp)); + pr_info("%s: xxx attempt to set_max_age()\n", dp_name(dp)); } static ssize_t store_max_age(DEVICE_PARAMS, @@ -150,7 +150,7 @@ static ssize_t show_ageing_time(DEVICE_PARAMS, char *buf) static void set_ageing_time(struct datapath *dp, unsigned long val) { - printk("%s: xxx attempt to set_ageing_time()\n", dp_name(dp)); + pr_info("%s: xxx attempt to set_ageing_time()\n", dp_name(dp)); } static ssize_t store_ageing_time(DEVICE_PARAMS, @@ -178,7 +178,7 @@ static ssize_t store_stp_state(DEVICE_PARAMS, dp = sysfs_get_dp(to_net_dev(d)); if (dp) - printk("%s: xxx attempt to set_stp_state()\n", dp_name(dp)); + pr_info("%s: xxx attempt to set_stp_state()\n", dp_name(dp)); else result = -ENODEV; @@ -196,7 +196,7 @@ static ssize_t show_priority(DEVICE_PARAMS, char *buf) static void set_priority(struct datapath *dp, unsigned long val) { - printk("%s: xxx attempt to set_priority()\n", dp_name(dp)); + pr_info("%s: xxx attempt to set_priority()\n", dp_name(dp)); } static ssize_t store_priority(DEVICE_PARAMS, @@ -225,7 +225,8 @@ static ssize_t show_bridge_id(DEVICE_PARAMS, char *buf) addr = vport_get_addr(vport); result = sprintf(buf, "%.2x%.2x.%.2x%.2x%.2x%.2x%.2x%.2x\n", - 0, 0, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + 0, 0, addr[0], addr[1], addr[2], addr[3], + addr[4], addr[5]); } else result = -ENODEV; @@ -300,7 +301,8 @@ static ssize_t store_group_addr(DEVICE_PARAMS, dp = sysfs_get_dp(to_net_dev(d)); if (dp) - printk("%s: xxx attempt to store_group_addr()\n", dp_name(dp)); + pr_info("%s: xxx attempt to store_group_addr()\n", + dp_name(dp)); else result = -ENODEV; @@ -366,7 +368,7 @@ int dp_sysfs_add_dp(struct datapath *dp) err = kobject_add(&dp->ifobj, kobj, SYSFS_BRIDGE_PORT_SUBDIR); if (err) { pr_info("%s: can't add kobject (directory) %s/%s\n", - __FUNCTION__, dp_name(dp), kobject_name(&dp->ifobj)); + __func__, dp_name(dp), kobject_name(&dp->ifobj)); goto out2; } kobject_uevent(&dp->ifobj, KOBJ_ADD); diff --git a/datapath/dp_sysfs_if.c b/datapath/dp_sysfs_if.c index c37e11fd1..bb253ab22 100644 --- a/datapath/dp_sysfs_if.c +++ b/datapath/dp_sysfs_if.c @@ -12,6 +12,8 @@ * This has been shamelessly copied from the kernel sources. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -31,7 +33,7 @@ struct brport_attribute { }; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36) -#define BRPORT_ATTR(_name,_mode,_show,_store) \ +#define BRPORT_ATTR(_name, _mode, _show, _store) \ struct brport_attribute brport_attr_##_name = { \ .attr = {.name = __stringify(_name), \ .mode = _mode }, \ @@ -39,10 +41,10 @@ struct brport_attribute brport_attr_##_name = { \ .store = _store, \ }; #else -#define BRPORT_ATTR(_name,_mode,_show,_store) \ -struct brport_attribute brport_attr_##_name = { \ - .attr = {.name = __stringify(_name), \ - .mode = _mode, \ +#define BRPORT_ATTR(_name, _mode, _show, _store) \ +struct brport_attribute brport_attr_##_name = { \ + .attr = {.name = __stringify(_name), \ + .mode = _mode, \ .owner = THIS_MODULE, }, \ .show = _show, \ .store = _store, \ @@ -126,22 +128,19 @@ static ssize_t show_port_state(struct vport *p, char *buf) } static BRPORT_ATTR(state, S_IRUGO, show_port_state, NULL); -static ssize_t show_message_age_timer(struct vport *p, - char *buf) +static ssize_t show_message_age_timer(struct vport *p, char *buf) { return sprintf(buf, "%d\n", 0); } static BRPORT_ATTR(message_age_timer, S_IRUGO, show_message_age_timer, NULL); -static ssize_t show_forward_delay_timer(struct vport *p, - char *buf) +static ssize_t show_forward_delay_timer(struct vport *p, char *buf) { return sprintf(buf, "%d\n", 0); } static BRPORT_ATTR(forward_delay_timer, S_IRUGO, show_forward_delay_timer, NULL); -static ssize_t show_hold_timer(struct vport *p, - char *buf) +static ssize_t show_hold_timer(struct vport *p, char *buf) { return sprintf(buf, "%d\n", 0); } @@ -168,32 +167,32 @@ static struct brport_attribute *brport_attrs[] = { #define to_vport_attr(_at) container_of(_at, struct brport_attribute, attr) #define to_vport(obj) container_of(obj, struct vport, kobj) -static ssize_t brport_show(struct kobject * kobj, - struct attribute * attr, char * buf) +static ssize_t brport_show(struct kobject *kobj, + struct attribute *attr, char *buf) { - struct brport_attribute * brport_attr = to_vport_attr(attr); - struct vport * p = to_vport(kobj); + struct brport_attribute *brport_attr = to_vport_attr(attr); + struct vport *p = to_vport(kobj); return brport_attr->show(p, buf); } -static ssize_t brport_store(struct kobject * kobj, - struct attribute * attr, - const char * buf, size_t count) +static ssize_t brport_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t count) { - struct vport * p = to_vport(kobj); + struct vport *p = to_vport(kobj); ssize_t ret = -EINVAL; if (!capable(CAP_NET_ADMIN)) return -EPERM; - printk("%s: xxx writing port parms not supported yet!\n", - dp_name(p->dp)); + pr_warning("%s: xxx writing port parms not supported yet!\n", + dp_name(p->dp)); return ret; } -struct sysfs_ops brport_sysfs_ops = { +const struct sysfs_ops brport_sysfs_ops = { .show = brport_show, .store = brport_store, }; diff --git a/datapath/flow.c b/datapath/flow.c index 9e0b842ef..670b6f8f2 100644 --- a/datapath/flow.c +++ b/datapath/flow.c @@ -8,7 +8,7 @@ #include "flow.h" #include "datapath.h" -#include +#include #include #include #include @@ -49,13 +49,13 @@ static int check_header(struct sk_buff *skb, int len) return 0; } -static inline bool arphdr_ok(struct sk_buff *skb) +static bool arphdr_ok(struct sk_buff *skb) { return pskb_may_pull(skb, skb_network_offset(skb) + sizeof(struct arp_eth_header)); } -static inline int check_iphdr(struct sk_buff *skb) +static int check_iphdr(struct 
sk_buff *skb) { unsigned int nh_ofs = skb_network_offset(skb); unsigned int ip_len; @@ -74,7 +74,7 @@ static inline int check_iphdr(struct sk_buff *skb) return 0; } -static inline bool tcphdr_ok(struct sk_buff *skb) +static bool tcphdr_ok(struct sk_buff *skb) { int th_ofs = skb_transport_offset(skb); int tcp_len; @@ -90,13 +90,13 @@ static inline bool tcphdr_ok(struct sk_buff *skb) return true; } -static inline bool udphdr_ok(struct sk_buff *skb) +static bool udphdr_ok(struct sk_buff *skb) { return pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)); } -static inline bool icmphdr_ok(struct sk_buff *skb) +static bool icmphdr_ok(struct sk_buff *skb) { return pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct icmphdr)); @@ -116,8 +116,8 @@ u64 flow_used_time(unsigned long flow_jiffies) } #define SW_FLOW_KEY_OFFSET(field) \ - offsetof(struct sw_flow_key, field) + \ - FIELD_SIZEOF(struct sw_flow_key, field) + (offsetof(struct sw_flow_key, field) + \ + FIELD_SIZEOF(struct sw_flow_key, field)) /** * skip_exthdr - skip any IPv6 extension headers @@ -205,7 +205,8 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key, ipv6_addr_copy(&key->ipv6.addr.src, &nh->saddr); ipv6_addr_copy(&key->ipv6.addr.dst, &nh->daddr); - payload_ofs = skip_exthdr(skb, payload_ofs, &nexthdr, &key->ip.tos_frag); + payload_ofs = skip_exthdr(skb, payload_ofs, + &nexthdr, &key->ip.tos_frag); if (unlikely(payload_ofs < 0)) return -EINVAL; @@ -286,7 +287,7 @@ static struct hlist_head __rcu *find_bucket(struct flow_table * table, u32 hash) static struct flex_array __rcu *alloc_buckets(unsigned int n_buckets) { - struct flex_array __rcu * buckets; + struct flex_array __rcu *buckets; int i, err; buckets = flex_array_alloc(sizeof(struct hlist_head *), @@ -307,7 +308,7 @@ static struct flex_array __rcu *alloc_buckets(unsigned int n_buckets) return buckets; } -static void free_buckets(struct flex_array * buckets) +static void free_buckets(struct flex_array *buckets) { flex_array_free(buckets); } @@ -368,10 +369,10 @@ static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu) void flow_tbl_deferred_destroy(struct flow_table *table) { - if (!table) - return; + if (!table) + return; - call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb); + call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb); } struct sw_flow *flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last) @@ -565,7 +566,8 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, icmp_len -= sizeof(*nd); offset = 0; while (icmp_len >= 8) { - struct nd_opt_hdr *nd_opt = (struct nd_opt_hdr *)(nd->opt + offset); + struct nd_opt_hdr *nd_opt = + (struct nd_opt_hdr *)(nd->opt + offset); int opt_len = nd_opt->nd_opt_len * 8; if (unlikely(!opt_len || opt_len > icmp_len)) @@ -719,8 +721,8 @@ int flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, if (icmphdr_ok(skb)) { struct icmphdr *icmp = icmp_hdr(skb); /* The ICMP type and code fields use the 16-bit - * transport port fields, so we need to store them - * in 16-bit network byte order. */ + * transport port fields, so we need to store + * them in 16-bit network byte order. 
*/ key->ipv4.tp.src = htons(icmp->type); key->ipv4.tp.dst = htons(icmp->code); } @@ -798,10 +800,10 @@ out: u32 flow_hash(const struct sw_flow_key *key, int key_len) { - return jhash2((u32*)key, DIV_ROUND_UP(key_len, sizeof(u32)), hash_seed); + return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), hash_seed); } -struct sw_flow * flow_tbl_lookup(struct flow_table *table, +struct sw_flow *flow_tbl_lookup(struct flow_table *table, struct sw_flow_key *key, int key_len) { struct sw_flow *flow; @@ -911,9 +913,10 @@ int flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, const struct ovs_key_arp *arp_key; const struct ovs_key_nd *nd_key; - int type = nla_type(nla); + int type = nla_type(nla); - if (type > OVS_KEY_ATTR_MAX || nla_len(nla) != ovs_key_lens[type]) + if (type > OVS_KEY_ATTR_MAX || + nla_len(nla) != ovs_key_lens[type]) goto invalid; #define TRANSITION(PREV_TYPE, TYPE) (((PREV_TYPE) << 16) | (TYPE)) @@ -1172,7 +1175,7 @@ int flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, __be64 *tun_id, prev_type = OVS_KEY_ATTR_UNSPEC; nla_for_each_nested(nla, attr, rem) { - int type = nla_type(nla); + int type = nla_type(nla); if (type > OVS_KEY_ATTR_MAX || nla_len(nla) != ovs_key_lens[type]) return -EINVAL; diff --git a/datapath/flow.h b/datapath/flow.h index e68269e09..5bd8c8daa 100644 --- a/datapath/flow.h +++ b/datapath/flow.h @@ -104,8 +104,7 @@ struct sw_flow { u8 tcp_flags; /* Union of seen TCP flags. */ }; -struct arp_eth_header -{ +struct arp_eth_header { __be16 ar_hrd; /* format of hardware address */ __be16 ar_pro; /* format of protocol address */ unsigned char ar_hln; /* length of hardware address */ @@ -164,9 +163,9 @@ int flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, __be64 *tun_id, #define TBL_MIN_BUCKETS 1024 struct flow_table { - struct flex_array *buckets; - unsigned int count, n_buckets; - struct rcu_head rcu; + struct flex_array *buckets; + unsigned int count, n_buckets; + struct rcu_head rcu; }; static inline int flow_tbl_count(struct flow_table *table) diff --git a/datapath/linux/Kbuild.in b/datapath/linux/Kbuild.in index 4e1611a31..07d106dd4 100644 --- a/datapath/linux/Kbuild.in +++ b/datapath/linux/Kbuild.in @@ -9,7 +9,7 @@ include $(srcdir)/../Modules.mk include $(srcdir)/Modules.mk EXTRA_CFLAGS := -DVERSION=\"$(VERSION)\" -EXTRA_CFLAGS += -I$(srcdir)/.. +EXTRA_CFLAGS += -I$(srcdir)/.. EXTRA_CFLAGS += -I$(builddir)/.. 
ifeq '$(BUILDNR)' '0' EXTRA_CFLAGS += -DBUILDNR=\"\" diff --git a/datapath/linux/Modules.mk b/datapath/linux/Modules.mk index 1e4fef6d4..0857735d5 100644 --- a/datapath/linux/Modules.mk +++ b/datapath/linux/Modules.mk @@ -10,7 +10,6 @@ openvswitch_sources += \ linux/compat/skbuff-openvswitch.c \ linux/compat/time.c openvswitch_headers += \ - linux/compat/include/asm-generic/bug.h \ linux/compat/include/linux/compiler.h \ linux/compat/include/linux/compiler-gcc.h \ linux/compat/include/linux/cpumask.h \ diff --git a/datapath/linux/compat/addrconf_core-openvswitch.c b/datapath/linux/compat/addrconf_core-openvswitch.c index b5a757407..35e36120b 100644 --- a/datapath/linux/compat/addrconf_core-openvswitch.c +++ b/datapath/linux/compat/addrconf_core-openvswitch.c @@ -12,7 +12,7 @@ static inline unsigned ipv6_addr_scope2type(unsigned scope) { - switch(scope) { + switch (scope) { case IPV6_ADDR_SCOPE_NODELOCAL: return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_NODELOCAL) | IPV6_ADDR_LOOPBACK); diff --git a/datapath/linux/compat/flex_array.c b/datapath/linux/compat/flex_array.c index 3b96b8e46..1e6d9c18b 100644 --- a/datapath/linux/compat/flex_array.c +++ b/datapath/linux/compat/flex_array.c @@ -64,7 +64,7 @@ static inline int elements_fit_in_base(struct flex_array *fa) * page pointers that we can fit in the base structure or (using * integer math): * - * (PAGE_SIZE/element_size) * (PAGE_SIZE-8)/sizeof(void *) + * (PAGE_SIZE/element_size) * (PAGE_SIZE-8)/sizeof(void *) * * Here's a table showing example capacities. Note that the maximum * index that the get/put() functions is just nr_objects-1. This @@ -178,7 +178,7 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags) * flex_array_put - copy data into the array at @element_nr * @fa: the flex array to copy data into * @element_nr: index of the position in which to insert - * the new element. + * the new element. 
* @src: address of data to copy into the array * @flags: page allocation flags to use for array expansion * @@ -248,7 +248,8 @@ int flex_array_clear(struct flex_array *fa, unsigned int element_nr) /** * flex_array_prealloc - guarantee that array space exists * @fa: the flex array for which to preallocate parts - * @start: index of first array element for which space is allocated + * @start: index of first array element for which space is + * allocated * @nr_elements: number of elements for which space is allocated * @flags: page allocation flags * diff --git a/datapath/linux/compat/include/asm-generic/bug.h b/datapath/linux/compat/include/asm-generic/bug.h deleted file mode 100644 index 1d9b31401..000000000 --- a/datapath/linux/compat/include/asm-generic/bug.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef __ASM_GENERIC_BUG_WRAPPER_H -#define __ASM_GENERIC_BUG_WRAPPER_H - -#include_next - -#ifndef WARN_ON_ONCE -#define WARN_ON_ONCE(condition) ({ \ - static int __warned; \ - int __ret_warn_once = !!(condition); \ - \ - if (unlikely(__ret_warn_once) && !__warned) { \ - WARN_ON(1); \ - __warned = 1; \ - } \ - unlikely(__ret_warn_once); \ -}) -#endif - -#endif diff --git a/datapath/linux/compat/include/linux/dmi.h b/datapath/linux/compat/include/linux/dmi.h index 01d4396fb..20945d3c0 100644 --- a/datapath/linux/compat/include/linux/dmi.h +++ b/datapath/linux/compat/include/linux/dmi.h @@ -14,78 +14,78 @@ #include enum dmi_field { - DMI_NONE, - DMI_BIOS_VENDOR, - DMI_BIOS_VERSION, - DMI_BIOS_DATE, - DMI_SYS_VENDOR, - DMI_PRODUCT_NAME, - DMI_PRODUCT_VERSION, - DMI_PRODUCT_SERIAL, - DMI_PRODUCT_UUID, - DMI_BOARD_VENDOR, - DMI_BOARD_NAME, - DMI_BOARD_VERSION, - DMI_BOARD_SERIAL, - DMI_BOARD_ASSET_TAG, - DMI_CHASSIS_VENDOR, - DMI_CHASSIS_TYPE, - DMI_CHASSIS_VERSION, - DMI_CHASSIS_SERIAL, - DMI_CHASSIS_ASSET_TAG, - DMI_STRING_MAX, + DMI_NONE, + DMI_BIOS_VENDOR, + DMI_BIOS_VERSION, + DMI_BIOS_DATE, + DMI_SYS_VENDOR, + DMI_PRODUCT_NAME, + DMI_PRODUCT_VERSION, + DMI_PRODUCT_SERIAL, + DMI_PRODUCT_UUID, + DMI_BOARD_VENDOR, + DMI_BOARD_NAME, + DMI_BOARD_VERSION, + DMI_BOARD_SERIAL, + DMI_BOARD_ASSET_TAG, + DMI_CHASSIS_VENDOR, + DMI_CHASSIS_TYPE, + DMI_CHASSIS_VERSION, + DMI_CHASSIS_SERIAL, + DMI_CHASSIS_ASSET_TAG, + DMI_STRING_MAX, }; enum dmi_device_type { - DMI_DEV_TYPE_ANY = 0, - DMI_DEV_TYPE_OTHER, - DMI_DEV_TYPE_UNKNOWN, - DMI_DEV_TYPE_VIDEO, - DMI_DEV_TYPE_SCSI, - DMI_DEV_TYPE_ETHERNET, - DMI_DEV_TYPE_TOKENRING, - DMI_DEV_TYPE_SOUND, - DMI_DEV_TYPE_IPMI = -1, - DMI_DEV_TYPE_OEM_STRING = -2 + DMI_DEV_TYPE_ANY = 0, + DMI_DEV_TYPE_OTHER, + DMI_DEV_TYPE_UNKNOWN, + DMI_DEV_TYPE_VIDEO, + DMI_DEV_TYPE_SCSI, + DMI_DEV_TYPE_ETHERNET, + DMI_DEV_TYPE_TOKENRING, + DMI_DEV_TYPE_SOUND, + DMI_DEV_TYPE_IPMI = -1, + DMI_DEV_TYPE_OEM_STRING = -2 }; struct dmi_header { - u8 type; - u8 length; - u16 handle; + u8 type; + u8 length; + u16 handle; }; /* * DMI callbacks for problem boards */ struct dmi_strmatch { - u8 slot; - char *substr; + u8 slot; + char *substr; }; struct dmi_system_id { - int (*callback)(struct dmi_system_id *); - const char *ident; - struct dmi_strmatch matches[4]; - void *driver_data; + int (*callback)(struct dmi_system_id *); + const char *ident; + struct dmi_strmatch matches[4]; + void *driver_data; }; #define DMI_MATCH(a, b) { a, b } struct dmi_device { - struct list_head list; - int type; - const char *name; - void *device_data; /* Type specific data */ + struct list_head list; + int type; + const char *name; + void *device_data; /* Type specific data */ }; /* No CONFIG_DMI before 2.6.16 */ #if 
defined(CONFIG_DMI) || defined(CONFIG_X86_32) extern int dmi_check_system(struct dmi_system_id *list); -extern char * dmi_get_system_info(int field); -extern struct dmi_device * dmi_find_device(int type, const char *name, - struct dmi_device *from); +extern char *dmi_get_system_info(int field); +extern struct dmi_device *dmi_find_device(int type, const char *name, + struct dmi_device *from); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) extern void dmi_scan_machine(void); #endif @@ -99,9 +99,9 @@ extern int dmi_name_in_vendors(char *str); #else static inline int dmi_check_system(struct dmi_system_id *list) { return 0; } -static inline char * dmi_get_system_info(int field) { return NULL; } -static inline struct dmi_device * dmi_find_device(int type, const char *name, - struct dmi_device *from) { return NULL; } +static inline char *dmi_get_system_info(int field) { return NULL; } +static inline struct dmi_device *dmi_find_device(int type, const char *name, + struct dmi_device *from) { return NULL; } static inline int dmi_get_year(int year) { return 0; } static inline int dmi_name_in_vendors(char *s) { return 0; } diff --git a/datapath/linux/compat/include/linux/icmp.h b/datapath/linux/compat/include/linux/icmp.h index 89b354e4c..4be4d2b61 100644 --- a/datapath/linux/compat/include/linux/icmp.h +++ b/datapath/linux/compat/include/linux/icmp.h @@ -6,7 +6,7 @@ #ifndef HAVE_SKBUFF_HEADER_HELPERS static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb) { - return (struct icmphdr *)skb_transport_header(skb); + return (struct icmphdr *)skb_transport_header(skb); } #endif diff --git a/datapath/linux/compat/include/linux/icmpv6.h b/datapath/linux/compat/include/linux/icmpv6.h index f005a48ee..06d91b395 100644 --- a/datapath/linux/compat/include/linux/icmpv6.h +++ b/datapath/linux/compat/include/linux/icmpv6.h @@ -6,7 +6,7 @@ #ifndef HAVE_ICMP6_HDR static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) { - return (struct icmp6hdr *)skb_transport_header(skb); + return (struct icmp6hdr *)skb_transport_header(skb); } #endif diff --git a/datapath/linux/compat/include/linux/jiffies.h b/datapath/linux/compat/include/linux/jiffies.h index 3286e6346..a64f22641 100644 --- a/datapath/linux/compat/include/linux/jiffies.h +++ b/datapath/linux/compat/include/linux/jiffies.h @@ -9,17 +9,17 @@ /* Same as above, but does so with platform independent 64bit types. * These must be used when utilizing jiffies_64 (i.e. 
return value of * get_jiffies_64() */ -#define time_after64(a,b) \ - (typecheck(__u64, a) && \ - typecheck(__u64, b) && \ - ((__s64)(b) - (__s64)(a) < 0)) -#define time_before64(a,b) time_after64(b,a) +#define time_after64(a, b) \ + (typecheck(__u64, a) && \ + typecheck(__u64, b) && \ + ((__s64)(b) - (__s64)(a) < 0)) +#define time_before64(a, b) time_after64(b, a) -#define time_after_eq64(a,b) \ - (typecheck(__u64, a) && \ - typecheck(__u64, b) && \ - ((__s64)(a) - (__s64)(b) >= 0)) -#define time_before_eq64(a,b) time_after_eq64(b,a) +#define time_after_eq64(a, b) \ + (typecheck(__u64, a) && \ + typecheck(__u64, b) && \ + ((__s64)(a) - (__s64)(b) >= 0)) +#define time_before_eq64(a, b) time_after_eq64(b, a) #endif /* linux kernel < 2.6.19 */ diff --git a/datapath/linux/compat/include/linux/kernel.h b/datapath/linux/compat/include/linux/kernel.h index 4af88516e..bd6e9a49d 100644 --- a/datapath/linux/compat/include/linux/kernel.h +++ b/datapath/linux/compat/include/linux/kernel.h @@ -10,25 +10,25 @@ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) #undef pr_emerg #define pr_emerg(fmt, ...) \ - printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) + printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) #undef pr_alert #define pr_alert(fmt, ...) \ - printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) + printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) #undef pr_crit #define pr_crit(fmt, ...) \ - printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) + printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) #undef pr_err #define pr_err(fmt, ...) \ - printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) + printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) #undef pr_warning #define pr_warning(fmt, ...) \ - printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) + printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) #undef pr_notice #define pr_notice(fmt, ...) \ - printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) + printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) #undef pr_info #define pr_info(fmt, ...) \ - printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) + printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) #undef pr_cont #define pr_cont(fmt, ...) 
\ printk(KERN_CONT fmt, ##__VA_ARGS__) @@ -55,7 +55,7 @@ #endif #ifndef DIV_ROUND_UP -#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) #endif #endif /* linux/kernel.h */ diff --git a/datapath/linux/compat/include/linux/lockdep.h b/datapath/linux/compat/include/linux/lockdep.h index 1c839423a..da3dfe8da 100644 --- a/datapath/linux/compat/include/linux/lockdep.h +++ b/datapath/linux/compat/include/linux/lockdep.h @@ -27,8 +27,7 @@ struct lockdep_map; /* * Lock-class usage-state bits: */ -enum lock_usage_bit -{ +enum lock_usage_bit { LOCK_USED = 0, LOCK_USED_IN_HARDIRQ, LOCK_USED_IN_SOFTIRQ, @@ -212,7 +211,7 @@ struct held_lock { struct lockdep_map *instance; #ifdef CONFIG_LOCK_STAT - u64 waittime_stamp; + u64 waittime_stamp; u64 holdtime_stamp; #endif /* diff --git a/datapath/linux/compat/include/linux/mutex.h b/datapath/linux/compat/include/linux/mutex.h index 020a28097..38a3d0d53 100644 --- a/datapath/linux/compat/include/linux/mutex.h +++ b/datapath/linux/compat/include/linux/mutex.h @@ -5,7 +5,7 @@ #include #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) -#include +#include struct mutex { struct semaphore sema; @@ -15,7 +15,7 @@ struct mutex { #define mutex_destroy(mutex) do { } while (0) #define __MUTEX_INITIALIZER(name) \ - __SEMAPHORE_INITIALIZER(name,1) + __SEMAPHORE_INITIALIZER(name, 1) #define DEFINE_MUTEX(mutexname) \ struct mutex mutexname = { __MUTEX_INITIALIZER(mutexname.sema) } diff --git a/datapath/linux/compat/include/linux/netdevice.h b/datapath/linux/compat/include/linux/netdevice.h index 8d24cd969..0c2f2f4c7 100644 --- a/datapath/linux/compat/include/linux/netdevice.h +++ b/datapath/linux/compat/include/linux/netdevice.h @@ -60,11 +60,11 @@ typedef int netdev_tx_t; #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) /* Linux 2.6.24 added a network namespace pointer to the macro. */ #undef for_each_netdev -#define for_each_netdev(net,d) list_for_each_entry(d, &dev_base_head, dev_list) +#define for_each_netdev(net, d) list_for_each_entry(d, &dev_base_head, dev_list) #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -#define net_xmit_eval(e) ((e) == NET_XMIT_CN? 0 : (e)) +#define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 
0 : (e)) #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) @@ -100,7 +100,7 @@ static inline void netdev_rx_handler_unregister(struct net_device *dev) #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) #undef SET_ETHTOOL_OPS #define SET_ETHTOOL_OPS(netdev, ops) \ - ( (netdev)->ethtool_ops = (struct ethtool_ops *)(ops) ) + ((netdev)->ethtool_ops = (struct ethtool_ops *)(ops)) #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) @@ -149,8 +149,8 @@ u32 rpl_netif_skb_features(struct sk_buff *skb); #define netif_needs_gso rpl_netif_needs_gso static inline int rpl_netif_needs_gso(struct sk_buff *skb, int features) { - return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || - unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); + return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || + unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); } #endif diff --git a/datapath/linux/compat/include/linux/netlink.h b/datapath/linux/compat/include/linux/netlink.h index 16660977c..44a20738b 100644 --- a/datapath/linux/compat/include/linux/netlink.h +++ b/datapath/linux/compat/include/linux/netlink.h @@ -7,7 +7,7 @@ #ifndef NLA_TYPE_MASK #define NLA_F_NESTED (1 << 15) #define NLA_F_NET_BYTEORDER (1 << 14) -#define NLA_TYPE_MASK ~(NLA_F_NESTED | NLA_F_NET_BYTEORDER) +#define NLA_TYPE_MASK (~(NLA_F_NESTED | NLA_F_NET_BYTEORDER)) #endif #include diff --git a/datapath/linux/compat/include/linux/skbuff.h b/datapath/linux/compat/include/linux/skbuff.h index b728c5179..ccf471714 100644 --- a/datapath/linux/compat/include/linux/skbuff.h +++ b/datapath/linux/compat/include/linux/skbuff.h @@ -12,22 +12,22 @@ #define skb_headroom rpl_skb_headroom static inline unsigned int rpl_skb_headroom(const struct sk_buff *skb) { - return skb->data - skb->head; + return skb->data - skb->head; } #endif #ifndef HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb, - const int offset, void *to, - const unsigned int len) + const int offset, void *to, + const unsigned int len) { memcpy(to, skb->data + offset, len); } static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb, - const int offset, - const void *from, - const unsigned int len) + const int offset, + const void *from, + const unsigned int len) { memcpy(skb->data + offset, from, len); } @@ -55,7 +55,7 @@ static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb, #ifndef HAVE_SKB_COW_HEAD static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom, - int cloned) + int cloned) { int delta = 0; @@ -174,7 +174,7 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) static inline int skb_transport_offset(const struct sk_buff *skb) { - return skb_transport_header(skb) - skb->data; + return skb_transport_header(skb) - skb->data; } static inline int skb_network_offset(const struct sk_buff *skb) diff --git a/datapath/linux/compat/include/linux/tcp.h b/datapath/linux/compat/include/linux/tcp.h index 6fad1933b..9260b191b 100644 --- a/datapath/linux/compat/include/linux/tcp.h +++ b/datapath/linux/compat/include/linux/tcp.h @@ -11,7 +11,7 @@ static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb) static inline unsigned int tcp_hdrlen(const struct sk_buff *skb) { - return tcp_hdr(skb)->doff * 4; + return tcp_hdr(skb)->doff * 4; } #endif /* !HAVE_SKBUFF_HEADER_HELPERS */ diff --git a/datapath/linux/compat/include/linux/timer.h b/datapath/linux/compat/include/linux/timer.h index 6c3a9b0f5..b9954a525 100644 --- a/datapath/linux/compat/include/linux/timer.h +++ 
b/datapath/linux/compat/include/linux/timer.h @@ -6,11 +6,11 @@ #include #ifndef RHEL_RELEASE_VERSION -#define RHEL_RELEASE_VERSION(X,Y) ( 0 ) +#define RHEL_RELEASE_VERSION(X, Y) (0) #endif #if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) && \ - (!defined(RHEL_RELEASE_CODE) || \ - (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,1)))) + (!defined(RHEL_RELEASE_CODE) || \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5, 1)))) extern unsigned long volatile jiffies; @@ -88,7 +88,7 @@ static inline unsigned long __round_jiffies(unsigned long j, int cpu) */ static inline unsigned long round_jiffies(unsigned long j) { - return __round_jiffies(j, 0); // FIXME + return __round_jiffies(j, 0); /* FIXME */ } #endif /* linux kernel < 2.6.20 */ diff --git a/datapath/linux/compat/include/net/genetlink.h b/datapath/linux/compat/include/net/genetlink.h index b24f8518a..a1ff7c1c3 100644 --- a/datapath/linux/compat/include/net/genetlink.h +++ b/datapath/linux/compat/include/net/genetlink.h @@ -52,8 +52,7 @@ extern int busted_nlmsg_multicast(struct sock *sk, struct sk_buff *skb, * @list: list entry for linking * @family: pointer to family, need not be set before registering */ -struct genl_multicast_group -{ +struct genl_multicast_group { struct genl_family *family; /* private */ struct list_head list; /* private */ char name[GENL_NAMSIZ]; diff --git a/datapath/linux/compat/include/net/netlink.h b/datapath/linux/compat/include/net/netlink.h index be118edcb..8bd6baab0 100644 --- a/datapath/linux/compat/include/net/netlink.h +++ b/datapath/linux/compat/include/net/netlink.h @@ -31,17 +31,17 @@ #ifndef NLA_PUT_BE16 #define NLA_PUT_BE16(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, __be16, attrtype, value) + NLA_PUT_TYPE(skb, __be16, attrtype, value) #endif /* !NLA_PUT_BE16 */ #ifndef NLA_PUT_BE32 #define NLA_PUT_BE32(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, __be32, attrtype, value) + NLA_PUT_TYPE(skb, __be32, attrtype, value) #endif /* !NLA_PUT_BE32 */ #ifndef NLA_PUT_BE64 #define NLA_PUT_BE64(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, __be64, attrtype, value) + NLA_PUT_TYPE(skb, __be64, attrtype, value) #endif /* !NLA_PUT_BE64 */ #ifndef HAVE_NLA_GET_BE16 @@ -51,7 +51,7 @@ */ static inline __be16 nla_get_be16(const struct nlattr *nla) { - return *(__be16 *) nla_data(nla); + return *(__be16 *) nla_data(nla); } #endif /* !HAVE_NLA_GET_BE16 */ @@ -62,7 +62,7 @@ static inline __be16 nla_get_be16(const struct nlattr *nla) */ static inline __be32 nla_get_be32(const struct nlattr *nla) { - return *(__be32 *) nla_data(nla); + return *(__be32 *) nla_data(nla); } #endif @@ -91,12 +91,12 @@ static inline __be32 nla_get_be32(const struct nlattr *nla) #define nla_get_be64 rpl_nla_get_be64 static inline __be64 nla_get_be64(const struct nlattr *nla) { - __be64 tmp; + __be64 tmp; /* The additional cast is necessary because */ - nla_memcpy(&tmp, (struct nlattr *) nla, sizeof(tmp)); + nla_memcpy(&tmp, (struct nlattr *) nla, sizeof(tmp)); - return tmp; + return tmp; } #endif @@ -107,13 +107,14 @@ static inline __be64 nla_get_be64(const struct nlattr *nla) */ static inline int nla_type(const struct nlattr *nla) { - return nla->nla_type & NLA_TYPE_MASK; + return nla->nla_type & NLA_TYPE_MASK; } #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) #define nla_parse_nested(tb, maxtype, nla, policy) \ - nla_parse_nested(tb, maxtype, (struct nlattr *)(nla), (struct nla_policy *)(policy)) + nla_parse_nested(tb, maxtype, (struct nlattr *)(nla), \ + (struct nla_policy *)(policy)) #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) #define 
nla_parse_nested(tb, maxtype, nla, policy) \
        nla_parse_nested(tb, maxtype, (struct nlattr *)(nla), policy)
diff --git a/datapath/linux/compat/time.c b/datapath/linux/compat/time.c
index ad3ee9440..490e1a4f7 100644
--- a/datapath/linux/compat/time.c
+++ b/datapath/linux/compat/time.c
@@ -22,7 +22,7 @@
  * For negative values only the tv_sec field is negative !
  */
 void set_normalized_timespec(struct timespec *ts,
-                        time_t sec, long nsec)
+                        time_t sec, long nsec)
 {
         while (nsec >= NSEC_PER_SEC) {
                 nsec -= NSEC_PER_SEC;
diff --git a/datapath/tunnel.c b/datapath/tunnel.c
index 372d90ed1..9bb9cef3a 100644
--- a/datapath/tunnel.c
+++ b/datapath/tunnel.c
@@ -108,7 +108,7 @@ static struct hh_cache *rt_hh(struct rtable *rt)
 #define rt_hh(rt) (rt_dst(rt).hh)
 #endif
 
-static inline struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
+static struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
 {
         return vport_from_priv(tnl_vport);
 }
@@ -116,13 +116,13 @@ static inline struct vport *tnl_vport_to_vport(const struct tnl_vport
 /* This is analogous to rtnl_dereference for the tunnel cache. It checks that
  * cache_lock is held, so it is only for update side code.
  */
-static inline struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
+static struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
 {
         return rcu_dereference_protected(tnl_vport->cache,
-                                        lockdep_is_held(&tnl_vport->cache_lock));
+                                        lockdep_is_held(&tnl_vport->cache_lock));
 }
 
-static inline void schedule_cache_cleaner(void)
+static void schedule_cache_cleaner(void)
 {
         schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
 }
@@ -206,10 +206,10 @@ static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
 
 static u32 port_hash(const struct port_lookup_key *key)
 {
-        return jhash2((u32*)key, (PORT_KEY_LEN / sizeof(u32)), 0);
+        return jhash2((u32 *)key, (PORT_KEY_LEN / sizeof(u32)), 0);
 }
 
-static inline struct hlist_head *find_bucket(u32 hash)
+static struct hlist_head *find_bucket(u32 hash)
 {
         return &port_table[(hash & (PORT_TABLE_SIZE - 1))];
 }
@@ -265,7 +265,7 @@ static struct vport *port_table_lookup(struct port_lookup_key *key,
         struct hlist_node *n;
         struct hlist_head *bucket;
         u32 hash = port_hash(key);
-        struct tnl_vport * tnl_vport;
+        struct tnl_vport *tnl_vport;
 
         bucket = find_bucket(hash);
 
@@ -590,7 +590,8 @@ static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
 }
 #endif /* IPv6 */
 
-bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
+bool tnl_frag_needed(struct vport *vport,
+                     const struct tnl_mutable_config *mutable,
                      struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
 {
         unsigned int eth_hdr_len = ETH_HLEN;
@@ -787,13 +788,13 @@ static void create_tunnel_header(const struct vport *vport,
         tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
 }
 
-static inline void *get_cached_header(const struct tnl_cache *cache)
+static void *get_cached_header(const struct tnl_cache *cache)
 {
         return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
 }
 
-static inline bool check_cache_valid(const struct tnl_cache *cache,
-                                     const struct tnl_mutable_config *mutable)
+static bool check_cache_valid(const struct tnl_cache *cache,
+                              const struct tnl_mutable_config *mutable)
 {
         struct hh_cache *hh;
 
@@ -848,8 +849,7 @@ static void cache_cleaner(struct work_struct *work)
         rcu_read_unlock();
 }
 
-static inline void create_eth_hdr(struct tnl_cache *cache,
-                                  struct hh_cache *hh)
+static void create_eth_hdr(struct tnl_cache *cache, struct hh_cache *hh)
 {
         void *cache_data = get_cached_header(cache);
         int hh_off;
@@ -975,8 +975,8 @@ static struct rtable *__find_route(const struct tnl_mutable_config *mutable,
                             u8 ipproto, u8 tos)
 {
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
-        struct flowi fl = { .nl_u = { .ip4_u =
-                                        { .daddr = mutable->key.daddr,
+        struct flowi fl = { .nl_u = { .ip4_u = {
+                                        .daddr = mutable->key.daddr,
                                           .saddr = mutable->key.saddr,
                                           .tos = tos } },
                             .proto = ipproto };
@@ -1006,7 +1006,8 @@ static struct rtable *find_route(struct vport *vport,
         *cache = NULL;
         tos = RT_TOS(tos);
 
-        if (likely(tos == mutable->tos && check_cache_valid(cur_cache, mutable))) {
+        if (likely(tos == mutable->tos &&
+            check_cache_valid(cur_cache, mutable))) {
                 *cache = cur_cache;
                 return cur_cache->rt;
         } else {
@@ -1023,7 +1024,7 @@ static struct rtable *find_route(struct vport *vport,
         }
 }
 
-static inline bool need_linearize(const struct sk_buff *skb)
+static bool need_linearize(const struct sk_buff *skb)
 {
         int i;
 
@@ -1275,14 +1276,16 @@ int tnl_send(struct vport *vport, struct sk_buff *skb)
                 iph->frag_off = frag_off;
                 ip_select_ident(iph, &rt_dst(rt), NULL);
 
-                skb = tnl_vport->tnl_ops->update_header(vport, mutable, &rt_dst(rt), skb);
+                skb = tnl_vport->tnl_ops->update_header(vport, mutable,
+                                                        &rt_dst(rt), skb);
                 if (unlikely(!skb))
                         goto next;
 
                 if (likely(cache)) {
                         int orig_len = skb->len - cache->len;
-                        struct vport *cache_vport = internal_dev_get_vport(rt_dst(rt).dev);
+                        struct vport *cache_vport;
 
+                        cache_vport = internal_dev_get_vport(rt_dst(rt).dev);
                         skb->protocol = htons(ETH_P_IP);
                         iph = ip_hdr(skb);
                         iph->tot_len = htons(skb->len - skb_network_offset(skb));
@@ -1337,7 +1340,8 @@ static const struct nla_policy tnl_policy[OVS_TUNNEL_ATTR_MAX + 1] = {
         [OVS_TUNNEL_ATTR_TTL] = { .type = NLA_U8 },
 };
 
-/* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be zeroed. */
+/* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be
+ * zeroed. */
 static int tnl_set_config(struct nlattr *options, const struct tnl_ops *tnl_ops,
                           const struct vport *cur_vport,
                           struct tnl_mutable_config *mutable)
@@ -1617,7 +1621,7 @@ void tnl_exit(void)
         int i;
 
         for (i = 0; i < PORT_TABLE_SIZE; i++) {
-                struct tnl_vport * tnl_vport;
+                struct tnl_vport *tnl_vport;
                 struct hlist_head *hash_head;
                 struct hlist_node *n;
 
diff --git a/datapath/tunnel.h b/datapath/tunnel.h
index f80df99bf..fff6ca6ca 100644
--- a/datapath/tunnel.h
+++ b/datapath/tunnel.h
@@ -35,8 +35,10 @@
 #define TNL_T_KEY_MATCH        (1 << 11)
 
 /* Private flags not exposed to userspace in this form. */
-#define TNL_F_IN_KEY_MATCH     (1 << 16) /* Store the key in tun_id to match in flow table. */
-#define TNL_F_OUT_KEY_ACTION   (1 << 17) /* Get the key from a SET_TUNNEL action. */
+#define TNL_F_IN_KEY_MATCH     (1 << 16) /* Store the key in tun_id to
+                                          * match in flow table. */
+#define TNL_F_OUT_KEY_ACTION   (1 << 17) /* Get the key from a SET_TUNNEL
+                                          * action. */
 
 /* All public tunnel flags. */
 #define TNL_F_PUBLIC (TNL_F_CSUM | TNL_F_TOS_INHERIT | TNL_F_TTL_INHERIT | \
@@ -57,7 +59,7 @@ struct port_lookup_key {
         u32 tunnel_type;
 };
 
-#define PORT_KEY_LEN   (offsetof(struct port_lookup_key, tunnel_type) + \
+#define PORT_KEY_LEN   (offsetof(struct port_lookup_key, tunnel_type) + \
                         FIELD_SIZEOF(struct port_lookup_key, tunnel_type))
 
 /**
@@ -153,7 +155,8 @@ struct tnl_cache {
         int len;                /* Length of data to be memcpy'd from cache. */
         int hh_len;             /* Hardware hdr length, cached from hh_cache. */
 
-        /* Sequence number of mutable->seq from which this cache was generated. */
+        /* Sequence number of mutable->seq from which this cache was
+         * generated. */
         unsigned mutable_seq;
 
 #ifdef HAVE_HH_SEQ
@@ -216,7 +219,7 @@ struct tnl_vport {
         atomic_t frag_id;
 
         spinlock_t cache_lock;
-        struct tnl_cache __rcu *cache;  /* Protected by RCU/cache_lock. */
+        struct tnl_cache __rcu *cache;  /* Protected by RCU/cache_lock. */
 
 #ifdef NEED_CACHE_TIMEOUT
         /*
diff --git a/datapath/vport-capwap.c b/datapath/vport-capwap.c
index 8d78b6d10..191156b80 100644
--- a/datapath/vport-capwap.c
+++ b/datapath/vport-capwap.c
@@ -32,7 +32,7 @@
 
 #define CAPWAP_FRAG_TIMEOUT (30 * HZ)
 #define CAPWAP_FRAG_MAX_MEM (256 * 1024)
-#define CAPWAP_FRAG_PRUNE_MEM (192 *1024)
+#define CAPWAP_FRAG_PRUNE_MEM (192 * 1024)
 #define CAPWAP_FRAG_SECRET_INTERVAL (10 * 60 * HZ)
 
 /*
@@ -100,7 +100,7 @@ struct capwaphdr_wsi_key {
 /* Flag indicating a 64bit key is stored in WSI data field */
 #define CAPWAP_WSI_F_KEY64 0x80
 
-static inline struct capwaphdr *capwap_hdr(const struct sk_buff *skb)
+static struct capwaphdr *capwap_hdr(const struct sk_buff *skb)
 {
         return (struct capwaphdr *)(udp_hdr(skb) + 1);
 }
@@ -170,7 +170,7 @@ static int capwap_hdr_len(const struct tnl_mutable_config *mutable)
         if (mutable->flags & TNL_F_CSUM)
                 return -EINVAL;
 
-        /* if keys are specified, then add WSI field */
+        /* if keys are specified, then add WSI field */
         if (mutable->out_key || (mutable->flags & TNL_F_OUT_KEY_ACTION)) {
                 size += sizeof(struct capwaphdr_wsi) +
                         sizeof(struct capwaphdr_wsi_key);
@@ -282,8 +282,7 @@ static int process_capwap_wsi(struct sk_buff *skb, __be64 *key)
         return 0;
 }
 
-static inline struct sk_buff *process_capwap_proto(struct sk_buff *skb,
-                                                   __be64 *key)
+static struct sk_buff *process_capwap_proto(struct sk_buff *skb, __be64 *key)
 {
         struct capwaphdr *cwh = capwap_hdr(skb);
         int hdr_len = sizeof(struct udphdr);
@@ -518,7 +517,7 @@ error:
 
 /* All of the following functions relate to fragmentation reassembly. */
 
-static inline struct frag_queue *ifq_cast(struct inet_frag_queue *ifq)
+static struct frag_queue *ifq_cast(struct inet_frag_queue *ifq)
 {
         return container_of(ifq, struct frag_queue, ifq);
 }
diff --git a/datapath/vport-internal_dev.c b/datapath/vport-internal_dev.c
index 20bfabe08..26b432b7c 100644
--- a/datapath/vport-internal_dev.c
+++ b/datapath/vport-internal_dev.c
@@ -33,7 +33,7 @@ struct internal_dev {
 #endif
 };
 
-static inline struct internal_dev *internal_dev_priv(struct net_device *netdev)
+static struct internal_dev *internal_dev_priv(struct net_device *netdev)
 {
         return netdev_priv(netdev);
 }
@@ -138,7 +138,8 @@ static int internal_dev_change_mtu(struct net_device *netdev, int new_mtu)
         return 0;
 }
 
-static int internal_dev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int internal_dev_do_ioctl(struct net_device *dev,
+                                 struct ifreq *ifr, int cmd)
 {
         if (dp_ioctl_hook)
                 return dp_ioctl_hook(dev, ifr, cmd);
@@ -212,7 +213,8 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
         struct internal_dev *internal_dev;
         int err;
 
-        vport = vport_alloc(sizeof(struct netdev_vport), &internal_vport_ops, parms);
+        vport = vport_alloc(sizeof(struct netdev_vport),
+                            &internal_vport_ops, parms);
         if (IS_ERR(vport)) {
                 err = PTR_ERR(vport);
                 goto error;
@@ -220,7 +222,8 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 
         netdev_vport = netdev_vport_priv(vport);
 
-        netdev_vport->dev = alloc_netdev(sizeof(struct internal_dev), parms->name, do_setup);
+        netdev_vport->dev = alloc_netdev(sizeof(struct internal_dev),
+                                         parms->name, do_setup);
         if (!netdev_vport->dev) {
                 err = -ENOMEM;
                 goto error_free_vport;
diff --git a/datapath/vport-netdev.c b/datapath/vport-netdev.c
index 9c01079b0..a9d923865 100644
--- a/datapath/vport-netdev.c
+++ b/datapath/vport-netdev.c
@@ -25,10 +25,10 @@
 #include "vport-netdev.h"
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) && \
-        !defined(HAVE_VLAN_BUG_WORKAROUND)
+    !defined(HAVE_VLAN_BUG_WORKAROUND)
 #include
 
-static int vlan_tso __read_mostly = 0;
+static int vlan_tso __read_mostly;
 module_param(vlan_tso, int, 0644);
 MODULE_PARM_DESC(vlan_tso, "Enable TSO for VLAN packets");
 #else
@@ -120,7 +120,8 @@ static struct vport *netdev_create(const struct vport_parms *parms)
         struct netdev_vport *netdev_vport;
         int err;
 
-        vport = vport_alloc(sizeof(struct netdev_vport), &netdev_vport_ops, parms);
+        vport = vport_alloc(sizeof(struct netdev_vport),
+                            &netdev_vport_ops, parms);
         if (IS_ERR(vport)) {
                 err = PTR_ERR(vport);
                 goto error;
@@ -262,7 +263,7 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
         vport_receive(vport, skb);
 }
 
-static inline unsigned packet_length(const struct sk_buff *skb)
+static unsigned packet_length(const struct sk_buff *skb)
 {
         unsigned length = skb->len - ETH_HLEN;
 
@@ -378,7 +379,7 @@ struct vport *netdev_get_vport(struct net_device *dev)
 #if IFF_BRIDGE_PORT != IFF_OVS_DATAPATH
         if (likely(dev->priv_flags & IFF_OVS_DATAPATH))
 #else
-        if (likely(rcu_access_pointer(dev->rx_handler) == netdev_frame_hook))
+        if (likely(rcu_access_pointer(dev->rx_handler) == netdev_frame_hook))
 #endif
                 return (struct vport *)rcu_dereference_rtnl(dev->rx_handler_data);
         else
diff --git a/datapath/vport-patch.c b/datapath/vport-patch.c
index 9554f1214..ba10903b9 100644
--- a/datapath/vport-patch.c
+++ b/datapath/vport-patch.c
@@ -41,7 +41,7 @@ static struct hlist_head *peer_table;
 
 static void update_peers(const char *name, struct vport *);
 
-static inline struct patch_vport *patch_vport_priv(const struct vport *vport)
+static struct patch_vport *patch_vport_priv(const struct vport *vport)
 {
         return vport_priv(vport);
 }
@@ -127,7 +127,8 @@ static struct vport *patch_create(const struct vport_parms *parms)
         struct patch_config *patchconf;
         int err;
 
-        vport = vport_alloc(sizeof(struct patch_vport), &patch_vport_ops, parms);
+        vport = vport_alloc(sizeof(struct patch_vport),
+                            &patch_vport_ops, parms);
         if (IS_ERR(vport)) {
                 err = PTR_ERR(vport);
                 goto error;
diff --git a/datapath/vport.c b/datapath/vport.c
index ad5a10e7a..31edc587a 100644
--- a/datapath/vport.c
+++ b/datapath/vport.c
@@ -159,7 +159,8 @@ static struct kobj_type brport_ktype = {
  * vport_priv(). vports that are no longer needed should be released with
  * vport_free().
  */
-struct vport *vport_alloc(int priv_size, const struct vport_ops *ops, const struct vport_parms *parms)
+struct vport *vport_alloc(int priv_size, const struct vport_ops *ops,
+                          const struct vport_parms *parms)
 {
         struct vport *vport;
         size_t alloc_size;
diff --git a/datapath/vport.h b/datapath/vport.h
index 2c9c4aa1d..6391f71df 100644
--- a/datapath/vport.h
+++ b/datapath/vport.h
@@ -208,7 +208,8 @@ enum vport_err_type {
         VPORT_E_TX_ERROR,
 };
 
-struct vport *vport_alloc(int priv_size, const struct vport_ops *, const struct vport_parms *);
+struct vport *vport_alloc(int priv_size, const struct vport_ops *,
+                          const struct vport_parms *);
 void vport_free(struct vport *);
 
 #define VPORT_ALIGN 8
diff --git a/include/openvswitch/tunnel.h b/include/openvswitch/tunnel.h
index 110e65277..b9a1c8ea3 100644
--- a/include/openvswitch/tunnel.h
+++ b/include/openvswitch/tunnel.h
@@ -63,10 +63,11 @@ enum {
 #define OVS_TUNNEL_ATTR_MAX (__OVS_TUNNEL_ATTR_MAX - 1)
 
 #define TNL_F_CSUM             (1 << 0) /* Checksum packets. */
-#define TNL_F_TOS_INHERIT      (1 << 1) /* Inherit the ToS from the inner packet. */
-#define TNL_F_TTL_INHERIT      (1 << 2) /* Inherit the TTL from the inner packet. */
-#define TNL_F_DF_INHERIT       (1 << 3) /* Inherit the DF bit from the inner packet. */
-#define TNL_F_DF_DEFAULT       (1 << 4) /* Set the DF bit if inherit off or not IP. */
+#define TNL_F_TOS_INHERIT      (1 << 1) /* Inherit ToS from inner packet. */
+#define TNL_F_TTL_INHERIT      (1 << 2) /* Inherit TTL from inner packet. */
+#define TNL_F_DF_INHERIT       (1 << 3) /* Inherit DF bit from inner packet. */
+#define TNL_F_DF_DEFAULT       (1 << 4) /* Set DF bit if inherit off or
+                                         * not IP. */
#define TNL_F_PMTUD             (1 << 5) /* Enable path MTU discovery. */
 #define TNL_F_HDR_CACHE        (1 << 6) /* Enable tunnel header caching. */
 #define TNL_F_IPSEC            (1 << 7) /* Traffic is IPsec encrypted. */
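
The cache_dereference() hunk in datapath/tunnel.c above uses the standard RCU
update-side accessor pattern. The fragment below is only an illustrative
sketch, not code from this patch: the example_* names are hypothetical and the
surrounding module context is assumed.

#include <linux/lockdep.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct example_cache;

struct example_vport {
        spinlock_t cache_lock;              /* Serializes cache updates. */
        struct example_cache __rcu *cache;  /* Readers access under RCU. */
};

/* Update-side accessor: the caller must hold cache_lock, and
 * rcu_dereference_protected() lets lockdep verify that claim instead of
 * requiring an RCU read-side critical section. */
static struct example_cache *example_cache_deref(struct example_vport *vp)
{
        return rcu_dereference_protected(vp->cache,
                                         lockdep_is_held(&vp->cache_lock));
}

Read-side callers would instead use rcu_dereference() inside
rcu_read_lock()/rcu_read_unlock(), which is why the helper in the patch is
reserved for update-side code.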
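
The port_hash()/find_bucket() pair in the same file shows the common idiom of
hashing a fixed-size lookup key as an array of u32 words and masking the
result into a power-of-two bucket table. A minimal sketch under the same
assumptions (the example_* names and the table size are placeholders, not the
patch's port_lookup_key/PORT_TABLE_SIZE):

#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/types.h>

#define EXAMPLE_TABLE_SIZE 1024         /* Must be a power of two. */

struct example_key {                    /* Only u32-sized members, so the
                                         * struct has no padding holes and
                                         * can be hashed as raw words. */
        __be32 saddr;
        __be32 daddr;
        u32 tunnel_type;
        u32 flags;
};

static struct hlist_head example_table[EXAMPLE_TABLE_SIZE];

static u32 example_hash(const struct example_key *key)
{
        /* jhash2() consumes the key one u32 at a time. */
        return jhash2((const u32 *)key, sizeof(*key) / sizeof(u32), 0);
}

static struct hlist_head *example_bucket(u32 hash)
{
        /* Masking works only because the table size is a power of two. */
        return &example_table[hash & (EXAMPLE_TABLE_SIZE - 1)];
}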
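
The vport_alloc()/vport_priv() signatures touched in datapath/vport.c and
datapath/vport.h follow the usual "allocate the common struct plus a
caller-sized private area" layout (compare netdev_priv()). A hedged sketch of
that layout, with hypothetical example_* names and an 8-byte alignment chosen
to mirror VPORT_ALIGN:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_ops;

struct example_vport {
        const struct example_ops *ops;
        /* 'priv_size' bytes of per-type private data follow the struct. */
};

static struct example_vport *example_alloc(size_t priv_size,
                                           const struct example_ops *ops)
{
        struct example_vport *vport;

        /* Round the base struct up so the private area stays aligned. */
        vport = kzalloc(ALIGN(sizeof(*vport), 8) + priv_size, GFP_KERNEL);
        if (!vport)
                return NULL;
        vport->ops = ops;
        return vport;
}

static void *example_priv(struct example_vport *vport)
{
        return (u8 *)vport + ALIGN(sizeof(*vport), 8);
}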
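
Finally, the TNL_F_* hunks in datapath/tunnel.h and include/openvswitch/tunnel.h
keep userspace-visible flag bits (collected in TNL_F_PUBLIC) separate from
private bits starting at 1 << 16. A short sketch of how such a split is
typically enforced at the configuration boundary; the example_check_flags()
helper is illustrative only and not taken from the patch:

#include <linux/errno.h>
#include <linux/types.h>

#define EX_F_CSUM       (1 << 0)        /* Public, part of the ABI. */
#define EX_F_PMTUD      (1 << 5)        /* Public. */
#define EX_F_PUBLIC     (EX_F_CSUM | EX_F_PMTUD)

#define EX_F_IN_KEY_MATCH (1 << 16)     /* Private, kernel-internal only. */

/* Reject any configuration word that sets bits outside the public mask,
 * so private bits can never be injected from userspace. */
static int example_check_flags(u32 flags)
{
        return (flags & ~EX_F_PUBLIC) ? -EINVAL : 0;
}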