1. ovs_dp_process_packet looks up the packet's flow in the kernel flow table (the datapath's flow cache). On a hit it runs the flow's actions via ovs_execute_actions -> do_execute_actions, and one of the actions that can appear there is OVS_ACTION_ATTR_HASH.

 /* Must be called with rcu_read_lock. */
void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
{
const struct vport *p = OVS_CB(skb)->input_vport;
struct datapath *dp = p->dp;
struct sw_flow *flow;
struct sw_flow_actions *sf_acts;
struct dp_stats_percpu *stats;
u64 *stats_counter;
u32 n_mask_hit;

stats = this_cpu_ptr(dp->stats_percpu);

/* Look up flow. */
flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
&n_mask_hit);
if (unlikely(!flow)) {
struct dp_upcall_info upcall;
int error;

memset(&upcall, 0, sizeof(upcall));
upcall.cmd = OVS_PACKET_CMD_MISS;
upcall.portid = ovs_vport_find_upcall_portid(p, skb);
upcall.mru = OVS_CB(skb)->mru;
error = ovs_dp_upcall(dp, skb, key, &upcall, 0);
if (unlikely(error))
kfree_skb(skb);
else
consume_skb(skb);
stats_counter = &stats->n_missed;
goto out;
}

ovs_flow_stats_update(flow, key->tp.flags, skb);
sf_acts = rcu_dereference(flow->sf_acts);
ovs_execute_actions(dp, skb, sf_acts, key);

stats_counter = &stats->n_hit;
out:
/* Update datapath statistics. */
u64_stats_update_begin(&stats->syncp);
(*stats_counter)++;
stats->n_mask_hit += n_mask_hit;
u64_stats_update_end(&stats->syncp);
}
 /* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
const struct sw_flow_actions *acts,
struct sw_flow_key *key)
{
int err, level;

level = __this_cpu_inc_return(exec_actions_level);
if (unlikely(level > OVS_RECURSION_LIMIT)) {
net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
ovs_dp_name(dp));
kfree_skb(skb);
err = -ENETDOWN;
goto out;
}

OVS_CB(skb)->acts_origlen = acts->orig_len;
err = do_execute_actions(dp, skb, key,
acts->actions, acts->actions_len);

if (level == 1)
process_deferred_actions(dp);

out:
__this_cpu_dec(exec_actions_level);
return err;
}
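
Before moving on, note the exec_actions_level pattern above: a per-CPU counter bounds recursive action execution (OVS_RECURSION_LIMIT), and the deferred-action queue is drained only by the outermost call (level == 1). A simplified, single-threaded user-space analogue of that pattern is sketched below; the names and the deferred-work counter are made up purely for illustration.

    #include <stdio.h>

    #define RECURSION_LIMIT 4    /* stands in for OVS_RECURSION_LIMIT */

    static int exec_level;        /* the kernel uses a per-CPU exec_actions_level */
    static int deferred_pending;  /* stand-in for the per-CPU deferred-action FIFO */

    static void process_deferred(void)
    {
        while (deferred_pending > 0) {
            deferred_pending--;
            printf("draining one deferred action\n");
        }
    }

    static int execute_actions(int depth)
    {
        int err = 0;
        int level = ++exec_level;   /* mirrors __this_cpu_inc_return() */

        if (level > RECURSION_LIMIT) {
            fprintf(stderr, "recursion limit reached, dropping packet\n");
            err = -1;
            goto out;
        }

        printf("executing actions at level %d\n", level);
        if (depth > 0)
            err = execute_actions(depth - 1);   /* e.g. a recirculation */
        else
            deferred_pending++;                 /* an action was deferred */

        /* Only the outermost invocation drains the deferred queue. */
        if (level == 1)
            process_deferred();
    out:
        exec_level--;
        return err;
    }

    int main(void)
    {
        return execute_actions(2) ? 1 : 0;
    }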

2. For OVS_ACTION_ATTR_HASH, do_execute_actions calls execute_hash:

 static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
const struct nlattr *attr)
{
struct ovs_action_hash *hash_act = nla_data(attr);
u32 hash = 0;

/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
hash = skb_get_hash(skb);
hash = jhash_1word(hash, hash_act->hash_basis);
if (!hash)
hash = 0x1;

key->ovs_flow_hash = hash;
}
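
The action's argument is a struct ovs_action_hash (hash_alg plus hash_basis, from include/uapi/linux/openvswitch.h), and with OVS_HASH_ALG_L4 the kernel simply mixes the skb's flow hash with the basis through jhash_1word, forcing a non-zero result. The computation can be reproduced in user space as a quick check; the sketch below transcribes jhash_1word from the kernel's include/linux/jhash.h, and skb_hash stands in for skb_get_hash().

    #include <stdint.h>
    #include <stdio.h>

    /* jhash_1word() transcribed from the kernel's include/linux/jhash.h
     * (Bob Jenkins' lookup3 final mix), so the result matches what
     * execute_hash() stores in key->ovs_flow_hash for OVS_HASH_ALG_L4.
     */
    #define JHASH_INITVAL 0xdeadbeef

    static uint32_t rol32(uint32_t word, unsigned int shift)
    {
        return (word << (shift & 31)) | (word >> ((-shift) & 31));
    }

    #define __jhash_final(a, b, c)          \
    {                                       \
        c ^= b; c -= rol32(b, 14);          \
        a ^= c; a -= rol32(c, 11);          \
        b ^= a; b -= rol32(a, 25);          \
        c ^= b; c -= rol32(b, 16);          \
        a ^= c; a -= rol32(c, 4);           \
        b ^= a; b -= rol32(a, 14);          \
        c ^= b; c -= rol32(b, 24);          \
    }

    static uint32_t jhash_1word(uint32_t a, uint32_t initval)
    {
        uint32_t b = 0, c = 0;

        /* __jhash_nwords(a, 0, 0, initval + JHASH_INITVAL + (1 << 2)) */
        a += initval + JHASH_INITVAL + (1 << 2);
        b += initval + JHASH_INITVAL + (1 << 2);
        c += initval + JHASH_INITVAL + (1 << 2);
        __jhash_final(a, b, c);
        return c;
    }

    /* Mirror of execute_hash(): skb_hash stands in for skb_get_hash(skb),
     * hash_basis comes from struct ovs_action_hash in the action attribute.
     */
    static uint32_t ovs_l4_hash(uint32_t skb_hash, uint32_t hash_basis)
    {
        uint32_t hash = jhash_1word(skb_hash, hash_basis);

        return hash ? hash : 0x1;   /* the kernel never stores a zero hash */
    }

    int main(void)
    {
        printf("ovs_flow_hash = 0x%08x\n", ovs_l4_hash(0x12345678u, 0));
        return 0;
    }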

3. This action only sets the ovs_flow_hash member of the flow key. Tracing backwards from where that member is read, it is ultimately consumed by queue_userspace_packet, the function that delivers a packet to the user-space process as an upcall. Let's see how queue_userspace_packet uses it.

 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
const struct sw_flow_key *key,
const struct dp_upcall_info *upcall_info,
uint32_t cutlen)
{
struct ovs_header *upcall;
struct sk_buff *nskb = NULL;
struct sk_buff *user_skb = NULL; /* to be queued to userspace */
struct nlattr *nla;
size_t len;
unsigned int hlen;
int err, dp_ifindex;

dp_ifindex = get_dpifindex(dp);
if (!dp_ifindex)
return -ENODEV;

if (skb_vlan_tag_present(skb)) {
nskb = skb_clone(skb, GFP_ATOMIC);
if (!nskb)
return -ENOMEM;

nskb = __vlan_hwaccel_push_inside(nskb);
if (!nskb)
return -ENOMEM;

skb = nskb;
}

if (nla_attr_size(skb->len) > USHRT_MAX) {
err = -EFBIG;
goto out;
}

/* Complete checksum if needed */
if (skb->ip_summed == CHECKSUM_PARTIAL &&
(err = skb_csum_hwoffload_help(skb, 0)))
goto out;

/* Older versions of OVS user space enforce alignment of the last
* Netlink attribute to NLA_ALIGNTO which would require extensive
* padding logic. Only perform zerocopy if padding is not required.
*/
if (dp->user_features & OVS_DP_F_UNALIGNED)
hlen = skb_zerocopy_headlen(skb);
else
hlen = skb->len;

len = upcall_msg_size(upcall_info, hlen - cutlen,
OVS_CB(skb)->acts_origlen);
user_skb = genlmsg_new(len, GFP_ATOMIC);
if (!user_skb) {
err = -ENOMEM;
goto out;
}

upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
0, upcall_info->cmd);
upcall->dp_ifindex = dp_ifindex;

err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
BUG_ON(err);

if (upcall_info->userdata)
__nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
nla_len(upcall_info->userdata),
nla_data(upcall_info->userdata));

if (upcall_info->egress_tun_info) {
nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
err = ovs_nla_put_tunnel_info(user_skb,
upcall_info->egress_tun_info);
BUG_ON(err);
nla_nest_end(user_skb, nla);
}

if (upcall_info->actions_len) {
nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
err = ovs_nla_put_actions(upcall_info->actions,
upcall_info->actions_len,
user_skb);
if (!err)
nla_nest_end(user_skb, nla);
else
nla_nest_cancel(user_skb, nla);
}

/* Add OVS_PACKET_ATTR_MRU */
if (upcall_info->mru) {
if (nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU,
upcall_info->mru)) {
err = -ENOBUFS;
goto out;
}
pad_packet(dp, user_skb);
}

/* Add OVS_PACKET_ATTR_LEN when packet is truncated */
if (cutlen > 0) {
if (nla_put_u32(user_skb, OVS_PACKET_ATTR_LEN,
skb->len)) {
err = -ENOBUFS;
goto out;
}
pad_packet(dp, user_skb);
}

/* Only reserve room for attribute header, packet data is added
* in skb_zerocopy()
*/
if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
err = -ENOBUFS;
goto out;
}
nla->nla_len = nla_attr_size(skb->len - cutlen);

err = skb_zerocopy(user_skb, skb, skb->len - cutlen, hlen);
if (err)
goto out;

/* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
pad_packet(dp, user_skb);

((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;

err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
user_skb = NULL;
out:
if (err)
skb_tx_error(skb);
kfree_skb(user_skb);
kfree_skb(nskb);
return err;
}
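
So on an upcall the kernel unicasts a generic netlink message to the upcall port: nlmsghdr, genlmsghdr, struct ovs_header, then OVS_PACKET_ATTR_KEY (the flow key serialized by ovs_nla_put_key), optional attributes such as USERDATA/EGRESS_TUN_KEY/ACTIONS/MRU/LEN, and finally OVS_PACKET_ATTR_PACKET. A rough user-space sketch of walking such a message is shown below; socket setup and resolving the ovs_packet genetlink family are omitted, and this is only an illustration of the layout, not how ovs-vswitchd actually parses upcalls.

    #include <stdio.h>
    #include <string.h>
    #include <linux/netlink.h>
    #include <linux/genetlink.h>
    #include <linux/openvswitch.h>

    /* Walk one upcall message (OVS_PACKET_CMD_MISS/ACTION) already read from
     * the upcall socket. Layout, as built by queue_userspace_packet():
     * nlmsghdr | genlmsghdr | ovs_header | attributes.
     */
    static void walk_upcall(const struct nlmsghdr *nlh)
    {
        const struct genlmsghdr *genl =
            (const struct genlmsghdr *)((const char *)nlh + NLMSG_HDRLEN);
        const struct ovs_header *ovsh =
            (const struct ovs_header *)((const char *)genl + GENL_HDRLEN);
        const struct nlattr *nla =
            (const struct nlattr *)((const char *)ovsh +
                                    NLMSG_ALIGN(sizeof(*ovsh)));
        int len = nlh->nlmsg_len - NLMSG_HDRLEN - GENL_HDRLEN -
                  NLMSG_ALIGN(sizeof(*ovsh));

        printf("cmd %d on dp_ifindex %d\n", genl->cmd, ovsh->dp_ifindex);

        while (len >= NLA_HDRLEN && nla->nla_len >= NLA_HDRLEN &&
               (int)nla->nla_len <= len) {
            int type = nla->nla_type & NLA_TYPE_MASK;
            int payload = nla->nla_len - NLA_HDRLEN;

            switch (type) {
            case OVS_PACKET_ATTR_KEY:
                printf("flow key attributes, %d payload bytes\n", payload);
                break;
            case OVS_PACKET_ATTR_PACKET:
                printf("packet data, %d payload bytes\n", payload);
                break;
            default:
                printf("attribute type %d, %d payload bytes\n", type, payload);
            }
            len -= NLA_ALIGN(nla->nla_len);
            nla = (const struct nlattr *)((const char *)nla +
                                          NLA_ALIGN(nla->nla_len));
        }
    }

    int main(void)
    {
        /* Fabricate a minimal miss upcall just to exercise walk_upcall(). */
        union {
            struct nlmsghdr align;
            unsigned char buf[128];
        } msg;
        struct nlmsghdr *nlh = &msg.align;
        struct genlmsghdr *genl =
            (struct genlmsghdr *)((char *)nlh + NLMSG_HDRLEN);
        struct ovs_header *ovsh =
            (struct ovs_header *)((char *)genl + GENL_HDRLEN);
        struct nlattr *key =
            (struct nlattr *)((char *)ovsh + NLMSG_ALIGN(sizeof(*ovsh)));

        memset(&msg, 0, sizeof(msg));
        genl->cmd = OVS_PACKET_CMD_MISS;
        ovsh->dp_ifindex = 1;
        key->nla_type = OVS_PACKET_ATTR_KEY;
        key->nla_len = NLA_HDRLEN;          /* empty nested key for the demo */
        nlh->nlmsg_len = NLMSG_HDRLEN + GENL_HDRLEN +
                         NLMSG_ALIGN(sizeof(*ovsh)) + NLA_ALIGN(key->nla_len);

        walk_upcall(nlh);
        return 0;
    }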

4. The ovs_nla_put_key function, called above with attr = OVS_PACKET_ATTR_KEY:

 int ovs_nla_put_key(const struct sw_flow_key *swkey,
const struct sw_flow_key *output, int attr, bool is_mask,
struct sk_buff *skb)
{
int err;
struct nlattr *nla;

nla = nla_nest_start(skb, attr);
if (!nla)
return -EMSGSIZE;
err = __ovs_nla_put_key(swkey, output, is_mask, skb);
if (err)
return err;
nla_nest_end(skb, nla);

return 0;
}
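
ovs_nla_put_key is just a nesting wrapper: nla_nest_start writes the container attribute header (here OVS_PACKET_ATTR_KEY) with a provisional length, __ovs_nla_put_key appends the individual OVS_KEY_ATTR_* attributes as its payload, and nla_nest_end patches the container's nla_len once the payload size is known. The same mechanics can be demonstrated against a plain buffer; the helper names below (msg_buf, nest_start/put_u32/nest_end) are invented for the sketch and are not a real library API.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <linux/netlink.h>
    #include <linux/openvswitch.h>

    struct msg_buf {
        unsigned char data[256];
        int len;
    };

    /* Append an attribute header with a provisional length and return a
     * pointer to it so it can be fixed up later -- what nla_nest_start() does. */
    static struct nlattr *nest_start(struct msg_buf *m, uint16_t type)
    {
        struct nlattr *nla = (struct nlattr *)(m->data + m->len);

        nla->nla_type = type;
        nla->nla_len = NLA_HDRLEN;          /* patched by nest_end() */
        m->len += NLA_HDRLEN;
        return nla;
    }

    /* Append a u32 attribute, advancing by the NLA_ALIGNTO-padded size. */
    static void put_u32(struct msg_buf *m, uint16_t type, uint32_t value)
    {
        struct nlattr *nla = (struct nlattr *)(m->data + m->len);

        nla->nla_type = type;
        nla->nla_len = NLA_HDRLEN + sizeof(value);
        memcpy(nla + 1, &value, sizeof(value));
        m->len += NLA_ALIGN(nla->nla_len);
    }

    /* Make the container cover everything added since nest_start() -- what
     * nla_nest_end() does with the skb tail pointer. */
    static void nest_end(struct msg_buf *m, struct nlattr *start)
    {
        start->nla_len = (m->data + m->len) - (unsigned char *)start;
    }

    int main(void)
    {
        struct msg_buf m = { .len = 0 };
        struct nlattr *key = nest_start(&m, OVS_PACKET_ATTR_KEY);

        /* Tiny stand-in for __ovs_nla_put_key(): emit only the dp_hash field. */
        put_u32(&m, OVS_KEY_ATTR_DP_HASH, 0x1234abcd);
        nest_end(&m, key);

        printf("nested attribute: type %u, total length %u bytes\n",
               (unsigned)key->nla_type, (unsigned)key->nla_len);
        return 0;
    }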

5. The __ovs_nla_put_key function, where ovs_flow_hash is finally emitted as the OVS_KEY_ATTR_DP_HASH attribute:

 static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
const struct sw_flow_key *output, bool is_mask,
struct sk_buff *skb)
{
struct ovs_key_ethernet *eth_key;
struct nlattr *nla;
struct nlattr *encap = NULL;
struct nlattr *in_encap = NULL;

if (nla_put_u32(skb, OVS_KEY_ATTR_RECIRC_ID, output->recirc_id))
goto nla_put_failure;

if (nla_put_u32(skb, OVS_KEY_ATTR_DP_HASH, output->ovs_flow_hash))
goto nla_put_failure;

if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
goto nla_put_failure;

if ((swkey->tun_proto || is_mask)) {
const void *opts = NULL;

if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
opts = TUN_METADATA_OPTS(output, swkey->tun_opts_len);

if (ip_tun_to_nlattr(skb, &output->tun_key, opts,
swkey->tun_opts_len, swkey->tun_proto))
goto nla_put_failure;
}

if (swkey->phy.in_port == DP_MAX_PORTS) {
if (is_mask && (output->phy.in_port == 0xffff))
if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff))
goto nla_put_failure;
} else {
u16 upper_u16;
upper_u16 = !is_mask ? 0 : 0xffff;

if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT,
(upper_u16 << 16) | output->phy.in_port))
goto nla_put_failure;
}

if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
goto nla_put_failure;

if (ovs_ct_put_key(swkey, output, skb))
goto nla_put_failure;

if (ovs_key_mac_proto(swkey) == MAC_PROTO_ETHERNET) {
nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
if (!nla)
goto nla_put_failure;

eth_key = nla_data(nla);
ether_addr_copy(eth_key->eth_src, output->eth.src);
ether_addr_copy(eth_key->eth_dst, output->eth.dst);

if (swkey->eth.vlan.tci || eth_type_vlan(swkey->eth.type)) {
if (ovs_nla_put_vlan(skb, &output->eth.vlan, is_mask))
goto nla_put_failure;
encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
if (!swkey->eth.vlan.tci)
goto unencap;

if (swkey->eth.cvlan.tci || eth_type_vlan(swkey->eth.type)) {
if (ovs_nla_put_vlan(skb, &output->eth.cvlan, is_mask))
goto nla_put_failure;
in_encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
if (!swkey->eth.cvlan.tci)
goto unencap;
}
}

if (swkey->eth.type == htons(ETH_P_802_2)) {
/*
* Ethertype 802.2 is represented in the netlink with omitted
* OVS_KEY_ATTR_ETHERTYPE in the flow key attribute, and
* 0xffff in the mask attribute. Ethertype can also
* be wildcarded.
*/
if (is_mask && output->eth.type)
if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE,
output->eth.type))
goto nla_put_failure;
goto unencap;
}
}

if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
goto nla_put_failure;

if (eth_type_vlan(swkey->eth.type)) {
/* There are 3 VLAN tags, we don't know anything about the rest
* of the packet, so truncate here.
*/
WARN_ON_ONCE(!(encap && in_encap));
goto unencap;
}

if (swkey->eth.type == htons(ETH_P_IP)) {
struct ovs_key_ipv4 *ipv4_key;

nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
if (!nla)
goto nla_put_failure;
ipv4_key = nla_data(nla);
ipv4_key->ipv4_src = output->ipv4.addr.src;
ipv4_key->ipv4_dst = output->ipv4.addr.dst;
ipv4_key->ipv4_proto = output->ip.proto;
ipv4_key->ipv4_tos = output->ip.tos;
ipv4_key->ipv4_ttl = output->ip.ttl;
ipv4_key->ipv4_frag = output->ip.frag;
} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
struct ovs_key_ipv6 *ipv6_key;

nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
if (!nla)
goto nla_put_failure;
ipv6_key = nla_data(nla);
memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src,
sizeof(ipv6_key->ipv6_src));
memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst,
sizeof(ipv6_key->ipv6_dst));
ipv6_key->ipv6_label = output->ipv6.label;
ipv6_key->ipv6_proto = output->ip.proto;
ipv6_key->ipv6_tclass = output->ip.tos;
ipv6_key->ipv6_hlimit = output->ip.ttl;
ipv6_key->ipv6_frag = output->ip.frag;
} else if (swkey->eth.type == htons(ETH_P_NSH)) {
if (nsh_key_to_nlattr(&output->nsh, is_mask, skb))
goto nla_put_failure;
} else if (swkey->eth.type == htons(ETH_P_ARP) ||
swkey->eth.type == htons(ETH_P_RARP)) {
struct ovs_key_arp *arp_key;

nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
if (!nla)
goto nla_put_failure;
arp_key = nla_data(nla);
memset(arp_key, 0, sizeof(struct ovs_key_arp));
arp_key->arp_sip = output->ipv4.addr.src;
arp_key->arp_tip = output->ipv4.addr.dst;
arp_key->arp_op = htons(output->ip.proto);
ether_addr_copy(arp_key->arp_sha, output->ipv4.arp.sha);
ether_addr_copy(arp_key->arp_tha, output->ipv4.arp.tha);
} else if (eth_p_mpls(swkey->eth.type)) {
struct ovs_key_mpls *mpls_key;

nla = nla_reserve(skb, OVS_KEY_ATTR_MPLS, sizeof(*mpls_key));
if (!nla)
goto nla_put_failure;
mpls_key = nla_data(nla);
mpls_key->mpls_lse = output->mpls.top_lse;
}

if ((swkey->eth.type == htons(ETH_P_IP) ||
swkey->eth.type == htons(ETH_P_IPV6)) &&
swkey->ip.frag != OVS_FRAG_TYPE_LATER) {

if (swkey->ip.proto == IPPROTO_TCP) {
struct ovs_key_tcp *tcp_key;

nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
if (!nla)
goto nla_put_failure;
tcp_key = nla_data(nla);
tcp_key->tcp_src = output->tp.src;
tcp_key->tcp_dst = output->tp.dst;
if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS,
output->tp.flags))
goto nla_put_failure;
} else if (swkey->ip.proto == IPPROTO_UDP) {
struct ovs_key_udp *udp_key;

nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
if (!nla)
goto nla_put_failure;
udp_key = nla_data(nla);
udp_key->udp_src = output->tp.src;
udp_key->udp_dst = output->tp.dst;
} else if (swkey->ip.proto == IPPROTO_SCTP) {
struct ovs_key_sctp *sctp_key;

nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key));
if (!nla)
goto nla_put_failure;
sctp_key = nla_data(nla);
sctp_key->sctp_src = output->tp.src;
sctp_key->sctp_dst = output->tp.dst;
} else if (swkey->eth.type == htons(ETH_P_IP) &&
swkey->ip.proto == IPPROTO_ICMP) {
struct ovs_key_icmp *icmp_key;

nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
if (!nla)
goto nla_put_failure;
icmp_key = nla_data(nla);
icmp_key->icmp_type = ntohs(output->tp.src);
icmp_key->icmp_code = ntohs(output->tp.dst);
} else if (swkey->eth.type == htons(ETH_P_IPV6) &&
swkey->ip.proto == IPPROTO_ICMPV6) {
struct ovs_key_icmpv6 *icmpv6_key;

nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
sizeof(*icmpv6_key));
if (!nla)
goto nla_put_failure;
icmpv6_key = nla_data(nla);
icmpv6_key->icmpv6_type = ntohs(output->tp.src);
icmpv6_key->icmpv6_code = ntohs(output->tp.dst);

if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
struct ovs_key_nd *nd_key;

nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
if (!nla)
goto nla_put_failure;
nd_key = nla_data(nla);
memcpy(nd_key->nd_target, &output->ipv6.nd.target,
sizeof(nd_key->nd_target));
ether_addr_copy(nd_key->nd_sll, output->ipv6.nd.sll);
ether_addr_copy(nd_key->nd_tll, output->ipv6.nd.tll);
}
}
}

unencap:
if (in_encap)
nla_nest_end(skb, in_encap);
if (encap)
nla_nest_end(skb, encap);

return 0;

nla_put_failure:
return -EMSGSIZE;
}
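
This is where the hash set by execute_hash leaves the kernel: __ovs_nla_put_key writes output->ovs_flow_hash as OVS_KEY_ATTR_DP_HASH, nested inside the upcall's OVS_PACKET_ATTR_KEY attribute. The fragment below pairs with the walk from step 3 (call it on the OVS_PACKET_ATTR_KEY attribute found there) and shows how a user-space consumer could pull the hash back out; it is an illustration, not actual ovs-vswitchd code, and main() merely fabricates a minimal key to exercise it.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <linux/netlink.h>
    #include <linux/openvswitch.h>

    /* Given the OVS_PACKET_ATTR_KEY attribute of an upcall, scan the nested
     * OVS_KEY_ATTR_* attributes for the dp_hash written by execute_hash().
     * Returns 0 when the attribute is absent (the kernel never stores 0).
     */
    uint32_t upcall_key_dp_hash(const struct nlattr *key_attr)
    {
        const struct nlattr *nla =
            (const struct nlattr *)((const char *)key_attr + NLA_HDRLEN);
        int len = key_attr->nla_len - NLA_HDRLEN;

        while (len >= NLA_HDRLEN && nla->nla_len >= NLA_HDRLEN &&
               (int)nla->nla_len <= len) {
            if ((nla->nla_type & NLA_TYPE_MASK) == OVS_KEY_ATTR_DP_HASH &&
                nla->nla_len >= NLA_HDRLEN + sizeof(uint32_t)) {
                uint32_t hash;

                memcpy(&hash, (const char *)nla + NLA_HDRLEN, sizeof(hash));
                return hash;
            }
            len -= NLA_ALIGN(nla->nla_len);
            nla = (const struct nlattr *)((const char *)nla +
                                          NLA_ALIGN(nla->nla_len));
        }
        return 0;
    }

    int main(void)
    {
        /* Build a minimal nested key holding only OVS_KEY_ATTR_DP_HASH,
         * just to exercise the lookup above. */
        union {
            struct nlattr align;
            unsigned char buf[32];
        } m;
        struct nlattr *key = &m.align;
        struct nlattr *dp_hash = (struct nlattr *)(m.buf + NLA_HDRLEN);
        uint32_t value = 0x1234abcd;

        memset(&m, 0, sizeof(m));
        dp_hash->nla_type = OVS_KEY_ATTR_DP_HASH;
        dp_hash->nla_len = NLA_HDRLEN + sizeof(value);
        memcpy(dp_hash + 1, &value, sizeof(value));
        key->nla_type = OVS_PACKET_ATTR_KEY;
        key->nla_len = NLA_HDRLEN + NLA_ALIGN(dp_hash->nla_len);

        printf("dp_hash from upcall key = 0x%08x\n", upcall_key_dp_hash(key));
        return 0;
    }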
