This patch adds: a race-masking layer (PortSet), a debug app (PortSetDebug), and the GRETunnel app itself (ryu/app/gre_tunnel.py).
Signed-off-by: Isaku Yamahata <[email protected]> --- Changes v3 -> v4: - duplicated output to tunnel port --- ryu/app/gre_tunnel.py | 928 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 928 insertions(+) create mode 100644 ryu/app/gre_tunnel.py diff --git a/ryu/app/gre_tunnel.py b/ryu/app/gre_tunnel.py new file mode 100644 index 0000000..48a7d9c --- /dev/null +++ b/ryu/app/gre_tunnel.py @@ -0,0 +1,928 @@ +# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation. +# Copyright (C) 2012 Isaku Yamahata <yamahata at private email ne jp> +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from collections import defaultdict + +from ryu import exception as ryu_exc +from ryu.app.rest_nw_id import (NW_ID_VPORT_GRE, + RESERVED_NETWORK_IDS) +from ryu.base import app_manager +from ryu.controller import (dispatcher, + dpset, + event, + handler, + network, + ofp_event, + tunnels) +from ryu.ofproto import nx_match +from ryu.lib import dpid as dpid_lib +from ryu.lib import mac + + +LOG = logging.getLogger(__name__) + + +# Those events are higher level events than events of network tenant, +# tunnel ports as the race conditions are masked. +# Add event is generated only when all necessary informations are gathered, +# Del event is generated when any one of the informations are deleted. +# +# Example: ports for VMs +# there is a race condition between ofp port add/del event and +# register network_id for the port. 
class EventTunnelKeyDel(event.EventBase):
    """Notify that the given tunnel key was removed."""
    def __init__(self, tunnel_key):
        super(EventTunnelKeyDel, self).__init__()
        self.tunnel_key = tunnel_key


class EventPortBase(event.EventBase):
    """Base class for port events: identifies a (dpid, port_no) pair."""
    def __init__(self, dpid, port_no):
        super(EventPortBase, self).__init__()
        self.dpid = dpid
        self.port_no = port_no


class EventVMPort(EventPortBase):
    """A VM port became fully known (add_del=True) or lost (add_del=False)."""
    def __init__(self, network_id, tunnel_key,
                 dpid, port_no, mac_address, add_del):
        super(EventVMPort, self).__init__(dpid, port_no)
        self.network_id = network_id
        self.tunnel_key = tunnel_key
        self.mac_address = mac_address
        self.add_del = add_del

    def __str__(self):
        return ('EventVMPort<dpid %s port_no %d '
                'network_id %s tunnel_key %s mac %s add_del %s>' %
                (dpid_lib.dpid_to_str(self.dpid), self.port_no,
                 self.network_id, self.tunnel_key,
                 mac.haddr_to_str(self.mac_address), self.add_del))


class EventTunnelPort(EventPortBase):
    """A tunnel port became fully known (add_del=True) or lost."""
    def __init__(self, dpid, port_no, remote_dpid, add_del):
        super(EventTunnelPort, self).__init__(dpid, port_no)
        self.remote_dpid = remote_dpid
        self.add_del = add_del

    def __str__(self):
        return ('EventTunnelPort<dpid %s port_no %d remote_dpid %s '
                'add_del %s>' %
                (dpid_lib.dpid_to_str(self.dpid), self.port_no,
                 dpid_lib.dpid_to_str(self.remote_dpid), self.add_del))


QUEUE_NAME_PORT_SET_EV = 'port_set_event'
DISPATCHER_NAME_PORT_SET_EV = 'port_set_event'
PORT_SET_EV_DISPATCHER = dispatcher.EventDispatcher(
    DISPATCHER_NAME_PORT_SET_EV)


def _link_is_up(dpset_, dp, port_no):
    """Return True if the given port exists and its link is up."""
    try:
        state = dpset_.get_port(dp.id, port_no).state
        return not (state & dp.ofproto.OFPPS_LINK_DOWN)
    except ryu_exc.PortNotFound:
        return False


class PortSet(object):
    """Race-masking layer.

    Collects the low-level network/tunnel/dpset events and emits the
    higher-level EventVMPort/EventTunnelPort/EventTunnelKeyDel events only
    once (or as soon as) all the necessary pieces of information are known.
    """

    def __init__(self, **kwargs):
        super(PortSet, self).__init__()
        self.nw = kwargs['network']
        self.tunnels = kwargs['tunnels']
        self.dpset = kwargs['dpset']
        self.ev_q = dispatcher.EventQueue(QUEUE_NAME_PORT_SET_EV,
                                          PORT_SET_EV_DISPATCHER)

    def _check_link_state(self, dp, port_no, add_del):
        if add_del:
            # When adding a port, the link should be UP.
            return _link_is_up(self.dpset, dp, port_no)
        else:
            # When deleting a port, the link status doesn't matter.
            return True

    # Tunnel port
    # of connecting: self.dpids by (dpid, port_no)
    # datapath: connected: EventDP event
    # port status: UP: port add/delete/modify event
    # remote dpid: self.tunnels by (dpid, port_no): tunnel port add/del event
    def _tunnel_port_handler(self, dpid, port_no, add_del):
        dp = self.dpset.get(dpid)
        if dp is None:
            return
        if not self._check_link_state(dp, port_no, add_del):
            return
        try:
            remote_dpid = self.tunnels.get_remote_dpid(dpid, port_no)
        except ryu_exc.PortNotFound:
            return

        self.ev_q.queue(EventTunnelPort(dpid, port_no, remote_dpid, add_del))

    # VM port
    # of connection: self.dpids by (dpid, port_no)
    # datapath: connected: EventDP event
    # port status: UP: Port add/delete/modify event
    # network_id: self.nw by (dpid, port_no): network port add/del event
    # mac_address: self.nw by (dpid, port_no): mac address add/del event
    # tunnel key: from self.tunnels by network_id: tunnel key add/del event
    def _vm_port_handler(self, dpid, port_no,
                         network_id, mac_address, add_del):
        if network_id in RESERVED_NETWORK_IDS:
            return
        if mac_address is None:
            return
        dp = self.dpset.get(dpid)
        if dp is None:
            return
        if dp.is_reserved_port(port_no):
            return
        if not self._check_link_state(dp, port_no, add_del):
            return
        try:
            tunnel_key = self.tunnels.get_key(network_id)
        except ryu_exc.TunnelKeyNotFound:
            return

        self.ev_q.queue(EventVMPort(network_id, tunnel_key, dpid,
                                    port_no, mac_address, add_del))

    def _vm_port_mac_handler(self, dpid, port_no, network_id, add_del):
        """Like _vm_port_handler, but looks the mac address up first."""
        try:
            mac_address = self.nw.get_mac(dpid, port_no)
        except ryu_exc.PortNotFound:
            return
        self._vm_port_handler(dpid, port_no, network_id, mac_address,
                              add_del)

    def _port_handler(self, dpid, port_no, add_del):
        """Dispatch a port change to the tunnel-port or VM-port path.

        :type add_del: bool
        :param add_del: True for add, False for del
        """
        try:
            port = self.nw.get_port(dpid, port_no)
        except ryu_exc.PortNotFound:
            return

        if port.network_id is None:
            return

        if port.network_id == NW_ID_VPORT_GRE:
            self._tunnel_port_handler(dpid, port_no, add_del)
            return

        self._vm_port_handler(dpid, port_no, port.network_id,
                              port.mac_address, add_del)

    def _tunnel_key_del(self, tunnel_key):
        self.ev_q.queue(EventTunnelKeyDel(tunnel_key))

    # nw: network del
    #     port add/del (vm/tunnel port)
    #     mac address add/del(only vm port)
    # tunnels: tunnel key add/del
    #          tunnel port add/del
    # dpset: eventdp
    #        port add/delete/modify

    @handler.set_ev_cls(network.EventNetworkDel,
                        network.NETWORK_TENANT_EV_DISPATCHER)
    def network_del_handler(self, ev):
        network_id = ev.network_id
        if network_id in RESERVED_NETWORK_IDS:
            return
        try:
            tunnel_key = self.tunnels.get_key(network_id)
        except ryu_exc.TunnelKeyNotFound:
            return
        # Emit del events for all remaining VM ports, then drop the key.
        for (dpid, port_no) in self.nw.list_ports(network_id):
            self._vm_port_mac_handler(dpid, port_no, network_id, False)
        self._tunnel_key_del(tunnel_key)

    @handler.set_ev_cls(network.EventNetworkPort,
                        network.NETWORK_TENANT_EV_DISPATCHER)
    def network_port_handler(self, ev):
        self._vm_port_mac_handler(ev.dpid, ev.port_no, ev.network_id,
                                  ev.add_del)

    @handler.set_ev_cls(network.EventMacAddress,
                        network.NETWORK_TENANT_EV_DISPATCHER)
    def network_mac_address_handler(self, ev):
        self._vm_port_handler(ev.dpid, ev.port_no, ev.network_id,
                              ev.mac_address, ev.add_del)

    @handler.set_ev_cls(tunnels.EventTunnelKeyAdd,
                        tunnels.TUNNEL_EV_DISPATCHER)
    def tunnel_key_add_handler(self, ev):
        for (dpid, port_no) in self.nw.list_ports(ev.network_id):
            self._vm_port_mac_handler(dpid, port_no, ev.network_id, True)

    @handler.set_ev_cls(tunnels.EventTunnelKeyDel,
                        tunnels.TUNNEL_EV_DISPATCHER)
    def tunnel_key_del_handler(self, ev):
        network_id = ev.network_id
        for (dpid, port_no) in self.nw.list_ports(network_id):
            self._vm_port_mac_handler(dpid, port_no, network_id, False)
        if self.nw.has_networks(network_id):
            self._tunnel_key_del(ev.tunnel_key)

    @handler.set_ev_cls(tunnels.EventTunnelPort, tunnels.TUNNEL_EV_DISPATCHER)
    def tunnel_port_handler(self, ev):
        self._port_handler(ev.dpid, ev.port_no, ev.add_del)

    @handler.set_ev_cls(dpset.EventDP, dpset.DPSET_EV_DISPATCHER)
    def dp_handler(self, ev):
        enter_leave = ev.enter_leave
        if not enter_leave:
            # TODO:XXX
            # What to do on datapath disconnection?
            LOG.debug('dp disconnection ev:%s', ev)

        dpid = ev.dp.id
        # Union of the ports the switch reports and the ports we know of.
        ports = set(port.port_no for port in ev.ports)
        ports.update(port.port_no for port in self.nw.get_ports(dpid))
        for port_no in ports:
            self._port_handler(dpid, port_no, enter_leave)

    @handler.set_ev_cls(dpset.EventPortAdd, dpset.DPSET_EV_DISPATCHER)
    def port_add_handler(self, ev):
        self._port_handler(ev.dp.id, ev.port.port_no, True)

    @handler.set_ev_cls(dpset.EventPortDelete, dpset.DPSET_EV_DISPATCHER)
    def port_del_handler(self, ev):
        self._port_handler(ev.dp.id, ev.port.port_no, False)

    @handler.set_ev_cls(dpset.EventPortModify, dpset.DPSET_EV_DISPATCHER)
    def port_modify_handler(self, ev):
        # We don't know whether the LINK status has been changed,
        # so a VM/TUNNEL port event can be triggered many times.
        dp = ev.dp
        port = ev.port
        self._port_handler(dp.id, port.port_no,
                           not (port.state & dp.ofproto.OFPPS_LINK_DOWN))


# class PortSetDebug(app_manager.RyuApp):
#     """app for debug class PortSet"""
#     _CONTEXTS = {
#         'network': network.Network,
#         'dpset': dpset.DPSet,
#         'tunnels': tunnels.Tunnels,
#     }

#     def __init__(self, *args, **kwargs):
#         super(PortSetDebug, self).__init__(*args, **kwargs)
#         self.nw = kwargs['network']
#         self.dpset = kwargs['dpset']
#         self.tunnels = kwargs['tunnels']
#         self.port_set = PortSet(**kwargs)
#         handler.register_instance(self.port_set)

#     @handler.set_ev_cls(EventTunnelKeyDel, PORT_SET_EV_DISPATCHER)
#     def tunnel_key_del_handler(self, ev):
#         LOG.debug('tunnel_key_del ev %s', ev)

#     @handler.set_ev_cls(EventVMPort, PORT_SET_EV_DISPATCHER)
#     def vm_port_handler(self, ev):
#         LOG.debug('vm_port ev %s', ev)

#     @handler.set_ev_cls(EventTunnelPort, PORT_SET_EV_DISPATCHER)
#     def tunnel_port_handler(self, ev):
#         LOG.debug('tunnel_port ev %s', ev)


def cls_rule(in_port=None, tun_id=None, dl_src=None, dl_dst=None):
    """Convenience function to initialize nx_match.ClsRule()"""
    rule = nx_match.ClsRule()
    if in_port is not None:
        rule.set_in_port(in_port)
    if tun_id is not None:
        rule.set_tun_id(tun_id)
    if dl_src is not None:
        rule.set_dl_src(dl_src)
    if dl_dst is not None:
        rule.set_dl_dst(dl_dst)
    return rule


class GRETunnel(app_manager.RyuApp):
    """
    app for L2/L3 with gre tunneling

    PORTS
    VM-port: the port which is connected to VM instance
    TUNNEL-port: the ovs GRE vport

    TABLES: multiple tables are used
    SRC_TABLE:
        This table is firstly used to match packets.
        By in_port, determine which port the packet comes from, VM-port or
        TUNNEL-port.
        If the packet came from VM-port, set tunnel id based on which network
        the VM belongs to, and send the packet to the tunnel out table.
        If the packet came from TUNNEL-port and its tunnel id is known to this
        switch, send the packet to local out table. Otherwise drop it.

    TUNNEL_OUT_TABLE:
        This table looks at tunnel id and dl_dst, sends the packet to tunnel
        ports if necessary, and then sends the packet to LOCAL_OUT_TABLE.
        By matching the packet with tunnel_id and dl_dst, determine which
        tunnel port the packet is sent to.

    LOCAL_OUT_TABLE:
        This table looks at tunnel id and dl_dst, sends the packet to local
        VM ports if necessary. Otherwise drops the packet.


    The packet from a vm port traverses as
    SRC_TABLE -> TUNNEL_OUT_TABLE -> LOCAL_OUT_TABLE

    The packet from a tunnel port traverses as
    SRC_TABLE -> LOCAL_OUT_TABLE


    The packet from a vm port:
    SRC_TABLE
    match                       action
    in_port(VM) & dl_src        set_tunnel & goto TUNNEL_OUT_TABLE
    in_port(VM)                 drop        (catch-all drop rule)

    in_port(TUNNEL) & tun_id    goto LOCAL_OUT_TABLE
    in_port(TUNNEL)             drop        (catch-all drop rule)

    TUNNEL_OUT_TABLE
    match                       action
    tun_id & dl_dst             out tunnel port & goto LOCAL_OUT_TABLE
                                (unicast or broadcast)
    tun_id                      goto LOCAL_OUT_TABLE (catch-all rule)

    LOCAL_OUT_TABLE
    tun_id & dl_dst             output(VM)  (unicast or broadcast)
    tun_id                      drop        (catch-all drop rule)

    NOTE:
    adding/deleting flow entries should be done carefully in a certain order
    such that packet in events are not triggered.
    """
    _CONTEXTS = {
        # BUGFIX: the context key was misspelled 'netowrk', which made
        # kwargs['network'] in __init__ raise KeyError at app start.
        'network': network.Network,
        'dpset': dpset.DPSet,
        'tunnels': tunnels.Tunnels,
    }

    DEFAULT_COOKIE = 0  # cookie isn't used. Just set 0

    # Tables
    SRC_TABLE = 0
    TUNNEL_OUT_TABLE = 1
    LOCAL_OUT_TABLE = 2
    FLOW_TABLES = [SRC_TABLE, TUNNEL_OUT_TABLE, LOCAL_OUT_TABLE]

    # Priorities. Only the inequality is important.
    # '// 2' is used just for easy looking instead of '- 1'.
    # 0x7ffff vs 0x4000
    TABLE_DEFAULT_PRIORITY = 32768  # = ofproto.OFP_DEFAULT_PRIORITY
    # Backward-compatible alias for the historically misspelled name.
    TABLE_DEFAULT_PRPIRITY = TABLE_DEFAULT_PRIORITY

    # SRC_TABLE for VM-port
    SRC_PRI_MAC = TABLE_DEFAULT_PRIORITY
    SRC_PRI_DROP = TABLE_DEFAULT_PRIORITY // 2
    # SRC_TABLE for TUNNEL-port
    SRC_PRI_TUNNEL_PASS = TABLE_DEFAULT_PRIORITY
    SRC_PRI_TUNNEL_DROP = TABLE_DEFAULT_PRIORITY // 2

    # TUNNEL_OUT_TABLE
    TUNNEL_OUT_PRI_MAC = TABLE_DEFAULT_PRIORITY
    TUNNEL_OUT_PRI_BROADCAST = TABLE_DEFAULT_PRIORITY // 2
    TUNNEL_OUT_PRI_PASS = TABLE_DEFAULT_PRIORITY // 4
    TUNNEL_OUT_PRI_DROP = TABLE_DEFAULT_PRIORITY // 8

    # LOCAL_OUT_TABLE
    LOCAL_OUT_PRI_MAC = TABLE_DEFAULT_PRIORITY
    LOCAL_OUT_PRI_BROADCAST = TABLE_DEFAULT_PRIORITY // 2
    LOCAL_OUT_PRI_DROP = TABLE_DEFAULT_PRIORITY // 4

    def __init__(self, *args, **kwargs):
        super(GRETunnel, self).__init__(*args, **kwargs)
        self.nw = kwargs['network']
        self.dpset = kwargs['dpset']
        self.tunnels = kwargs['tunnels']

        self.port_set = PortSet(**kwargs)
        handler.register_instance(self.port_set)

    # TODO: track active vm/tunnel ports

    @handler.set_ev_cls(dpset.EventDP, dpset.DPSET_EV_DISPATCHER)
    def dp_handler(self, ev):
        """On switch connect: enable NXM and wipe all flow tables."""
        if not ev.enter_leave:
            return

        # enable nicira extension
        # TODO:XXX error handling
        dp = ev.dp
        ofproto = dp.ofproto

        dp.send_nxt_set_flow_format(ofproto.NXFF_NXM)
        flow_mod_table_id = dp.ofproto_parser.NXTFlowModTableId(dp, 1)
        dp.send_msg(flow_mod_table_id)
        dp.send_barrier()

        # delete all flows in all tables
        # current controller.handlers takes care of only table = 0
        for table in self.FLOW_TABLES:
            rule = cls_rule()
            self.send_flow_del(dp, rule, table, ofproto.OFPFC_DELETE,
                               None, None)
        dp.send_barrier()

    @staticmethod
    def _make_command(table, command):
        # NXM flow_mod encodes the table id in the upper byte of command.
        return table << 8 | command

    def send_flow_mod(self, dp, rule, table, command, priority, actions):
        command = self._make_command(table, command)
        dp.send_flow_mod(rule=rule, cookie=self.DEFAULT_COOKIE,
                         command=command, idle_timeout=0,
                         hard_timeout=0, priority=priority, actions=actions)

    def send_flow_del(self, dp, rule, table, command, priority, out_port):
        command = self._make_command(table, command)
        dp.send_flow_mod(rule=rule, cookie=self.DEFAULT_COOKIE,
                         command=command, idle_timeout=0,
                         hard_timeout=0, priority=priority, out_port=out_port)

    def _list_tunnel_port(self, dp, remote_dpids):
        """Return dp's tunnel port numbers (link up) towards remote_dpids."""
        dpid = dp.id
        tunnel_ports = []
        for other_dpid in remote_dpids:
            if other_dpid == dpid:
                continue
            other_dp = self.dpset.get(other_dpid)
            if other_dp is None:
                continue
            try:
                port_no = self.tunnels.get_port(dpid, other_dpid)
            except ryu_exc.PortNotFound:
                continue
            if not self._link_is_up(dp, port_no):
                continue
            tunnel_ports.append(port_no)

        return tunnel_ports

    def _link_is_up(self, dp, port_no):
        return _link_is_up(self.dpset, dp, port_no)

    def _vm_port_add(self, ev):
        """Install flows for a newly known VM port, locally and remotely."""
        dpid = ev.dpid
        dp = self.dpset.get(dpid)
        assert dp is not None
        ofproto = dp.ofproto
        ofproto_parser = dp.ofproto_parser
        mac_address = ev.mac_address
        network_id = ev.network_id
        tunnel_key = ev.tunnel_key
        remote_dpids = self.nw.get_dpids(network_id)
        remote_dpids.remove(dpid)

        # LOCAL_OUT_TABLE: unicast
        rule = cls_rule(tun_id=tunnel_key, dl_dst=mac_address)
        actions = [ofproto_parser.OFPActionOutput(ev.port_no)]
        self.send_flow_mod(dp, rule, self.LOCAL_OUT_TABLE, ofproto.OFPFC_ADD,
                           self.LOCAL_OUT_PRI_MAC, actions)

        # LOCAL_OUT_TABLE: broadcast
        rule = cls_rule(tun_id=tunnel_key, dl_dst=mac.BROADCAST)
        actions = []
        for port in self.nw.get_ports(dpid):
            if (port.network_id != network_id or port.mac_address is None):
                continue
            if not self._link_is_up(dp, port.port_no):
                continue
            actions.append(ofproto_parser.OFPActionOutput(port.port_no))

        # This VM is the first one of this network on this switch iff the
        # only broadcast output is the port being added.
        first_instance = (len(actions) == 1)
        assert actions
        if first_instance:
            command = ofproto.OFPFC_ADD
        else:
            command = ofproto.OFPFC_MODIFY_STRICT
        self.send_flow_mod(dp, rule, self.LOCAL_OUT_TABLE, command,
                           self.LOCAL_OUT_PRI_BROADCAST, actions)

        # LOCAL_OUT_TABLE: multicast TODO:XXX

        # LOCAL_OUT_TABLE: catch-all drop
        if first_instance:
            rule = cls_rule(tun_id=tunnel_key)
            self.send_flow_mod(dp, rule, self.LOCAL_OUT_TABLE,
                               ofproto.OFPFC_ADD, self.LOCAL_OUT_PRI_DROP, [])

        # TUNNEL_OUT_TABLE: unicast
        for remote_dpid in remote_dpids:
            remote_dp = self.dpset.get(remote_dpid)
            if remote_dp is None:
                continue
            try:
                tunnel_port_no = self.tunnels.get_port(dpid, remote_dpid)
            except ryu_exc.PortNotFound:
                continue
            if not self._link_is_up(dp, tunnel_port_no):
                continue

            for port in self.nw.get_ports(remote_dpid):
                if port.network_id != network_id or port.mac_address is None:
                    continue
                if not self._link_is_up(remote_dp, port.port_no):
                    continue
                # TUNNEL_OUT_TABLE: unicast
                rule = cls_rule(tun_id=tunnel_key, dl_dst=port.mac_address)
                output = ofproto_parser.OFPActionOutput(tunnel_port_no)
                resubmit_table = ofproto_parser.NXActionResubmitTable(
                    in_port=ofproto.OFPP_IN_PORT, table=self.LOCAL_OUT_TABLE)
                actions = [output, resubmit_table]
                self.send_flow_mod(dp, rule, self.TUNNEL_OUT_TABLE,
                                   ofproto.OFPFC_ADD, self.TUNNEL_OUT_PRI_MAC,
                                   actions)

            if first_instance:
                # SRC_TABLE: TUNNEL-port: resubmit to LOCAL_OUT_TABLE
                rule = cls_rule(in_port=tunnel_port_no, tun_id=tunnel_key)
                resubmit_table = ofproto_parser.NXActionResubmitTable(
                    in_port=ofproto.OFPP_IN_PORT, table=self.LOCAL_OUT_TABLE)
                actions = [resubmit_table]
                self.send_flow_mod(dp, rule, self.SRC_TABLE,
                                   ofproto.OFPFC_ADD, self.SRC_PRI_TUNNEL_PASS,
                                   actions)

        if first_instance:
            # TUNNEL_OUT_TABLE: catch-all(resubmit to LOCAL_OUT_TABLE)
            rule = cls_rule(tun_id=tunnel_key)
            resubmit_table = ofproto_parser.NXActionResubmitTable(
                in_port=ofproto.OFPP_IN_PORT, table=self.LOCAL_OUT_TABLE)
            actions = [resubmit_table]
            self.send_flow_mod(dp, rule, self.TUNNEL_OUT_TABLE,
                               ofproto.OFPFC_ADD,
                               self.TUNNEL_OUT_PRI_PASS, actions)

            # TUNNEL_OUT_TABLE: broadcast
            rule = cls_rule(tun_id=tunnel_key, dl_dst=mac.BROADCAST)
            actions = [ofproto_parser.OFPActionOutput(tunnel_port_no)
                       for tunnel_port_no
                       in self._list_tunnel_port(dp, remote_dpids)]
            resubmit_table = ofproto_parser.NXActionResubmitTable(
                in_port=ofproto.OFPP_IN_PORT, table=self.LOCAL_OUT_TABLE)
            actions.append(resubmit_table)
            self.send_flow_mod(dp, rule, self.TUNNEL_OUT_TABLE,
                               ofproto.OFPFC_ADD,
                               self.TUNNEL_OUT_PRI_BROADCAST, actions)

        # TUNNEL_OUT_TABLE: multicast TODO:XXX

        # SRC_TABLE: VM-port unicast
        # Barrier first so the downstream tables are in place before the
        # SRC_TABLE entry starts steering traffic into them.
        dp.send_barrier()
        rule = cls_rule(in_port=ev.port_no, dl_src=mac_address)
        set_tunnel = ofproto_parser.NXActionSetTunnel(tunnel_key)
        resubmit_table = ofproto_parser.NXActionResubmitTable(
            in_port=ofproto.OFPP_IN_PORT, table=self.TUNNEL_OUT_TABLE)
        actions = [set_tunnel, resubmit_table]
        self.send_flow_mod(dp, rule, self.SRC_TABLE, ofproto.OFPFC_ADD,
                           self.SRC_PRI_MAC, actions)

        # SRC_TABLE: VM-port catch-all drop
        rule = cls_rule(in_port=ev.port_no)
        self.send_flow_mod(dp, rule, self.SRC_TABLE, ofproto.OFPFC_ADD,
                           self.SRC_PRI_DROP, [])

        # remote dp
        for remote_dpid in remote_dpids:
            remote_dp = self.dpset.get(remote_dpid)
            if remote_dp is None:
                continue
            try:
                tunnel_port_no = self.tunnels.get_port(remote_dpid, dpid)
            except ryu_exc.PortNotFound:
                continue
            if not self._link_is_up(remote_dp, tunnel_port_no):
                continue

            remote_ofproto = remote_dp.ofproto
            remote_ofproto_parser = remote_dp.ofproto_parser

            # TUNNEL_OUT_TABLE: unicast
            rule = cls_rule(tun_id=ev.tunnel_key, dl_dst=mac_address)
            output = remote_ofproto_parser.OFPActionOutput(tunnel_port_no)
            resubmit_table = remote_ofproto_parser.NXActionResubmitTable(
                in_port=remote_ofproto.OFPP_IN_PORT,
                table=self.LOCAL_OUT_TABLE)
            actions = [output, resubmit_table]
            self.send_flow_mod(remote_dp, rule, self.TUNNEL_OUT_TABLE,
                               remote_ofproto.OFPFC_ADD,
                               self.TUNNEL_OUT_PRI_MAC, actions)

            if not first_instance:
                continue

            # SRC_TABLE: TUNNEL-port
            rule = cls_rule(in_port=tunnel_port_no, tun_id=ev.tunnel_key)
            resubmit_table = remote_ofproto_parser.NXActionResubmitTable(
                in_port=remote_ofproto.OFPP_IN_PORT,
                table=self.LOCAL_OUT_TABLE)
            actions = [resubmit_table]
            self.send_flow_mod(remote_dp, rule, self.SRC_TABLE,
                               remote_ofproto.OFPFC_ADD,
                               self.SRC_PRI_TUNNEL_PASS, actions)

            # TUNNEL_OUT_TABLE: broadcast
            rule = cls_rule(tun_id=ev.tunnel_key, dl_dst=mac.BROADCAST)
            tunnel_ports = self._list_tunnel_port(remote_dp, remote_dpids)
            if tunnel_port_no not in tunnel_ports:
                tunnel_ports.append(tunnel_port_no)
            actions = [remote_ofproto_parser.OFPActionOutput(port_no)
                       for port_no in tunnel_ports]
            if len(actions) == 1:
                command = remote_dp.ofproto.OFPFC_ADD
            else:
                command = remote_dp.ofproto.OFPFC_MODIFY_STRICT
            resubmit_table = remote_ofproto_parser.NXActionResubmitTable(
                in_port=remote_ofproto.OFPP_IN_PORT,
                table=self.LOCAL_OUT_TABLE)
            actions.append(resubmit_table)
            self.send_flow_mod(remote_dp, rule, self.TUNNEL_OUT_TABLE,
                               command, self.TUNNEL_OUT_PRI_BROADCAST, actions)

        # TUNNEL_OUT_TABLE: multicast TODO:XXX

    def _vm_port_del(self, ev):
        """Remove flows for a VM port going away, locally and remotely."""
        dpid = ev.dpid
        dp = self.dpset.get(dpid)
        assert dp is not None
        ofproto = dp.ofproto
        ofproto_parser = dp.ofproto_parser
        mac_address = ev.mac_address
        network_id = ev.network_id
        tunnel_key = ev.tunnel_key

        local_ports = []
        for port in self.nw.get_ports(dpid):
            if port.port_no == ev.port_no:
                continue
            if (port.network_id != network_id or port.mac_address is None):
                continue
            if not self._link_is_up(dp, port.port_no):
                continue
            local_ports.append(port.port_no)

        last_instance = not local_ports

        # SRC_TABLE: VM-port unicast and catch-all
        rule = cls_rule(in_port=ev.port_no)
        self.send_flow_mod(dp, rule, self.SRC_TABLE, ofproto.OFPFC_DELETE,
                           ofproto.OFP_DEFAULT_PRIORITY,
                           [])  # priority is ignored

        if last_instance:
            # SRC_TABLE: TUNNEL-port: all tunnel matching
            rule = cls_rule(tun_id=tunnel_key)
            self.send_flow_mod(dp, rule, self.SRC_TABLE,
                               ofproto.OFPFC_DELETE,
                               ofproto.OFP_DEFAULT_PRIORITY,
                               [])  # priority is ignored

            # TUNNEL_OUT_TABLE: (tun_id & dl_dst) and tun_id
            rule = cls_rule(tun_id=tunnel_key)
            self.send_flow_mod(dp, rule, self.TUNNEL_OUT_TABLE,
                               ofproto.OFPFC_DELETE,
                               ofproto.OFP_DEFAULT_PRIORITY,
                               [])  # priority is ignored

            # LOCAL_OUT: tun_id catch-all drop rule
            rule = cls_rule(tun_id=tunnel_key)
            self.send_flow_mod(dp, rule, self.LOCAL_OUT_TABLE,
                               ofproto.OFPFC_DELETE,
                               ofproto.OFP_DEFAULT_PRIORITY,
                               [])  # priority is ignored
        else:
            # LOCAL_OUT_TABLE: unicast
            # BUGFIX: _vm_port_add installs this flow keyed on dl_dst, so
            # the strict delete must match dl_dst too; it used dl_src and
            # therefore never removed the flow.
            rule = cls_rule(tun_id=tunnel_key, dl_dst=mac_address)
            self.send_flow_del(dp, rule, self.LOCAL_OUT_TABLE,
                               ofproto.OFPFC_DELETE_STRICT,
                               self.LOCAL_OUT_PRI_MAC, ev.port_no)

            # LOCAL_OUT_TABLE: broadcast
            rule = cls_rule(tun_id=tunnel_key, dl_dst=mac.BROADCAST)
            actions = [ofproto_parser.OFPActionOutput(port_no)
                       for port_no in local_ports]
            self.send_flow_mod(dp, rule, self.LOCAL_OUT_TABLE,
                               ofproto.OFPFC_MODIFY_STRICT,
                               self.LOCAL_OUT_PRI_BROADCAST, actions)

        # LOCAL_OUT_TABLE: multicast TODO:XXX

        # remote dp
        remote_dpids = self.nw.get_dpids(ev.network_id)
        remote_dpids.remove(dpid)
        for remote_dpid in remote_dpids:
            remote_dp = self.dpset.get(remote_dpid)
            if remote_dp is None:
                continue
            try:
                tunnel_port_no = self.tunnels.get_port(remote_dpid, dpid)
            except ryu_exc.PortNotFound:
                continue
            if not self._link_is_up(remote_dp, tunnel_port_no):
                continue

            remote_ofproto = remote_dp.ofproto
            remote_ofproto_parser = remote_dp.ofproto_parser

            if last_instance:
                # SRC_TABLE: TUNNEL-port
                rule = cls_rule(in_port=tunnel_port_no, tun_id=tunnel_key)
                self.send_flow_del(remote_dp, rule, self.SRC_TABLE,
                                   remote_ofproto.OFPFC_DELETE_STRICT,
                                   self.SRC_PRI_TUNNEL_PASS, None)

                # SRC_TABLE: TUNNEL-port catch-all drop rule
                # NOTE(review): the installed drop rule matches on in_port
                # only; this strict delete also matches tun_id, so it looks
                # like a no-op (the drop rule is removed in
                # _tunnel_port_del) -- confirm before changing.
                rule = cls_rule(in_port=tunnel_port_no, tun_id=tunnel_key)
                self.send_flow_del(remote_dp, rule, self.SRC_TABLE,
                                   remote_ofproto.OFPFC_DELETE_STRICT,
                                   self.SRC_PRI_TUNNEL_DROP, None)

                # TUNNEL_OUT_TABLE: broadcast
                # tunnel_ports.remove(tunnel_port_no)
                rule = cls_rule(tun_id=tunnel_key, dl_dst=mac.BROADCAST)
                tunnel_ports = self._list_tunnel_port(remote_dp,
                                                      remote_dpids)
                assert tunnel_port_no not in tunnel_ports
                actions = [remote_ofproto_parser.OFPActionOutput(port_no)
                           for port_no in tunnel_ports]
                if not actions:
                    command = remote_dp.ofproto.OFPFC_DELETE_STRICT
                else:
                    command = remote_dp.ofproto.OFPFC_MODIFY_STRICT
                    resubmit_table = \
                        remote_ofproto_parser.NXActionResubmitTable(
                            in_port=remote_ofproto.OFPP_IN_PORT,
                            table=self.LOCAL_OUT_TABLE)
                    actions.append(resubmit_table)
                self.send_flow_mod(remote_dp, rule, self.TUNNEL_OUT_TABLE,
                                   command, self.TUNNEL_OUT_PRI_BROADCAST,
                                   actions)

            # TUNNEL_OUT_TABLE: unicast
            rule = cls_rule(tun_id=tunnel_key, dl_dst=mac_address)
            self.send_flow_del(remote_dp, rule, self.TUNNEL_OUT_TABLE,
                               remote_ofproto.OFPFC_DELETE_STRICT,
                               self.TUNNEL_OUT_PRI_MAC, tunnel_port_no)

        # TODO:XXX multicast

    def _get_vm_ports(self, dpid):
        """Return {network_id: [port, ...]} for non-reserved ports of dpid."""
        ports = defaultdict(list)
        for port in self.nw.get_ports(dpid):
            if port.network_id in RESERVED_NETWORK_IDS:
                continue
            ports[port.network_id].append(port)
        return ports

    def _tunnel_port_add(self, ev):
        """Install flows for a new tunnel port on the local switch."""
        dpid = ev.dpid
        dp = self.dpset.get(dpid)
        ofproto = dp.ofproto
        ofproto_parser = dp.ofproto_parser
        remote_dpid = ev.remote_dpid

        local_ports = self._get_vm_ports(dpid)
        remote_ports = self._get_vm_ports(remote_dpid)

        # SRC_TABLE: TUNNEL-port catch-all drop rule
        # ingress flow from this tunnel port: remote -> tunnel port
        # drop if unknown tunnel_key
        rule = cls_rule(in_port=ev.port_no)
        self.send_flow_mod(dp, rule, self.SRC_TABLE, ofproto.OFPFC_ADD,
                           self.SRC_PRI_TUNNEL_DROP, [])

        # SRC_TABLE: TUNNEL-port: pass if known tunnel_key
        for network_id in local_ports:
            try:
                tunnel_key = self.tunnels.get_key(network_id)
            except ryu_exc.TunnelKeyNotFound:
                continue
            if network_id not in remote_ports:
                continue

            rule = cls_rule(in_port=ev.port_no, tun_id=tunnel_key)
            resubmit_table = ofproto_parser.NXActionResubmitTable(
                in_port=ofproto.OFPP_IN_PORT, table=self.LOCAL_OUT_TABLE)
            actions = [resubmit_table]
            self.send_flow_mod(dp, rule, self.SRC_TABLE, ofproto.OFPFC_ADD,
                               self.SRC_PRI_TUNNEL_PASS, actions)

        # egress flow into this tunnel port: vm port -> tunnel port -> remote
        for network_id in local_ports:
            try:
                tunnel_key = self.tunnels.get_key(network_id)
            except ryu_exc.TunnelKeyNotFound:
                continue
            ports = remote_ports.get(network_id)
            if ports is None:
                continue

            # TUNNEL_OUT_TABLE: unicast
            for port in ports:
                if port.mac_address is None:
                    continue
                rule = cls_rule(tun_id=tunnel_key, dl_dst=port.mac_address)
                output = ofproto_parser.OFPActionOutput(ev.port_no)
                resubmit_table = ofproto_parser.NXActionResubmitTable(
                    in_port=ofproto.OFPP_IN_PORT, table=self.LOCAL_OUT_TABLE)
                actions = [output, resubmit_table]
                self.send_flow_mod(dp, rule, self.TUNNEL_OUT_TABLE,
                                   ofproto.OFPFC_ADD, self.TUNNEL_OUT_PRI_MAC,
                                   actions)

            # TUNNEL_OUT_TABLE: broadcast
            remote_dpids = self.nw.get_dpids(network_id)
            remote_dpids.remove(dpid)

            rule = cls_rule(tun_id=tunnel_key, dl_dst=mac.BROADCAST)
            tunnel_ports = self._list_tunnel_port(dp, remote_dpids)
            if ev.port_no not in tunnel_ports:
                tunnel_ports.append(ev.port_no)
            actions = [ofproto_parser.OFPActionOutput(port_no)
                       for port_no in tunnel_ports]
            resubmit_table = ofproto_parser.NXActionResubmitTable(
                in_port=ofproto.OFPP_IN_PORT, table=self.LOCAL_OUT_TABLE)
            actions.append(resubmit_table)
            if len(tunnel_ports) == 1:
                command = ofproto.OFPFC_ADD
            else:
                command = ofproto.OFPFC_MODIFY_STRICT
            self.send_flow_mod(dp, rule, self.TUNNEL_OUT_TABLE,
                               command, self.TUNNEL_OUT_PRI_BROADCAST, actions)

        # TUNNEL_OUT_TABLE: multicast TODO:XXX

    def _tunnel_port_del(self, ev):
        # almost nothing to do because all flows related to this tunnel port
        # should be handled by self._vm_port_del() as tunnel port deletion
        # follows vm port deletion.
        # the tunnel port is deleted if and only if no instance of the same
        # tenants resides in both nodes of the tunnel end points.
        LOG.debug('tunnel_port_del %s', ev)
        dp = self.dpset.get(ev.dpid)

        # SRC_TABLE: TUNNEL-port catch-all drop rule
        rule = cls_rule(in_port=ev.port_no)
        self.send_flow_mod(dp, rule, self.SRC_TABLE,
                           dp.ofproto.OFPFC_DELETE_STRICT,
                           self.SRC_PRI_TUNNEL_DROP, [])

    @handler.set_ev_cls(EventTunnelKeyDel, PORT_SET_EV_DISPATCHER)
    def tunnel_key_del_handler(self, ev):
        LOG.debug('tunnel_key_del ev %s', ev)

    @handler.set_ev_cls(EventVMPort, PORT_SET_EV_DISPATCHER)
    def vm_port_handler(self, ev):
        LOG.debug('vm_port ev %s', ev)
        if ev.add_del:
            self._vm_port_add(ev)
        else:
            self._vm_port_del(ev)

    @handler.set_ev_cls(EventTunnelPort, PORT_SET_EV_DISPATCHER)
    def tunnel_port_handler(self, ev):
        LOG.debug('tunnel_port ev %s', ev)
        if ev.add_del:
            self._tunnel_port_add(ev)
        else:
            self._tunnel_port_del(ev)

    @handler.set_ev_cls(ofp_event.EventOFPPacketIn, handler.MAIN_DISPATCHER)
    def packet_in_handler(self, ev):
        # for debug
        msg = ev.msg
        LOG.debug('packet in ev %s msg %s', ev, ev.msg)
        if msg.buffer_id != 0xffffffff:  # TODO:XXX use constant instead of -1
            # release the buffered packet without forwarding it
            msg.datapath.send_packet_out(msg.buffer_id, msg.in_port, [])
http://p.sf.net/sfu/newrelic-dev2dev _______________________________________________ Ryu-devel mailing list [email protected] https://lists.sourceforge.net/lists/listinfo/ryu-devel
