commit fa9001bb67de59599bbe132a994422f8a01c57b2 Author: campi Date: Sun May 26 16:10:42 2024 +0200 first diff --git a/mininet/__init__.py b/mininet/__init__.py new file mode 100644 index 0000000..c15ea6a --- /dev/null +++ b/mininet/__init__.py @@ -0,0 +1 @@ +"Docstring to silence pylint; ignores --ignore option for __init__.py" diff --git a/mininet/__main__.py b/mininet/__main__.py new file mode 120000 index 0000000..11bc8a7 --- /dev/null +++ b/mininet/__main__.py @@ -0,0 +1 @@ +../bin/mn \ No newline at end of file diff --git a/mininet/clean.py b/mininet/clean.py new file mode 100755 index 0000000..b48d702 --- /dev/null +++ b/mininet/clean.py @@ -0,0 +1,129 @@ +""" +Mininet Cleanup +author: Bob Lantz (rlantz@cs.stanford.edu) + +Unfortunately, Mininet and OpenFlow (and the Linux kernel) +don't always clean up properly after themselves. Until they do +(or until cleanup functionality is integrated into the Python +code), this script may be used to get rid of unwanted garbage. +It may also get rid of 'false positives', but hopefully +nothing irreplaceable! +""" + +from subprocess import ( Popen, PIPE, check_output as co, + CalledProcessError ) +import time + +from mininet.log import info +from mininet.term import cleanUpScreens +from mininet.util import decode + +def sh( cmd ): + "Print a command and send it to the shell" + info( cmd + '\n' ) + p = Popen( # pylint: disable=consider-using-with + [ '/bin/sh', '-c', cmd ], stdout=PIPE ) + result = p.communicate()[ 0 ] + return decode( result ) + +def killprocs( pattern ): + "Reliably terminate processes matching a pattern (including args)" + sh( 'pkill -9 -f %s' % pattern ) + # Make sure they are gone + while True: + try: + pids = co( [ 'pgrep', '-f', pattern ] ) + except CalledProcessError: + pids = '' + if pids: + sh( 'pkill -9 -f %s' % pattern ) + time.sleep( .5 ) + else: + break + +class Cleanup( object ): + "Wrapper for cleanup()" + + callbacks = [] + + @classmethod + def cleanup( cls): + """Clean up junk which might be left over from old runs; + do fast stuff before slow dp and link removal!""" + + info( "*** Removing excess controllers/ofprotocols/ofdatapaths/" + "pings/noxes\n" ) + zombies = ( 'controller ofprotocol ofdatapath ping nox_core ' + 'lt-nox_core ovs-openflowd ovs-controller ' + 'ovs-testcontroller udpbwtest mnexec ivs ryu-manager ' ) + # Note: real zombie processes can't actually be killed, since they + # are already (un)dead. Then again, + # you can't connect to them either, so they're mostly harmless. + # Send SIGTERM first to give processes a chance to shutdown cleanly. + sh( 'killall ' + zombies + ' 2> /dev/null' ) + time.sleep( 1 ) + sh( 'killall -9 ' + zombies + ' 2> /dev/null' ) + + # And kill off sudo mnexec + sh( 'pkill -9 -f "sudo mnexec"') + + info( "*** Removing junk from /tmp\n" ) + sh( 'rm -f /tmp/vconn* /tmp/vlogs* /tmp/*.out /tmp/*.log' ) + + info( "*** Removing old X11 tunnels\n" ) + cleanUpScreens() + + info( "*** Removing excess kernel datapaths\n" ) + dps = sh( "ps ax | egrep -o 'dp[0-9]+' | sed 's/dp/nl:/'" + ).splitlines() + for dp in dps: + if dp: + sh( 'dpctl deldp ' + dp ) + info( "*** Removing OVS datapaths\n" ) + dps = sh("ovs-vsctl --timeout=1 list-br").strip().splitlines() + if dps: + sh( "ovs-vsctl " + " -- ".join( "--if-exists del-br " + dp + for dp in dps if dp ) ) + # And in case the above didn't work... 
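# For reference, the batched command assembled a few lines above expands to a
# single ovs-vsctl invocation; for two hypothetical leftover bridges s1 and s2
# it would read:
#
#   ovs-vsctl --if-exists del-br s1 -- --if-exists del-br s2
#
# (" -- " separates independent del-br sub-commands, so all stale bridges are
# removed in one process rather than one process per bridge.)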
+ dps = sh( "ovs-vsctl --timeout=1 list-br" ).strip().splitlines() + for dp in dps: + sh( 'ovs-vsctl del-br ' + dp ) + + info( "*** Removing all links of the pattern foo-ethX\n" ) + links = sh( "ip link show | " + "egrep -o '([-_.[:alnum:]]+-eth[[:digit:]]+)'" + ).splitlines() + # Delete blocks of links + n = 1000 # chunk size + for i in range( 0, len( links ), n ): + cmd = ';'.join( 'ip link del %s' % link + for link in links[ i : i + n ] ) + sh( '( %s ) 2> /dev/null' % cmd ) + + if 'tap9' in sh( 'ip link show' ): + info( "*** Removing tap9 - assuming it's from cluster edition\n" ) + sh( 'ip link del tap9' ) + + info( "*** Killing stale mininet node processes\n" ) + killprocs( 'mininet:' ) + + info( "*** Shutting down stale tunnels\n" ) + killprocs( 'Tunnel=Ethernet' ) + killprocs( '.ssh/mn') + sh( 'rm -f ~/.ssh/mn/*' ) + + # Call any additional cleanup code if necessary + for callback in cls.callbacks: + callback() + + info( "*** Cleanup complete.\n" ) + + @classmethod + def addCleanupCallback( cls, callback ): + "Add cleanup callback" + if callback not in cls.callbacks: + cls.callbacks.append( callback ) + + +cleanup = Cleanup.cleanup +addCleanupCallback = Cleanup.addCleanupCallback diff --git a/mininet/cli.py b/mininet/cli.py new file mode 100644 index 0000000..e39965c --- /dev/null +++ b/mininet/cli.py @@ -0,0 +1,503 @@ +""" +A simple command-line interface for Mininet. + +The Mininet CLI provides a simple control console which +makes it easy to talk to nodes. For example, the command + +mininet> h27 ifconfig + +runs 'ifconfig' on host h27. + +Having a single console rather than, for example, an xterm for each +node is particularly convenient for networks of any reasonable +size. + +The CLI automatically substitutes IP addresses for node names, +so commands like + +mininet> h2 ping h3 + +should work correctly and allow host h2 to ping host h3 + +Several useful commands are provided, including the ability to +list all nodes ('nodes'), to print out the network topology +('net') and to check connectivity ('pingall', 'pingpair') +and bandwidth ('iperf'.) +""" + +from subprocess import call +from cmd import Cmd +from os import isatty +from select import poll, POLLIN +import select +import errno +import sys +import time +import os +import atexit + +from mininet.log import info, output, error +from mininet.term import makeTerms, runX11 +from mininet.util import ( quietRun, dumpNodeConnections, + dumpPorts ) + +class CLI( Cmd ): + "Simple command-line interface to talk to nodes." 
+ + prompt = 'mininet> ' + + def __init__( self, mininet, stdin=sys.stdin, script=None, + **kwargs ): + """Start and run interactive or batch mode CLI + mininet: Mininet network object + stdin: standard input for CLI + script: script to run in batch mode""" + self.mn = mininet + # Local variable bindings for py command + self.locals = { 'net': mininet } + # Attempt to handle input + self.inPoller = poll() + self.inPoller.register( stdin ) + self.inputFile = script + Cmd.__init__( self, stdin=stdin, **kwargs ) + info( '*** Starting CLI:\n' ) + + if self.inputFile: + self.do_source( self.inputFile ) + return + + self.initReadline() + self.run() + + readlineInited = False + + @classmethod + def initReadline( cls ): + "Set up history if readline is available" + # Only set up readline once to prevent multiplying the history file + if cls.readlineInited: + return + cls.readlineInited = True + try: + # pylint: disable=import-outside-toplevel + from readline import ( read_history_file, write_history_file, + set_history_length ) + except ImportError: + pass + else: + history_path = os.path.expanduser( '~/.mininet_history' ) + if os.path.isfile( history_path ): + read_history_file( history_path ) + set_history_length( 1000 ) + + def writeHistory(): + "Write out history file" + try: + write_history_file( history_path ) + except IOError: + # Ignore probably spurious IOError + pass + atexit.register( writeHistory ) + + def run( self ): + "Run our cmdloop(), catching KeyboardInterrupt" + while True: + try: + # Make sure no nodes are still waiting + for node in self.mn.values(): + while node.waiting: + info( 'stopping', node, '\n' ) + node.sendInt() + node.waitOutput() + if self.isatty(): + quietRun( 'stty echo sane intr ^C' ) + self.cmdloop() + break + except KeyboardInterrupt: + # Output a message - unless it's also interrupted + # pylint: disable=broad-except + try: + output( '\nInterrupt\n' ) + except Exception: + pass + # pylint: enable=broad-except + + def emptyline( self ): + "Don't repeat last command when you hit return." + pass + + def getLocals( self ): + "Local variable bindings for py command" + self.locals.update( self.mn ) + return self.locals + + helpStr = ( + 'You may also send a command to a node using:\n' + ' command {args}\n' + 'For example:\n' + ' mininet> h1 ifconfig\n' + '\n' + 'The interpreter automatically substitutes IP addresses\n' + 'for node names when a node is the first arg, so commands\n' + 'like\n' + ' mininet> h2 ping h3\n' + 'should work.\n' + '\n' + 'Some character-oriented interactive commands require\n' + 'noecho:\n' + ' mininet> noecho h2 vi foo.py\n' + 'However, starting up an xterm/gterm is generally better:\n' + ' mininet> xterm h2\n\n' + ) + + def do_help( self, line ): # pylint: disable=arguments-renamed + "Describe available CLI commands." + Cmd.do_help( self, line ) + if line == '': + output( self.helpStr ) + + def do_nodes( self, _line ): + "List all nodes." + nodes = ' '.join( sorted( self.mn ) ) + output( 'available nodes are: \n%s\n' % nodes ) + + def do_ports( self, _line ): + "display ports and interfaces for each switch" + dumpPorts( self.mn.switches ) + + def do_net( self, _line ): + "List network connections." 
+ dumpNodeConnections( self.mn.values() ) + + def do_sh( self, line ): + """Run an external shell command + Usage: sh [cmd args]""" + assert self # satisfy pylint and allow override + call( line, shell=True ) + + # do_py() and do_px() need to catch any exception during eval()/exec() + # pylint: disable=broad-except + + def do_py( self, line ): + """Evaluate a Python expression. + Node names may be used, e.g.: py h1.cmd('ls')""" + try: + # pylint: disable=eval-used + result = eval( line, globals(), self.getLocals() ) + if result is None: + return + elif isinstance( result, str ): + output( result + '\n' ) + else: + output( repr( result ) + '\n' ) + except Exception as e: + output( str( e ) + '\n' ) + + # We are in fact using the exec() pseudo-function + # pylint: disable=exec-used + + def do_px( self, line ): + """Execute a Python statement. + Node names may be used, e.g.: px print h1.cmd('ls')""" + try: + exec( line, globals(), self.getLocals() ) + except Exception as e: + output( str( e ) + '\n' ) + + # pylint: enable=broad-except,exec-used + + def do_pingall( self, line ): + "Ping between all hosts." + self.mn.pingAll( line ) + + def do_pingpair( self, _line ): + "Ping between first two hosts, useful for testing." + self.mn.pingPair() + + def do_pingallfull( self, _line ): + "Ping between all hosts, returns all ping results." + self.mn.pingAllFull() + + def do_pingpairfull( self, _line ): + "Ping between first two hosts, returns all ping results." + self.mn.pingPairFull() + + def do_iperf( self, line ): + """Simple iperf TCP test between two (optionally specified) hosts. + Usage: iperf node1 node2""" + args = line.split() + if not args: + self.mn.iperf() + elif len(args) == 2: + hosts = [] + err = False + for arg in args: + if arg not in self.mn: + err = True + error( "node '%s' not in network\n" % arg ) + else: + hosts.append( self.mn[ arg ] ) + if not err: + self.mn.iperf( hosts ) + else: + error( 'invalid number of args: iperf src dst\n' ) + + def do_iperfudp( self, line ): + """Simple iperf UDP test between two (optionally specified) hosts. + Usage: iperfudp bw node1 node2""" + args = line.split() + if not args: + self.mn.iperf( l4Type='UDP' ) + elif len(args) == 3: + udpBw = args[ 0 ] + hosts = [] + err = False + for arg in args[ 1:3 ]: + if arg not in self.mn: + err = True + error( "node '%s' not in network\n" % arg ) + else: + hosts.append( self.mn[ arg ] ) + if not err: + self.mn.iperf( hosts, l4Type='UDP', udpBw=udpBw ) + else: + error( 'invalid number of args: iperfudp bw src dst\n' + + 'bw examples: 10M\n' ) + + def do_intfs( self, _line ): + "List interfaces." + for node in self.mn.values(): + output( '%s: %s\n' % + ( node.name, ','.join( node.intfNames() ) ) ) + + def do_dump( self, _line ): + "Dump node info." + for node in self.mn.values(): + output( '%s\n' % repr( node ) ) + + def do_link( self, line ): + """Bring link(s) between two nodes up or down. + Usage: link node1 node2 [up/down]""" + args = line.split() + if len(args) != 3: + error( 'invalid number of args: link end1 end2 [up down]\n' ) + elif args[ 2 ] not in [ 'up', 'down' ]: + error( 'invalid type: link end1 end2 [up down]\n' ) + else: + self.mn.configLinkStatus( *args ) + + def do_xterm( self, line, term='xterm' ): + """Spawn xterm(s) for the given node(s). 
+ Usage: xterm node1 node2 ...""" + args = line.split() + if not args: + error( 'usage: %s node1 node2 ...\n' % term ) + else: + for arg in args: + if arg not in self.mn: + error( "node '%s' not in network\n" % arg ) + else: + node = self.mn[ arg ] + self.mn.terms += makeTerms( [ node ], term = term ) + + def do_x( self, line ): + """Create an X11 tunnel to the given node, + optionally starting a client. + Usage: x node [cmd args]""" + args = line.split() + if not args: + error( 'usage: x node [cmd args]...\n' ) + else: + node = self.mn[ args[ 0 ] ] + cmd = args[ 1: ] + self.mn.terms += runX11( node, cmd ) + + def do_gterm( self, line ): + """Spawn gnome-terminal(s) for the given node(s). + Usage: gterm node1 node2 ...""" + self.do_xterm( line, term='gterm' ) + + def do_exit( self, _line ): + "Exit" + assert self # satisfy pylint and allow override + return 'exited by user command' + + def do_quit( self, line ): + "Exit" + return self.do_exit( line ) + + def do_EOF( self, line ): + "Exit" + output( '\n' ) + return self.do_exit( line ) + + def isatty( self ): + "Is our standard input a tty?" + return isatty( self.stdin.fileno() ) + + def do_noecho( self, line ): + """Run an interactive command with echoing turned off. + Usage: noecho [cmd args]""" + if self.isatty(): + quietRun( 'stty -echo' ) + self.default( line ) + if self.isatty(): + quietRun( 'stty echo' ) + + def do_source( self, line ): + """Read commands from an input file. + Usage: source """ + args = line.split() + if len(args) != 1: + error( 'usage: source \n' ) + return + try: + with open( args[ 0 ] ) as self.inputFile: + while True: + line = self.inputFile.readline() + if len( line ) > 0: + self.onecmd( line ) + else: + break + except IOError: + error( 'error reading file %s\n' % args[ 0 ] ) + self.inputFile.close() + self.inputFile = None + + def do_dpctl( self, line ): + """Run dpctl (or ovs-ofctl) command on all switches. + Usage: dpctl command [arg1] [arg2] ...""" + args = line.split() + if len(args) < 1: + error( 'usage: dpctl command [arg1] [arg2] ...\n' ) + return + for sw in self.mn.switches: + output( '*** ' + sw.name + ' ' + ('-' * 72) + '\n' ) + output( sw.dpctl( *args ) ) + + def do_time( self, line ): + "Measure time taken for any command in Mininet." + start = time.time() + self.onecmd(line) + elapsed = time.time() - start + self.stdout.write("*** Elapsed time: %0.6f secs\n" % elapsed) + + def do_links( self, _line ): + "Report on links" + for link in self.mn.links: + output( link, link.status(), '\n' ) + + def do_switch( self, line ): + "Starts or stops a switch" + args = line.split() + if len(args) != 2: + error( 'invalid number of args: switch ' + '{start, stop}\n' ) + return + sw = args[ 0 ] + command = args[ 1 ] + if sw not in self.mn or self.mn.get( sw ) not in self.mn.switches: + error( 'invalid switch: %s\n' % args[ 1 ] ) + else: + sw = args[ 0 ] + command = args[ 1 ] + if command == 'start': + self.mn.get( sw ).start( self.mn.controllers ) + elif command == 'stop': + self.mn.get( sw ).stop( deleteIntfs=False ) + else: + error( 'invalid command: ' + 'switch {start, stop}\n' ) + + def do_wait( self, _line ): + "Wait until all switches have connected to a controller" + self.mn.waitConnected() + + def default( self, line ): + """Called on an input line when the command prefix is not recognized. + Overridden to run shell commands when a node is the first + CLI argument. 
Past the first CLI argument, node names are + automatically replaced with corresponding IP addrs.""" + + first, args, line = self.parseline( line ) + + if first in self.mn: + if not args: + error( '*** Please enter a command for node: %s \n' + % first ) + return + node = self.mn[ first ] + rest = args.split( ' ' ) + # Substitute IP addresses for node names in command + # If updateIP() returns None, then use node name + rest = [ self.mn[ arg ].defaultIntf().updateIP() or arg + if arg in self.mn else arg + for arg in rest ] + rest = ' '.join( rest ) + # Run cmd on node: + node.sendCmd( rest ) + self.waitForNode( node ) + else: + error( '*** Unknown command: %s\n' % line ) + + def waitForNode( self, node ): + "Wait for a node to finish, and print its output." + # Pollers + nodePoller = poll() + nodePoller.register( node.stdout ) + bothPoller = poll() + bothPoller.register( self.stdin, POLLIN ) + bothPoller.register( node.stdout, POLLIN ) + if self.isatty(): + # Buffer by character, so that interactive + # commands sort of work + quietRun( 'stty -icanon min 1' ) + while True: + try: + bothPoller.poll() + # XXX BL: this doesn't quite do what we want. + # pylint: disable=condition-evals-to-constant + if False and self.inputFile: + key = self.inputFile.read( 1 ) + if key != '': + node.write( key ) + else: + self.inputFile = None + # pylint: enable=condition-evals-to-constant + if isReadable( self.inPoller ): + key = self.stdin.read( 1 ) + node.write( key ) + if isReadable( nodePoller ): + data = node.monitor() + output( data ) + if not node.waiting: + break + except KeyboardInterrupt: + # There is an at least one race condition here, since + # it's possible to interrupt ourselves after we've + # read data but before it has been printed. + node.sendInt() + except select.error as e: + # pylint: disable=unpacking-non-sequence + # pylint: disable=unbalanced-tuple-unpacking + errno_, errmsg = e.args + if errno_ != errno.EINTR: + error( "select.error: %s, %s" % (errno_, errmsg) ) + node.sendInt() + + def precmd( self, line ): + "allow for comments in the cli" + if '#' in line: + line = line.split( '#' )[ 0 ] + return line + + +# Helper functions + +def isReadable( poller ): + "Check whether a Poll object has a readable fd." + for fdmask in poller.poll( 0 ): + mask = fdmask[ 1 ] + if mask & POLLIN: + return True + return False diff --git a/mininet/examples b/mininet/examples new file mode 120000 index 0000000..a6573af --- /dev/null +++ b/mininet/examples @@ -0,0 +1 @@ +../examples \ No newline at end of file diff --git a/mininet/link.py b/mininet/link.py new file mode 100644 index 0000000..3b0e13a --- /dev/null +++ b/mininet/link.py @@ -0,0 +1,582 @@ +""" +link.py: interface and link abstractions for mininet + +It seems useful to bundle functionality for interfaces into a single +class. + +Also it seems useful to enable the possibility of multiple flavors of +links, including: + +- simple veth pairs +- tunneled links +- patchable links (which can be disconnected and reconnected via a patchbay) +- link simulators (e.g. 
wireless) + +Basic division of labor: + + Nodes: know how to execute commands + Intfs: know how to configure themselves + Links: know how to connect nodes together + +Intf: basic interface object that can configure itself +TCIntf: interface with bandwidth limiting and delay via tc + +Link: basic link class for creating veth pairs +""" + +import re + +from mininet.log import info, error, debug +from mininet.util import makeIntfPair + +# Make pylint happy: +# pylint: disable=too-many-arguments + + +class Intf( object ): + + "Basic interface object that can configure itself." + + def __init__( self, name, node=None, port=None, link=None, + mac=None, **params ): + """name: interface name (e.g. h1-eth0) + node: owning node (where this intf most likely lives) + link: parent link if we're part of a link + other arguments are passed to config()""" + self.node = node + self.name = name + self.link = link + self.mac = mac + self.ip, self.prefixLen = None, None + + # if interface is lo, we know the ip is 127.0.0.1. + # This saves an ifconfig command per node + if self.name == 'lo': + self.ip = '127.0.0.1' + self.prefixLen = 8 + # Add to node (and move ourselves if necessary ) + if node: + moveIntfFn = params.pop( 'moveIntfFn', None ) + if moveIntfFn: + node.addIntf( self, port=port, moveIntfFn=moveIntfFn ) + else: + node.addIntf( self, port=port ) + # Save params for future reference + self.params = params + self.config( **params ) + + def cmd( self, *args, **kwargs ): + "Run a command in our owning node" + return self.node.cmd( *args, **kwargs ) + + def ifconfig( self, *args ): + "Configure ourselves using ifconfig" + return self.cmd( 'ifconfig', self.name, *args ) + + def setIP( self, ipstr, prefixLen=None ): + """Set our IP address""" + # This is a sign that we should perhaps rethink our prefix + # mechanism and/or the way we specify IP addresses + if '/' in ipstr: + self.ip, self.prefixLen = ipstr.split( '/' ) + return self.ifconfig( ipstr, 'up' ) + else: + if prefixLen is None: + raise Exception( 'No prefix length set for IP address %s' + % ( ipstr, ) ) + self.ip, self.prefixLen = ipstr, prefixLen + return self.ifconfig( '%s/%s' % ( ipstr, prefixLen ) ) + + def setMAC( self, macstr ): + """Set the MAC address for an interface. + macstr: MAC address as string""" + self.mac = macstr + return ( self.ifconfig( 'down' ) + + self.ifconfig( 'hw', 'ether', macstr ) + + self.ifconfig( 'up' ) ) + + _ipMatchRegex = re.compile( r'\d+\.\d+\.\d+\.\d+' ) + _macMatchRegex = re.compile( r'..:..:..:..:..:..' ) + + def updateIP( self ): + "Return updated IP address based on ifconfig" + # use pexec instead of node.cmd so that we dont read + # backgrounded output from the cli. + ifconfig, _err, _exitCode = self.node.pexec( + 'ifconfig %s' % self.name ) + ips = self._ipMatchRegex.findall( ifconfig ) + self.ip = ips[ 0 ] if ips else None + return self.ip + + def updateMAC( self ): + "Return updated MAC address based on ifconfig" + ifconfig = self.ifconfig() + macs = self._macMatchRegex.findall( ifconfig ) + self.mac = macs[ 0 ] if macs else None + return self.mac + + # Instead of updating ip and mac separately, + # use one ifconfig call to do it simultaneously. + # This saves an ifconfig command, which improves performance. + + def updateAddr( self ): + "Return IP address and MAC address based on ifconfig." 
+ ifconfig = self.ifconfig() + ips = self._ipMatchRegex.findall( ifconfig ) + macs = self._macMatchRegex.findall( ifconfig ) + self.ip = ips[ 0 ] if ips else None + self.mac = macs[ 0 ] if macs else None + return self.ip, self.mac + + def IP( self ): + "Return IP address" + return self.ip + + def MAC( self ): + "Return MAC address" + return self.mac + + def isUp( self, setUp=False ): + "Return whether interface is up" + if setUp: + cmdOutput = self.ifconfig( 'up' ) + # no output indicates success + if cmdOutput: + error( "Error setting %s up: %s " % ( self.name, cmdOutput ) ) + return False + else: + return True + else: + return "UP" in self.ifconfig() + + def rename( self, newname ): + "Rename interface" + if self.node and self.name in self.node.nameToIntf: + # rename intf in node's nameToIntf + self.node.nameToIntf[newname] = self.node.nameToIntf.pop(self.name) + self.ifconfig( 'down' ) + result = self.cmd( 'ip link set', self.name, 'name', newname ) + self.name = newname + self.ifconfig( 'up' ) + return result + + # The reason why we configure things in this way is so + # That the parameters can be listed and documented in + # the config method. + # Dealing with subclasses and superclasses is slightly + # annoying, but at least the information is there! + + def setParam( self, results, method, **param ): + """Internal method: configure a *single* parameter + results: dict of results to update + method: config method name + param: arg=value (ignore if value=None) + value may also be list or dict""" + name, value = list( param.items() )[ 0 ] + f = getattr( self, method, None ) + if not f or value is None: + return None + if isinstance( value, list ): + result = f( *value ) + elif isinstance( value, dict ): + result = f( **value ) + else: + result = f( value ) + results[ name ] = result + return result + + def config( self, mac=None, ip=None, ifconfig=None, + up=True, **_params ): + """Configure Node according to (optional) parameters: + mac: MAC address + ip: IP address + ifconfig: arbitrary interface configuration + Subclasses should override this method and call + the parent class's config(**params)""" + # If we were overriding this method, we would call + # the superclass config method here as follows: + # r = Parent.config( **params ) + r = {} + self.setParam( r, 'setMAC', mac=mac ) + self.setParam( r, 'setIP', ip=ip ) + self.setParam( r, 'isUp', up=up ) + self.setParam( r, 'ifconfig', ifconfig=ifconfig ) + return r + + def delete( self ): + "Delete interface" + self.cmd( 'ip link del ' + self.name ) + # We used to do this, but it slows us down: + # if self.node.inNamespace: + # Link may have been dumped into root NS + # quietRun( 'ip link del ' + self.name ) + self.node.delIntf( self ) + self.link = None + + def status( self ): + "Return intf status as a string" + links, _err, _result = self.node.pexec( 'ip link show' ) + if self.name in links: + return "OK" + else: + return "MISSING" + + def __repr__( self ): + return '<%s %s>' % ( self.__class__.__name__, self.name ) + + def __str__( self ): + return self.name + + +class TCIntf( Intf ): + """Interface customized by tc (traffic control) utility + Allows specification of bandwidth limits (various methods) + as well as delay, loss and max queue length""" + + # The parameters we use seem to work reasonably up to 1 Gb/sec + # For higher data rates, we will probably need to change them. 
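# A small sketch of how these shaping parameters are normally supplied, via
# TCLink (defined later in this file); the specific numbers are illustrative
# assumptions, not values taken from this commit:

from mininet.net import Mininet
from mininet.link import TCLink

net = Mininet( link=TCLink )
h1, h2 = net.addHost( 'h1' ), net.addHost( 'h2' )
s1 = net.addSwitch( 's1' )
net.addLink( h1, s1, bw=10, delay='5ms', loss=1, max_queue_size=1000 )
net.addLink( h2, s1, bw=10, delay='5ms' )
net.start()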
+ bwParamMax = 1000 + + def bwCmds( self, bw=None, speedup=0, use_hfsc=False, use_tbf=False, + latency_ms=None, enable_ecn=False, enable_red=False ): + "Return tc commands to set bandwidth" + + cmds, parent = [], ' root ' + + if bw and ( bw < 0 or bw > self.bwParamMax ): + error( 'Bandwidth limit', bw, 'is outside supported range 0..%d' + % self.bwParamMax, '- ignoring\n' ) + elif bw is not None: + # BL: this seems a bit brittle... + if ( speedup > 0 and + self.node.name[0:1] == 's' ): + bw = speedup + # This may not be correct - we should look more closely + # at the semantics of burst (and cburst) to make sure we + # are specifying the correct sizes. For now I have used + # the same settings we had in the mininet-hifi code. + if use_hfsc: + cmds += [ '%s qdisc add dev %s root handle 5:0 hfsc default 1', + '%s class add dev %s parent 5:0 classid 5:1 hfsc sc ' + + 'rate %fMbit ul rate %fMbit' % ( bw, bw ) ] + elif use_tbf: + if latency_ms is None: + latency_ms = 15.0 * 8 / bw + cmds += [ '%s qdisc add dev %s root handle 5: tbf ' + + 'rate %fMbit burst 15000 latency %fms' % + ( bw, latency_ms ) ] + else: + cmds += [ '%s qdisc add dev %s root handle 5:0 htb default 1', + '%s class add dev %s parent 5:0 classid 5:1 htb ' + + 'rate %fMbit burst 15k' % bw ] + parent = ' parent 5:1 ' + + # ECN or RED + if enable_ecn: + cmds += [ '%s qdisc add dev %s' + parent + + 'handle 6: red limit 1000000 ' + + 'min 30000 max 35000 avpkt 1500 ' + + 'burst 20 ' + + 'bandwidth %fmbit probability 1 ecn' % bw ] + parent = ' parent 6: ' + elif enable_red: + cmds += [ '%s qdisc add dev %s' + parent + + 'handle 6: red limit 1000000 ' + + 'min 30000 max 35000 avpkt 1500 ' + + 'burst 20 ' + + 'bandwidth %fmbit probability 1' % bw ] + parent = ' parent 6: ' + return cmds, parent + + @staticmethod + def delayCmds( parent, delay=None, jitter=None, + loss=None, max_queue_size=None ): + "Internal method: return tc commands for delay and loss" + cmds = [] + if loss and ( loss < 0 or loss > 100 ): + error( 'Bad loss percentage', loss, '%%\n' ) + else: + # Delay/jitter/loss/max queue size + netemargs = '%s%s%s%s' % ( + 'delay %s ' % delay if delay is not None else '', + '%s ' % jitter if jitter is not None else '', + 'loss %.5f ' % loss if (loss is not None and loss > 0) else '', + 'limit %d' % max_queue_size if max_queue_size is not None + else '' ) + if netemargs: + cmds = [ '%s qdisc add dev %s ' + parent + + ' handle 10: netem ' + + netemargs ] + parent = ' parent 10:1 ' + return cmds, parent + + def tc( self, cmd, tc='tc' ): + "Execute tc command for our interface" + c = cmd % (tc, self) # Add in tc command and our name + debug(" *** executing command: %s\n" % c) + return self.cmd( c ) + + def config( # pylint: disable=arguments-renamed,arguments-differ + self, + bw=None, delay=None, jitter=None, loss=None, + gro=False, txo=True, rxo=True, + speedup=0, use_hfsc=False, use_tbf=False, + latency_ms=None, enable_ecn=False, enable_red=False, + max_queue_size=None, **params ): + """Configure the port and set its properties. + bw: bandwidth in b/s (e.g. '10m') + delay: transmit delay (e.g. '1ms' ) + jitter: jitter (e.g. '1ms') + loss: loss (e.g. 
'1%' ) + gro: enable GRO (False) + txo: enable transmit checksum offload (True) + rxo: enable receive checksum offload (True) + speedup: experimental switch-side bw option + use_hfsc: use HFSC scheduling + use_tbf: use TBF scheduling + latency_ms: TBF latency parameter + enable_ecn: enable ECN (False) + enable_red: enable RED (False) + max_queue_size: queue limit parameter for netem""" + + # Support old names for parameters + gro = not params.pop( 'disable_gro', not gro ) + + result = Intf.config( self, **params) + + def on( isOn ): + "Helper method: bool -> 'on'/'off'" + return 'on' if isOn else 'off' + + # Set offload parameters with ethool + self.cmd( 'ethtool -K', self, + 'gro', on( gro ), + 'tx', on( txo ), + 'rx', on( rxo ) ) + + # Optimization: return if nothing else to configure + # Question: what happens if we want to reset things? + if ( bw is None and not delay and not loss + and max_queue_size is None ): + return None + + # Clear existing configuration + tcoutput = self.tc( '%s qdisc show dev %s' ) + if "priomap" not in tcoutput and "noqueue" not in tcoutput: + cmds = [ '%s qdisc del dev %s root' ] + else: + cmds = [] + + # Bandwidth limits via various methods + bwcmds, parent = self.bwCmds( bw=bw, speedup=speedup, + use_hfsc=use_hfsc, use_tbf=use_tbf, + latency_ms=latency_ms, + enable_ecn=enable_ecn, + enable_red=enable_red ) + cmds += bwcmds + + # Delay/jitter/loss/max_queue_size using netem + delaycmds, parent = self.delayCmds( delay=delay, jitter=jitter, + loss=loss, + max_queue_size=max_queue_size, + parent=parent ) + cmds += delaycmds + + # Ugly but functional: display configuration info + stuff = ( ( [ '%.2fMbit' % bw ] if bw is not None else [] ) + + ( [ '%s delay' % delay ] if delay is not None else [] ) + + ( [ '%s jitter' % jitter ] if jitter is not None else [] ) + + ( ['%.5f%% loss' % loss ] if loss is not None else [] ) + + ( [ 'ECN' ] if enable_ecn else [ 'RED' ] + if enable_red else [] ) ) + info( '(' + ' '.join( stuff ) + ') ' ) + + # Execute all the commands in our node + debug("at map stage w/cmds: %s\n" % cmds) + tcoutputs = [ self.tc(cmd) for cmd in cmds ] + for output in tcoutputs: + if output != '': + error( "*** Error: %s" % output ) + debug( "cmds:", cmds, '\n' ) + debug( "outputs:", tcoutputs, '\n' ) + result[ 'tcoutputs'] = tcoutputs + result[ 'parent' ] = parent + + return result + + +class Link( object ): + + """A basic link is just a veth pair. + Other types of links could be tunnels, link emulators, etc..""" + + # pylint: disable=too-many-branches + def __init__( self, node1, node2, port1=None, port2=None, + intfName1=None, intfName2=None, addr1=None, addr2=None, + intf=Intf, cls1=None, cls2=None, params1=None, + params2=None, fast=True, **params ): + """Create veth link to another node, making two new interfaces. + node1: first node + node2: second node + port1: node1 port number (optional) + port2: node2 port number (optional) + intf: default interface class/constructor + cls1, cls2: optional interface-specific constructors + intfName1: node1 interface name (optional) + intfName2: node2 interface name (optional) + params1: parameters for interface 1 (optional) + params2: parameters for interface 2 (optional) + **params: additional parameters for both interfaces""" + + # This is a bit awkward; it seems that having everything in + # params is more orthogonal, but being able to specify + # in-line arguments is more convenient! So we support both. 
+ params1 = dict( params1 ) if params1 else {} + params2 = dict( params2 ) if params2 else {} + if port1 is not None: + params1[ 'port' ] = port1 + if port2 is not None: + params2[ 'port' ] = port2 + if 'port' not in params1: + params1[ 'port' ] = node1.newPort() + if 'port' not in params2: + params2[ 'port' ] = node2.newPort() + if not intfName1: + intfName1 = self.intfName( node1, params1[ 'port' ] ) + if not intfName2: + intfName2 = self.intfName( node2, params2[ 'port' ] ) + + # Update with remaining parameter list + params1.update( params ) + params2.update( params ) + + self.fast = fast + if fast: + params1.setdefault( 'moveIntfFn', self._ignore ) + params2.setdefault( 'moveIntfFn', self._ignore ) + self.makeIntfPair( intfName1, intfName2, addr1, addr2, + node1, node2, deleteIntfs=False ) + else: + self.makeIntfPair( intfName1, intfName2, addr1, addr2 ) + + if not cls1: + cls1 = intf + if not cls2: + cls2 = intf + + intf1 = cls1( name=intfName1, node=node1, + link=self, mac=addr1, **params1 ) + intf2 = cls2( name=intfName2, node=node2, + link=self, mac=addr2, **params2 ) + + # All we are is dust in the wind, and our two interfaces + self.intf1, self.intf2 = intf1, intf2 + + # pylint: enable=too-many-branches + + @staticmethod + def _ignore( *args, **kwargs ): + "Ignore any arguments" + pass + + def intfName( self, node, n ): + "Construct a canonical interface name node-ethN for interface n." + # Leave this as an instance method for now + assert self + return node.name + '-eth' + repr( n ) + + @classmethod + def makeIntfPair( cls, intfname1, intfname2, addr1=None, addr2=None, + node1=None, node2=None, deleteIntfs=True ): + """Create pair of interfaces + intfname1: name for interface 1 + intfname2: name for interface 2 + addr1: MAC address for interface 1 (optional) + addr2: MAC address for interface 2 (optional) + node1: home node for interface 1 (optional) + node2: home node for interface 2 (optional) + (override this method [and possibly delete()] + to change link type)""" + # Leave this as a class method for now + assert cls + return makeIntfPair( intfname1, intfname2, addr1, addr2, node1, node2, + deleteIntfs=deleteIntfs ) + + def delete( self ): + "Delete this link" + self.intf1.delete() + self.intf1 = None + self.intf2.delete() + self.intf2 = None + + def stop( self ): + "Override to stop and clean up link as needed" + self.delete() + + def status( self ): + "Return link status as a string" + return "(%s %s)" % ( self.intf1.status(), self.intf2.status() ) + + def __str__( self ): + return '%s<->%s' % ( self.intf1, self.intf2 ) + + +class OVSIntf( Intf ): + "Patch interface on an OVSSwitch" + + def ifconfig( self, *args ): + cmd = ' '.join( args ) + if cmd == 'up': + # OVSIntf is always up + return + else: + raise Exception( 'OVSIntf cannot do ifconfig ' + cmd ) + + +class OVSLink( Link ): + """Link that makes patch links between OVSSwitches + Warning: in testing we have found that no more + than ~64 OVS patch links should be used in row.""" + + def __init__( self, node1, node2, **kwargs ): + "See Link.__init__() for options" + if 'OVSSwitch' not in globals(): + # pylint: disable=import-outside-toplevel,cyclic-import + from mininet.node import OVSSwitch + self.isPatchLink = False + if ( isinstance( node1, OVSSwitch ) and + isinstance( node2, OVSSwitch ) ): + self.isPatchLink = True + kwargs.update( cls1=OVSIntf, cls2=OVSIntf ) + Link.__init__( self, node1, node2, **kwargs ) + + # pylint: disable=arguments-renamed, arguments-differ, signature-differs + def makeIntfPair( self, 
*args, **kwargs ): + "Usually delegated to OVSSwitch" + if self.isPatchLink: + return None, None + else: + return Link.makeIntfPair( *args, **kwargs ) + + +class TCLink( Link ): + "Link with TC interfaces" + def __init__( self, *args, **kwargs): + kwargs.setdefault( 'cls1', TCIntf ) + kwargs.setdefault( 'cls2', TCIntf ) + Link.__init__( self, *args, **kwargs) + + +class TCULink( TCLink ): + """TCLink with default settings optimized for UserSwitch + (txo=rxo=0/False). Unfortunately with recent Linux kernels, + enabling TX and RX checksum offload on veth pairs doesn't work + well with UserSwitch: either it gets terrible performance or + TCP packets with bad checksums are generated, forwarded, and + *dropped* due to having bad checksums! OVS and LinuxBridge seem + to cope with this somehow, but it is likely to be an issue with + many software Ethernet bridges.""" + + def __init__( self, *args, **kwargs ): + kwargs.update( txo=False, rxo=False ) + TCLink.__init__( self, *args, **kwargs ) diff --git a/mininet/log.py b/mininet/log.py new file mode 100644 index 0000000..881d22c --- /dev/null +++ b/mininet/log.py @@ -0,0 +1,172 @@ +"Logging functions for Mininet." + +import logging +from logging import Logger +import types + + +# Create a new loglevel, 'CLI info', which enables a Mininet user to see only +# the output of the commands they execute, plus any errors or warnings. This +# level is in between info and warning. CLI info-level commands should not be +# printed during regression tests. +OUTPUT = 25 + +LEVELS = { 'debug': logging.DEBUG, + 'info': logging.INFO, + 'output': OUTPUT, + 'warning': logging.WARNING, + 'warn': logging.WARNING, + 'error': logging.ERROR, + 'critical': logging.CRITICAL } + +# change this to logging.INFO to get printouts when running unit tests +LOGLEVELDEFAULT = OUTPUT + +# default: '%(asctime)s - %(name)s - %(levelname)s - %(message)s' +LOGMSGFORMAT = '%(message)s' + + +# Modified from python2.5/__init__.py +class StreamHandlerNoNewline( logging.StreamHandler ): + """StreamHandler that doesn't print newlines by default. + Since StreamHandler automatically adds newlines, define a mod to more + easily support interactive mode when we want it, or errors-only logging + for running unit tests.""" + + def emit( self, record ): + """Emit a record. + If a formatter is specified, it is used to format the record. + The record is then written to the stream with a trailing newline + [ N.B. this may be removed depending on feedback ]. If exception + information is present, it is formatted using + traceback.printException and appended to the stream.""" + try: + msg = self.format( record ) + fs = '%s' # was '%s\n' + if not hasattr( types, 'UnicodeType' ): # if no unicode support... 
+ self.stream.write( fs % msg ) + else: + try: + self.stream.write( fs % msg ) + except UnicodeError: + self.stream.write( fs % msg.encode( 'UTF-8' ) ) + self.flush() + except ( KeyboardInterrupt, SystemExit ): + raise + except: # noqa pylint: disable=bare-except + self.handleError( record ) + + +class Singleton( type ): + """Singleton pattern from Wikipedia + See http://en.wikipedia.org/wiki/Singleton_Pattern + + Intended to be used as a __metaclass_ param, as shown for the class + below.""" + + def __init__( cls, name, bases, dict_ ): + super( Singleton, cls ).__init__( name, bases, dict_ ) + cls.instance = None + + def __call__( cls, *args, **kw ): + if cls.instance is None: + cls.instance = super( Singleton, cls ).__call__( *args, **kw ) + return cls.instance + + +class MininetLogger( Logger, object ): + """Mininet-specific logger + Enable each mininet .py file to with one import: + + from mininet.log import [lg, info, error] + + ...get a default logger that doesn't require one newline per logging + call. + + Inherit from object to ensure that we have at least one new-style base + class, and can then use the __metaclass__ directive, to prevent this + error: + + TypeError: Error when calling the metaclass bases + a new-style class can't have only classic bases + + If Python2.5/logging/__init__.py defined Filterer as a new-style class, + via Filterer( object ): rather than Filterer, we wouldn't need this. + + Use singleton pattern to ensure only one logger is ever created.""" + + __metaclass__ = Singleton + + def __init__( self, name="mininet" ): + + Logger.__init__( self, name ) + + # create console handler + ch = StreamHandlerNoNewline() + # create formatter + formatter = logging.Formatter( LOGMSGFORMAT ) + # add formatter to ch + ch.setFormatter( formatter ) + # add ch to lg and initialize log level + self.addHandler( ch ) + self.ch = ch + self.setLogLevel() + + def setLogLevel( self, levelname=None ): + """Setup loglevel. + Convenience function to support lowercase names. + levelName: level name from LEVELS""" + if levelname and levelname not in LEVELS: + print(LEVELS) + raise Exception( 'setLogLevel: unknown levelname %s' % levelname ) + level = LEVELS.get( levelname, LOGLEVELDEFAULT ) + self.setLevel( level ) + self.ch.setLevel( level ) + + def output( self, msg, *args, **kwargs ): + """Log 'msg % args' with severity 'OUTPUT'. + + To pass exception information, use the keyword argument exc_info + with a true value, e.g. + + logger.warning("Houston, we have a %s", "cli output", exc_info=1) + """ + if getattr( self.manager, 'disabled', 0 ) >= OUTPUT: + return + if self.isEnabledFor( OUTPUT ): + self._log( OUTPUT, msg, args, kwargs ) + + +# Make things a bit more convenient by adding aliases +# (info, warn, error, debug) and allowing info( 'this', 'is', 'OK' ) +# In the future we may wish to make things more efficient by only +# doing the join (and calling the function) unless the logging level +# is high enough. + +def makeListCompatible( fn ): + """Return a new function allowing fn( 'a 1 b' ) to be called as + newfn( 'a', 1, 'b' )""" + + def newfn( *args ): + "Generated function. Closure-ish." 
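# A brief usage sketch of the list-compatible wrappers produced here,
# together with setLogLevel (illustrative only):

from mininet.log import setLogLevel, info, warning

setLogLevel( 'info' )             # lowercase level names from LEVELS
info( 'host', 'h1', 'is up\n' )   # multiple args are joined with spaces
warning( 'this is a warning\n' )  # newlines must be supplied explicitly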
+ if len( args ) == 1: + return fn( *args ) + args = ' '.join( str( arg ) for arg in args ) + return fn( args ) + + # Fix newfn's name and docstring + setattr( newfn, '__name__', fn.__name__ ) + setattr( newfn, '__doc__', fn.__doc__ ) + return newfn + + +# Initialize logger and logging functions + +logging.setLoggerClass( MininetLogger ) +lg = logging.getLogger( "mininet" ) +_loggers = lg.info, lg.output, lg.warning, lg.error, lg.debug +_loggers = tuple( makeListCompatible( logger ) for logger in _loggers ) +lg.info, lg.output, lg.warning, lg.error, lg.debug = _loggers +info, output, warning, error, debug = _loggers +warn = warning # alternate/old name +setLogLevel = lg.setLogLevel diff --git a/mininet/moduledeps.py b/mininet/moduledeps.py new file mode 100644 index 0000000..470465a --- /dev/null +++ b/mininet/moduledeps.py @@ -0,0 +1,72 @@ +"Module dependency utility functions for Mininet." + +from os import environ +from sys import exit # pylint: disable=redefined-builtin + +from mininet.util import quietRun, BaseString +from mininet.log import info, error, debug + + +def lsmod(): + "Return output of lsmod." + return quietRun( 'lsmod' ) + +def rmmod( mod ): + """Return output of lsmod. + mod: module string""" + return quietRun( [ 'rmmod', mod ] ) + +def modprobe( mod ): + """Return output of modprobe + mod: module string""" + return quietRun( [ 'modprobe', mod ] ) + + +OF_KMOD = 'ofdatapath' +OVS_KMOD = 'openvswitch_mod' # Renamed 'openvswitch' in OVS 1.7+/Linux 3.5+ +TUN = 'tun' + +def moduleDeps( subtract=None, add=None ): + """Handle module dependencies. + subtract: string or list of module names to remove, if already loaded + add: string or list of module names to add, if not already loaded""" + subtract = subtract if subtract is not None else [] + add = add if add is not None else [] + if isinstance( subtract, BaseString ): + subtract = [ subtract ] + if isinstance( add, BaseString ): + add = [ add ] + for mod in subtract: + if mod in lsmod(): + info( '*** Removing ' + mod + '\n' ) + rmmodOutput = rmmod( mod ) + if rmmodOutput: + error( 'Error removing ' + mod + ': "%s">\n' % rmmodOutput ) + exit( 1 ) + if mod in lsmod(): + error( 'Failed to remove ' + mod + '; still there!\n' ) + exit( 1 ) + for mod in add: + if mod not in lsmod(): + info( '*** Loading ' + mod + '\n' ) + modprobeOutput = modprobe( mod ) + if modprobeOutput: + error( 'Error inserting ' + mod + + ' - is it installed and available via modprobe?\n' + + 'Error was: "%s"\n' % modprobeOutput ) + if mod not in lsmod(): + error( 'Failed to insert ' + mod + ' - quitting.\n' ) + exit( 1 ) + else: + debug( '*** ' + mod + ' already loaded\n' ) + + +def pathCheck( *args, **kwargs ): + "Make sure each program in *args can be found in $PATH." + moduleName = kwargs.get( 'moduleName', 'it' ) + for arg in args: + if not quietRun( 'which ' + arg ): + error( 'Cannot find required executable %s.\n' % arg + + 'Please make sure that %s is installed ' % moduleName + + 'and available in your $PATH:\n(%s)\n' % environ[ 'PATH' ] ) + exit( 1 ) diff --git a/mininet/net.py b/mininet/net.py new file mode 100755 index 0000000..5760832 --- /dev/null +++ b/mininet/net.py @@ -0,0 +1,1023 @@ +""" + + Mininet: A simple networking testbed for OpenFlow/SDN! + +author: Bob Lantz (rlantz@cs.stanford.edu) +author: Brandon Heller (brandonh@stanford.edu) + +Mininet creates scalable OpenFlow test networks by using +process-based virtualization and network namespaces. + +Simulated hosts are created as processes in separate network +namespaces. 
This allows a complete OpenFlow network to be simulated on +top of a single Linux kernel. + +Each host has: + +A virtual console (pipes to a shell) +A virtual interfaces (half of a veth pair) +A parent shell (and possibly some child processes) in a namespace + +Hosts have a network interface which is configured via ifconfig/ip +link/etc. + +This version supports both the kernel and user space datapaths +from the OpenFlow reference implementation (openflowswitch.org) +as well as OpenVSwitch (openvswitch.org.) + +In kernel datapath mode, the controller and switches are simply +processes in the root namespace. + +Kernel OpenFlow datapaths are instantiated using dpctl(8), and are +attached to the one side of a veth pair; the other side resides in the +host namespace. In this mode, switch processes can simply connect to the +controller via the loopback interface. + +In user datapath mode, the controller and switches can be full-service +nodes that live in their own network namespaces and have management +interfaces and IP addresses on a control network (e.g. 192.168.123.1, +currently routed although it could be bridged.) + +In addition to a management interface, user mode switches also have +several switch interfaces, halves of veth pairs whose other halves +reside in the host nodes that the switches are connected to. + +Consistent, straightforward naming is important in order to easily +identify hosts, switches and controllers, both from the CLI and +from program code. Interfaces are named to make it easy to identify +which interfaces belong to which node. + +The basic naming scheme is as follows: + + Host nodes are named h1-hN + Switch nodes are named s1-sN + Controller nodes are named c0-cN + Interfaces are named {nodename}-eth0 .. {nodename}-ethN + +Note: If the network topology is created using mininet.topo, then +node numbers are unique among hosts and switches (e.g. we have +h1..hN and SN..SN+M) and also correspond to their default IP addresses +of 10.x.y.z/8 where x.y.z is the base-256 representation of N for +hN. This mapping allows easy determination of a node's IP +address from its name, e.g. h1 -> 10.0.0.1, h257 -> 10.0.1.1. + +Note also that 10.0.0.1 can often be written as 10.1 for short, e.g. +"ping 10.1" is equivalent to "ping 10.0.0.1". + +Currently we wrap the entire network in a 'mininet' object, which +constructs a simulated network based on a network topology created +using a topology object (e.g. LinearTopo) from mininet.topo or +mininet.topolib, and a Controller which the switches will connect +to. Several configuration options are provided for functions such as +automatically setting MAC addresses, populating the ARP table, or +even running a set of terminals to allow direct interaction with nodes. + +After the network is created, it can be started using start(), and a +variety of useful tasks maybe performed, including basic connectivity +and bandwidth tests and running the mininet CLI. + +Once the network is up and running, test code can easily get access +to host and switch objects which can then be used for arbitrary +experiments, typically involving running a series of commands on the +hosts. + +After all desired tests or activities have been completed, the stop() +method may be called to shut down the network. 
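# A compact end-to-end sketch of the API described above (hedged example;
# the host/switch names and the ping test are illustrative choices):

from mininet.net import Mininet
from mininet.cli import CLI

net = Mininet()
h1 = net.addHost( 'h1' )          # gets 10.0.0.1 by default
h2 = net.addHost( 'h2' )          # gets 10.0.0.2
s1 = net.addSwitch( 's1' )
c0 = net.addController( 'c0' )
net.addLink( h1, s1 )
net.addLink( h2, s1 )
net.start()
net.pingAll()                     # basic connectivity test
CLI( net )                        # optional interactive console
net.stop()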
+ +""" + +import os +import re +import select +import signal +import random + +from sys import exit # pylint: disable=redefined-builtin +from time import sleep +from itertools import chain, groupby +from math import ceil + +from mininet.cli import CLI +from mininet.log import info, error, output, warn, debug +from mininet.node import ( Node, Host, OVSKernelSwitch, DefaultController, + Controller ) +from mininet.nodelib import NAT +from mininet.link import Link, Intf +from mininet.util import ( quietRun, fixLimits, numCores, ensureRoot, + macColonHex, ipStr, ipParse, netParse, ipAdd, + waitListening, BaseString, fmtBps ) +from mininet.term import cleanUpScreens, makeTerms + +# Mininet version: should be consistent with README and LICENSE +VERSION = "2.3.1b4" + +class Mininet( object ): + "Network emulation with hosts spawned in network namespaces." + + # pylint: disable=too-many-arguments + def __init__( self, topo=None, switch=OVSKernelSwitch, host=Host, + controller=DefaultController, link=Link, intf=Intf, + build=True, xterms=False, cleanup=False, ipBase='10.0.0.0/8', + inNamespace=False, + autoSetMacs=False, autoStaticArp=False, autoPinCpus=False, + listenPort=None, waitConnected=False ): + """Create Mininet object. + topo: Topo (topology) object or None + switch: default Switch class + host: default Host class/constructor + controller: default Controller class/constructor + link: default Link class/constructor + intf: default Intf class/constructor + ipBase: base IP address for hosts, + build: build now from topo? + xterms: if build now, spawn xterms? + cleanup: if build now, cleanup before creating? + inNamespace: spawn switches and controller in net namespaces? + autoSetMacs: set MAC addrs automatically like IP addresses? + autoStaticArp: set all-pairs static MAC addrs? + autoPinCpus: pin hosts to (real) cores (requires CPULimitedHost)? + listenPort: base listening port to open; will be incremented for + each additional switch in the net if inNamespace=False + waitConnected: wait for switches to Connect? 
+ (False; True/None=wait indefinitely; time(s)=timed wait)""" + self.topo = topo + self.switch = switch + self.host = host + self.controller = controller + self.link = link + self.intf = intf + self.ipBase = ipBase + self.ipBaseNum, self.prefixLen = netParse( self.ipBase ) + hostIP = ( 0xffffffff >> self.prefixLen ) & self.ipBaseNum + # Start for address allocation + self.nextIP = hostIP if hostIP > 0 else 1 + self.inNamespace = inNamespace + self.xterms = xterms + self.cleanup = cleanup + self.autoSetMacs = autoSetMacs + self.autoStaticArp = autoStaticArp + self.autoPinCpus = autoPinCpus + self.numCores = numCores() + self.nextCore = 0 # next core for pinning hosts to CPUs + self.listenPort = listenPort + self.waitConn = waitConnected + + self.hosts = [] + self.switches = [] + self.controllers = [] + self.links = [] + + self.nameToNode = {} # name to Node (Host/Switch) objects + + self.terms = [] # list of spawned xterm processes + + Mininet.init() # Initialize Mininet if necessary + + self.built = False + if topo and build: + self.build() + + def waitConnected( self, timeout=None, delay=.5 ): + """wait for each switch to connect to a controller + timeout: time to wait, or None or True to wait indefinitely + delay: seconds to sleep per iteration + returns: True if all switches are connected""" + info( '*** Waiting for switches to connect\n' ) + time = 0.0 + remaining = list( self.switches ) + # False: 0s timeout; None: wait forever (preserve 2.2 behavior) + if isinstance( timeout, bool ): + timeout = None if timeout else 0 + while True: + for switch in tuple( remaining ): + if switch.connected(): + info( '%s ' % switch ) + remaining.remove( switch ) + if not remaining: + info( '\n' ) + return True + if timeout is not None and time >= timeout: + break + sleep( delay ) + time += delay + warn( 'Timed out after %d seconds\n' % time ) + for switch in remaining.copy(): + if not switch.connected(): + warn( 'Warning: %s is not connected to a controller\n' + % switch.name ) + else: + remaining.remove( switch ) + return not remaining + + def addHost( self, name, cls=None, **params ): + """Add host. + name: name of host to add + cls: custom host class/constructor (optional) + params: parameters for host + returns: added host""" + # Default IP and MAC addresses + defaults = { 'ip': ipAdd( self.nextIP, + ipBaseNum=self.ipBaseNum, + prefixLen=self.prefixLen ) + + '/%s' % self.prefixLen } + if self.autoSetMacs: + defaults[ 'mac' ] = macColonHex( self.nextIP ) + if self.autoPinCpus: + defaults[ 'cores' ] = self.nextCore + self.nextCore = ( self.nextCore + 1 ) % self.numCores + self.nextIP += 1 + defaults.update( params ) + if not cls: + cls = self.host + h = cls( name, **defaults ) + self.hosts.append( h ) + self.nameToNode[ name ] = h + return h + + def delNode( self, node, nodes=None): + """Delete node + node: node to delete + nodes: optional list to delete from (e.g. self.hosts)""" + if nodes is None: + nodes = ( self.hosts if node in self.hosts else + ( self.switches if node in self.switches else + ( self.controllers if node in self.controllers else + [] ) ) ) + node.stop( deleteIntfs=True ) + node.terminate() + nodes.remove( node ) + del self.nameToNode[ node.name ] + + def delHost( self, host ): + "Delete a host" + self.delNode( host, nodes=self.hosts ) + + def addSwitch( self, name, cls=None, **params ): + """Add switch. 
+ name: name of switch to add + cls: custom switch class/constructor (optional) + returns: added switch + side effect: increments listenPort ivar .""" + defaults = { 'listenPort': self.listenPort, + 'inNamespace': self.inNamespace } + defaults.update( params ) + if not cls: + cls = self.switch + sw = cls( name, **defaults ) + if not self.inNamespace and self.listenPort: + self.listenPort += 1 + self.switches.append( sw ) + self.nameToNode[ name ] = sw + return sw + + def delSwitch( self, switch ): + "Delete a switch" + self.delNode( switch, nodes=self.switches ) + + def addController( self, name='c0', controller=None, **params ): + """Add controller. + controller: Controller class""" + # Get controller class + if not controller: + controller = self.controller + # Construct new controller if one is not given + if isinstance( name, Controller ): + controller_new = name + # Pylint thinks controller is a str() + # pylint: disable=maybe-no-member + name = controller_new.name + # pylint: enable=maybe-no-member + else: + controller_new = controller( name, **params ) + # Add new controller to net + if controller_new: # allow controller-less setups + self.controllers.append( controller_new ) + self.nameToNode[ name ] = controller_new + return controller_new + + def delController( self, controller ): + """Delete a controller + Warning - does not reconfigure switches, so they + may still attempt to connect to it!""" + self.delNode( controller ) + + def addNAT( self, name='nat0', connect=True, inNamespace=False, + **params): + """Add a NAT to the Mininet network + name: name of NAT node + connect: switch to connect to | True (s1) | None + inNamespace: create in a network namespace + params: other NAT node params, notably: + ip: used as default gateway address""" + nat = self.addHost( name, cls=NAT, inNamespace=inNamespace, + subnet=self.ipBase, **params ) + # find first switch and create link + if connect: + if not isinstance( connect, Node ): + # Use first switch if not specified + connect = self.switches[ 0 ] + # Connect the nat to the switch + self.addLink( nat, connect ) + # Set the default route on hosts + natIP = nat.params[ 'ip' ].split('/')[ 0 ] + for host in self.hosts: + if host.inNamespace: + host.setDefaultRoute( 'via %s' % natIP ) + return nat + + # BL: We now have four ways to look up nodes + # This may (should?) be cleaned up in the future. 
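# The four lookup styles referred to above, in one short sketch
# (assumes net is a Mininet instance that already has a host named 'h1'):

h1 = net.getNodeByName( 'h1' )    # explicit method
h1 = net.get( 'h1' )              # convenience alias
h1 = net[ 'h1' ]                  # dict-style indexing
for name in net:                  # iteration yields node names
    node = net[ name ]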
+ def getNodeByName( self, *args ): + "Return node(s) with given name(s)" + if len( args ) == 1: + return self.nameToNode[ args[ 0 ] ] + return [ self.nameToNode[ n ] for n in args ] + + def get( self, *args ): + "Convenience alias for getNodeByName" + return self.getNodeByName( *args ) + + # Even more convenient syntax for node lookup and iteration + def __getitem__( self, key ): + "net[ name ] operator: Return node with given name" + return self.nameToNode[ key ] + + def __delitem__( self, key ): + "del net[ name ] operator - delete node with given name" + self.delNode( self.nameToNode[ key ] ) + + def __iter__( self ): + "return iterator over node names" + for node in chain( self.hosts, self.switches, self.controllers ): + yield node.name + + def __len__( self ): + "returns number of nodes in net" + return ( len( self.hosts ) + len( self.switches ) + + len( self.controllers ) ) + + def __contains__( self, item ): + "returns True if net contains named node" + return item in self.nameToNode + + def keys( self ): + "return a list of all node names or net's keys" + return list( self ) + + def values( self ): + "return a list of all nodes or net's values" + return [ self[name] for name in self ] + + def items( self ): + "return (key,value) tuple list for every node in net" + return zip( self.keys(), self.values() ) + + @staticmethod + def randMac(): + "Return a random, non-multicast MAC address" + return macColonHex( random.randint(1, 2**48 - 1) & 0xfeffffffffff | + 0x020000000000 ) + + def addLink( self, node1, node2, port1=None, port2=None, + cls=None, **params ): + """"Add a link from node1 to node2 + node1: source node (or name) + node2: dest node (or name) + port1: source port (optional) + port2: dest port (optional) + cls: link class (optional) + params: additional link params (optional) + returns: link object""" + # Accept node objects or names + node1 = node1 if not isinstance( node1, BaseString ) else self[ node1 ] + node2 = node2 if not isinstance( node2, BaseString ) else self[ node2 ] + options = dict( params ) + # Port is optional + if port1 is not None: + options.setdefault( 'port1', port1 ) + if port2 is not None: + options.setdefault( 'port2', port2 ) + if self.intf is not None: + options.setdefault( 'intf', self.intf ) + # Set default MAC - this should probably be in Link + options.setdefault( 'addr1', self.randMac() ) + options.setdefault( 'addr2', self.randMac() ) + cls = self.link if cls is None else cls + link = cls( node1, node2, **options ) + self.links.append( link ) + return link + + def delLink( self, link ): + "Remove a link from this network" + link.delete() + self.links.remove( link ) + + def linksBetween( self, node1, node2 ): + "Return Links between node1 and node2" + return [ link for link in self.links + if ( node1, node2 ) in ( + ( link.intf1.node, link.intf2.node ), + ( link.intf2.node, link.intf1.node ) ) ] + + def delLinkBetween( self, node1, node2, index=0, allLinks=False ): + """Delete link(s) between node1 and node2 + index: index of link to delete if multiple links (0) + allLinks: ignore index and delete all such links (False) + returns: deleted link(s)""" + links = self.linksBetween( node1, node2 ) + if not allLinks: + links = [ links[ index ] ] + for link in links: + self.delLink( link ) + return links + + def configHosts( self ): + "Configure a set of hosts." 
+ for host in self.hosts: + info( host.name + ' ' ) + intf = host.defaultIntf() + if intf: + host.configDefault() + else: + # Don't configure nonexistent intf + host.configDefault( ip=None, mac=None ) + # You're low priority, dude! + # BL: do we want to do this here or not? + # May not make sense if we have CPU limiting... + # quietRun( 'renice +18 -p ' + repr( host.pid ) ) + # This may not be the right place to do this, but + # it needs to be done somewhere. + info( '\n' ) + + def buildFromTopo( self, topo=None ): + """Build mininet from a topology object + At the end of this function, everything should be connected + and up.""" + + # Possibly we should clean up here and/or validate + # the topo + if self.cleanup: + pass + + info( '*** Creating network\n' ) + + if not self.controllers and self.controller: + # Add a default controller + info( '*** Adding controller\n' ) + classes = self.controller + if not isinstance( classes, list ): + classes = [ classes ] + for i, cls in enumerate( classes ): + # Allow Controller objects because nobody understands partial() + if isinstance( cls, Controller ): + self.addController( cls ) + else: + self.addController( 'c%d' % i, cls ) + + info( '*** Adding hosts:\n' ) + for hostName in topo.hosts(): + self.addHost( hostName, **topo.nodeInfo( hostName ) ) + info( hostName + ' ' ) + + info( '\n*** Adding switches:\n' ) + for switchName in topo.switches(): + # A bit ugly: add batch parameter if appropriate + params = topo.nodeInfo( switchName) + cls = params.get( 'cls', self.switch ) + if hasattr( cls, 'batchStartup' ): + params.setdefault( 'batch', True ) + self.addSwitch( switchName, **params ) + info( switchName + ' ' ) + + info( '\n*** Adding links:\n' ) + for srcName, dstName, params in topo.links( + sort=True, withInfo=True ): + self.addLink( **params ) + info( '(%s, %s) ' % ( srcName, dstName ) ) + + info( '\n' ) + + def configureControlNetwork( self ): + "Control net config hook: override in subclass" + raise Exception( 'configureControlNetwork: ' + 'should be overriden in subclass', self ) + + def build( self ): + "Build mininet." + if self.topo: + self.buildFromTopo( self.topo ) + if self.inNamespace: + self.configureControlNetwork() + info( '*** Configuring hosts\n' ) + self.configHosts() + if self.xterms: + self.startTerms() + if self.autoStaticArp: + self.staticArp() + self.built = True + + def startTerms( self ): + "Start a terminal for each node." + if 'DISPLAY' not in os.environ: + error( "Error starting terms: Cannot connect to display\n" ) + return + info( "*** Running terms on %s\n" % os.environ[ 'DISPLAY' ] ) + cleanUpScreens() + self.terms += makeTerms( self.controllers, 'controller' ) + self.terms += makeTerms( self.switches, 'switch' ) + self.terms += makeTerms( self.hosts, 'host' ) + + def stopXterms( self ): + "Kill each xterm." + for term in self.terms: + os.kill( term.pid, signal.SIGKILL ) + cleanUpScreens() + + def staticArp( self ): + "Add all-pairs ARP entries to remove the need to handle broadcast." + for src in self.hosts: + for dst in self.hosts: + if src != dst: + src.setARP( ip=dst.IP(), mac=dst.MAC() ) + + def start( self ): + "Start controller and switches." 
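# start() first builds the network if necessary, then starts each
# controller and each switch individually; switch classes that provide
# batchStartup() are additionally batch-started, and waitConnected() is
# invoked if self.waitConn is set.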
+ if not self.built: + self.build() + info( '*** Starting controller\n' ) + for controller in self.controllers: + info( controller.name + ' ') + controller.start() + info( '\n' ) + info( '*** Starting %s switches\n' % len( self.switches ) ) + for switch in self.switches: + info( switch.name + ' ') + switch.start( self.controllers ) + started = {} + for swclass, switches in groupby( + sorted( self.switches, + key=lambda s: str( type( s ) ) ), type ): + switches = tuple( switches ) + if hasattr( swclass, 'batchStartup' ): + success = swclass.batchStartup( switches ) + started.update( { s: s for s in success } ) + info( '\n' ) + if self.waitConn: + self.waitConnected( self.waitConn ) + + def stop( self ): + "Stop the controller(s), switches and hosts" + info( '*** Stopping %i controllers\n' % len( self.controllers ) ) + for controller in self.controllers: + info( controller.name + ' ' ) + controller.stop() + info( '\n' ) + # Unlimit cfs hosts to speed up shutdown + for h in self.hosts: + if hasattr( h, 'unlimit' ): + h.unlimit() + if self.terms: + info( '*** Stopping %i terms\n' % len( self.terms ) ) + self.stopXterms() + info( '*** Stopping %i links\n' % len( self.links ) ) + for link in self.links: + info( '.' ) + link.stop() + info( '\n' ) + info( '*** Stopping %i switches\n' % len( self.switches ) ) + stopped = {} + for swclass, switches in groupby( + sorted( self.switches, + key=lambda s: str( type( s ) ) ), type ): + switches = tuple( switches ) + if hasattr( swclass, 'batchShutdown' ): + success = swclass.batchShutdown( switches ) + stopped.update( { s: s for s in success } ) + for switch in self.switches: + info( switch.name + ' ' ) + if switch not in stopped: + switch.stop() + switch.terminate() + info( '\n' ) + info( '*** Stopping %i hosts\n' % len( self.hosts ) ) + for host in self.hosts: + info( host.name + ' ' ) + host.terminate() + info( '\n*** Done\n' ) + + def run( self, test, *args, **kwargs ): + "Perform a complete start/test/stop cycle." + self.start() + info( '*** Running test\n' ) + result = test( *args, **kwargs ) + self.stop() + return result + + def monitor( self, hosts=None, timeoutms=-1 ): + """Monitor a set of hosts (or all hosts by default), + and return their output, a line at a time. + hosts: (optional) set of hosts to monitor + timeoutms: (optional) timeout value in ms + returns: iterator which returns host, line""" + if hosts is None: + hosts = self.hosts + poller = select.poll() + h1 = hosts[ 0 ] # so we can call class method fdToNode + for host in hosts: + poller.register( host.stdout ) + while True: + ready = poller.poll( timeoutms ) + for fd, event in ready: + host = h1.fdToNode( fd ) + if event & select.POLLIN: + line = host.readline() + if line is not None: + yield host, line + # Return if non-blocking + if not ready and timeoutms >= 0: + yield None, None + + # XXX These test methods should be moved out of this class. + # Probably we should create a tests.py for them + + @staticmethod + def _parsePing( pingOutput ): + "Parse ping output and return packets sent, received." + # Check for downed link + if 'connect: Network is unreachable' in pingOutput: + return 1, 0 + r = r'(\d+) packets transmitted, (\d+)( packets)? received' + m = re.search( r, pingOutput ) + if m is None: + error( '*** Error: could not parse ping output: %s\n' % + pingOutput ) + return 1, 0 + sent, received = int( m.group( 1 ) ), int( m.group( 2 ) ) + return sent, received + + def ping( self, hosts=None, timeout=None ): + """Ping between all specified hosts. 
+ hosts: list of hosts + timeout: time to wait for a response, as string + returns: ploss packet loss percentage""" + # should we check if running? + packets = 0 + lost = 0 + ploss = None + if not hosts: + hosts = self.hosts + output( '*** Ping: testing ping reachability\n' ) + for node in hosts: + output( '%s -> ' % node.name ) + for dest in hosts: + if node != dest: + opts = '' + if timeout: + opts = '-W %s' % timeout + if dest.intfs: + result = node.cmd( 'LANG=C ping -c1 %s %s' % + (opts, dest.IP()) ) + sent, received = self._parsePing( result ) + else: + sent, received = 0, 0 + packets += sent + if received > sent: + error( '*** Error: received too many packets' ) + error( '%s' % result ) + node.cmdPrint( 'route' ) + exit( 1 ) + lost += sent - received + output( ( '%s ' % dest.name ) if received else 'X ' ) + output( '\n' ) + if packets > 0: + ploss = 100.0 * lost / packets + received = packets - lost + output( "*** Results: %i%% dropped (%d/%d received)\n" % + ( ploss, received, packets ) ) + else: + ploss = 0 + output( "*** Warning: No packets sent\n" ) + return ploss + + @staticmethod + def _parsePingFull( pingOutput ): + "Parse ping output and return all data." + errorTuple = (1, 0, 0, 0, 0, 0) + # Check for downed link + r = r'[uU]nreachable' + m = re.search( r, pingOutput ) + if m is not None: + return errorTuple + r = r'(\d+) packets transmitted, (\d+)( packets)? received' + m = re.search( r, pingOutput ) + if m is None: + error( '*** Error: could not parse ping output: %s\n' % + pingOutput ) + return errorTuple + sent, received = int( m.group( 1 ) ), int( m.group( 2 ) ) + r = r'rtt min/avg/max/mdev = ' + r += r'(\d+\.\d+)/(\d+\.\d+)/(\d+\.\d+)/(\d+\.\d+) ms' + m = re.search( r, pingOutput ) + if m is None: + if received == 0: + return errorTuple + error( '*** Error: could not parse ping output: %s\n' % + pingOutput ) + return errorTuple + rttmin = float( m.group( 1 ) ) + rttavg = float( m.group( 2 ) ) + rttmax = float( m.group( 3 ) ) + rttdev = float( m.group( 4 ) ) + return sent, received, rttmin, rttavg, rttmax, rttdev + + def pingFull( self, hosts=None, timeout=None ): + """Ping between all specified hosts and return all data. + hosts: list of hosts + timeout: time to wait for a response, as string + returns: all ping data; see function body.""" + # should we check if running? + # Each value is a tuple: (src, dsd, [all ping outputs]) + all_outputs = [] + if not hosts: + hosts = self.hosts + output( '*** Ping: testing ping reachability\n' ) + for node in hosts: + output( '%s -> ' % node.name ) + for dest in hosts: + if node != dest: + opts = '' + if timeout: + opts = '-W %s' % timeout + result = node.cmd( 'ping -c1 %s %s' % (opts, dest.IP()) ) + outputs = self._parsePingFull( result ) + sent, received, rttmin, rttavg, rttmax, rttdev = outputs + all_outputs.append( (node, dest, outputs) ) + output( ( '%s ' % dest.name ) if received else 'X ' ) + output( '\n' ) + output( "*** Results: \n" ) + for outputs in all_outputs: + src, dest, ping_outputs = outputs + sent, received, rttmin, rttavg, rttmax, rttdev = ping_outputs + output( " %s->%s: %s/%s, " % (src, dest, sent, received ) ) + output( "rtt min/avg/max/mdev %0.3f/%0.3f/%0.3f/%0.3f ms\n" % + (rttmin, rttavg, rttmax, rttdev) ) + return all_outputs + + def pingAll( self, timeout=None ): + """Ping between all hosts. + returns: ploss packet loss percentage""" + return self.ping( timeout=timeout ) + + def pingPair( self ): + """Ping between first two hosts, useful for testing. 
+ returns: ploss packet loss percentage""" + hosts = [ self.hosts[ 0 ], self.hosts[ 1 ] ] + return self.ping( hosts=hosts ) + + def pingAllFull( self ): + """Ping between all hosts. + returns: ploss packet loss percentage""" + return self.pingFull() + + def pingPairFull( self ): + """Ping between first two hosts, useful for testing. + returns: ploss packet loss percentage""" + hosts = [ self.hosts[ 0 ], self.hosts[ 1 ] ] + return self.pingFull( hosts=hosts ) + + @staticmethod + def _iperfVals( iperfcsv, serverip ): + """Return iperf CSV as dict + iperfcsv: iperf -y C output + serverip: iperf server IP address + """ + fields = 'date cip cport sip sport ipver interval sent rate' + lines = iperfcsv.strip().split('\n') + svals = {} + for line in lines: + if ',' not in line: + continue + line = line.split( ',' ) + svals = dict( zip( fields.split(), line ) ) + # Return client in cip:cport, server in sip:sport + if svals[ 'cip' ] == serverip: + svals[ 'cip' ], svals[ 'sip' ] = ( + svals[ 'sip' ], svals[ 'cip' ] ) + svals[ 'cport' ], svals[ 'sport' ] = ( + svals[ 'sport' ], svals[ 'cport' ] ) + return svals + + # XXX This should be cleaned up + + def iperf( self, hosts=None, l4Type='TCP', udpBw='10M', fmt=None, + seconds=5, port=5001): + """Run iperf between two hosts. + hosts: list of hosts; if None, uses first and last hosts + l4Type: string, one of [ TCP, UDP ] + udpBw: bandwidth target for UDP test + fmt: scale/format argument (e.g. m/M for Mbps) + seconds: iperf time to transmit + port: iperf port + returns: two-element array of [ server, client ] speeds + note: send() is buffered, so client rate can be much higher than + the actual transmission rate; on an unloaded system, server + rate should be much closer to the actual receive rate""" + hosts = hosts or [ self.hosts[ 0 ], self.hosts[ -1 ] ] + assert len( hosts ) == 2 + client, server = hosts + output( '*** Iperf: testing', l4Type, 'bandwidth between', + client, 'and', server, '\n' ) + server.cmd( 'killall -9 iperf' ) + # Note: CSV mode + iperfArgs = 'iperf -y C -p %d ' % port + bwArgs = '' + if l4Type == 'UDP': + iperfArgs += '-u ' + bwArgs = '-b ' + udpBw + ' ' + server.sendCmd( iperfArgs + '-s' ) + serverip = server.IP() + if l4Type == 'TCP': + if not waitListening( client, serverip, port ): + raise Exception( 'Could not connect to iperf on port %d' + % port ) + cliout = client.cmd( iperfArgs + '-t %d -c ' % seconds + + server.IP() + ' ' + bwArgs ) + cvals = self._iperfVals( cliout, serverip ) + debug( 'iperf client output:', cliout, cvals ) + serverout = '' + # Wait for output from the client session + while True: + serverout += server.monitor( timeoutms=5000 ) + svals = self._iperfVals( serverout, serverip ) + # Check for the client's source/output port + if ( svals and cvals[ 'sport' ] == svals[ 'sport' ] + and int( svals[ 'rate' ] ) > 0 ): + break + debug( 'iperf server output:', serverout, svals ) + server.sendInt() + serverout += server.waitOutput() + result = [ fmtBps( svals[ 'rate'], fmt ), + fmtBps( cvals[ 'rate' ], fmt ) ] + if l4Type == 'UDP': + result.insert( 0, udpBw ) + output( '*** Results: %s\n' % result ) + return result + + def runCpuLimitTest( self, cpu, duration=5 ): + """run CPU limit test with 'while true' processes. + cpu: desired CPU fraction of each host + duration: test duration in seconds (integer) + returns a single list of measured CPU fractions as floats. 
+ """ + pct = cpu * 100 + info( '*** Testing CPU %.0f%% bandwidth limit\n' % pct ) + hosts = self.hosts + cores = int( quietRun( 'nproc' ) ) + # number of processes to run a while loop on per host + num_procs = int( ceil( cores * cpu ) ) + pids = {} + for h in hosts: + pids[ h ] = [] + for _core in range( num_procs ): + h.cmd( 'while true; do a=1; done &' ) + pids[ h ].append( h.cmd( 'echo $!' ).strip() ) + outputs = {} + time = {} + # get the initial cpu time for each host + for host in hosts: + outputs[ host ] = [] + with open( '/sys/fs/cgroup/cpuacct/%s/cpuacct.usage' % + host, 'r' ) as f: + time[ host ] = float( f.read() ) + for _ in range( duration ): + sleep( 1 ) + for host in hosts: + with open( '/sys/fs/cgroup/cpuacct/%s/cpuacct.usage' % + host, 'r' ) as f: + readTime = float( f.read() ) + outputs[ host ].append( ( ( readTime - time[ host ] ) + / 1000000000 ) / cores * 100 ) + time[ host ] = readTime + for h, pids in pids.items(): + for pid in pids: + h.cmd( 'kill -9 %s' % pid ) + cpu_fractions = [] + for _host, outputs in outputs.items(): + for pct in outputs: + cpu_fractions.append( pct ) + output( '*** Results: %s\n' % cpu_fractions ) + return cpu_fractions + + # BL: I think this can be rewritten now that we have + # a real link class. + def configLinkStatus( self, src, dst, status ): + """Change status of src <-> dst links. + src: node name + dst: node name + status: string {up, down}""" + if src not in self.nameToNode: + error( 'src not in network: %s\n' % src ) + elif dst not in self.nameToNode: + error( 'dst not in network: %s\n' % dst ) + else: + src = self.nameToNode[ src ] + dst = self.nameToNode[ dst ] + connections = src.connectionsTo( dst ) + if len( connections ) == 0: + error( 'src and dst not connected: %s %s\n' % ( src, dst) ) + for srcIntf, dstIntf in connections: + result = srcIntf.ifconfig( status ) + if result: + error( 'link src status change failed: %s\n' % result ) + result = dstIntf.ifconfig( status ) + if result: + error( 'link dst status change failed: %s\n' % result ) + + def interact( self ): + "Start network and run our simple CLI." + self.start() + result = CLI( self ) + self.stop() + return result + + inited = False + + @classmethod + def init( cls ): + "Initialize Mininet" + if cls.inited: + return + ensureRoot() + fixLimits() + cls.inited = True + + +class MininetWithControlNet( Mininet ): + + """Control network support: + + Create an explicit control network. Currently this is only + used/usable with the user datapath. + + Notes: + + 1. If the controller and switches are in the same (e.g. root) + namespace, they can just use the loopback connection. + + 2. If we can get unix domain sockets to work, we can use them + instead of an explicit control network. + + 3. Instead of routing, we could bridge or use 'in-band' control. + + 4. Even if we dispense with this in general, it could still be + useful for people who wish to simulate a separate control + network (since real networks may need one!) + + 5. Basically nobody ever used this code, so it has been moved + into its own class. + + 6. Ultimately we may wish to extend this to allow us to create a + control network which every node's control interface is + attached to.""" + + def configureControlNetwork( self ): + "Configure control network." + self.configureRoutedControlNetwork() + + # We still need to figure out the right way to pass + # in the control network location. 
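# A minimal usage sketch (hypothetical topology name; user datapath assumed):
#     net = MininetWithControlNet( topo=myTopo, switch=UserSwitch,
#                                  inNamespace=True )
#     net.start()   # build() calls configureControlNetwork() when inNamespace is set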
+ + def configureRoutedControlNetwork( self, ip='192.168.123.1', + prefixLen=16 ): + """Configure a routed control network on controller and switches. + For use with the user datapath only right now.""" + controller = self.controllers[ 0 ] + info( controller.name + ' <->' ) + cip = ip + snum = ipParse( ip ) + for switch in self.switches: + info( ' ' + switch.name ) + link = self.link( switch, controller, port1=0 ) + sintf, cintf = link.intf1, link.intf2 + switch.controlIntf = sintf + snum += 1 + while snum & 0xff in [ 0, 255 ]: + snum += 1 + sip = ipStr( snum ) + cintf.setIP( cip, prefixLen ) + sintf.setIP( sip, prefixLen ) + controller.setHostRoute( sip, cintf ) + switch.setHostRoute( cip, sintf ) + info( '\n' ) + info( '*** Testing control network\n' ) + while not cintf.isUp(): + info( '*** Waiting for', cintf, 'to come up\n' ) + sleep( 1 ) + for switch in self.switches: + while not sintf.isUp(): + info( '*** Waiting for', sintf, 'to come up\n' ) + sleep( 1 ) + if self.ping( hosts=[ switch, controller ] ) != 0: + error( '*** Error: control network test failed\n' ) + exit( 1 ) + info( '\n' ) diff --git a/mininet/node.py b/mininet/node.py new file mode 100644 index 0000000..4034181 --- /dev/null +++ b/mininet/node.py @@ -0,0 +1,1617 @@ +""" +Node objects for Mininet. + +Nodes provide a simple abstraction for interacting with hosts, switches +and controllers. Local nodes are simply one or more processes on the local +machine. + +Node: superclass for all (primarily local) network nodes. + +Host: a virtual host. By default, a host is simply a shell; commands + may be sent using Cmd (which waits for output), or using sendCmd(), + which returns immediately, allowing subsequent monitoring using + monitor(). Examples of how to run experiments using this + functionality are provided in the examples/ directory. By default, + hosts share the root file system, but they may also specify private + directories. + +CPULimitedHost: a virtual host whose CPU bandwidth is limited by + RT or CFS bandwidth limiting. + +Switch: superclass for switch nodes. + +UserSwitch: a switch using the user-space switch from the OpenFlow + reference implementation. + +OVSSwitch: a switch using the Open vSwitch OpenFlow-compatible switch + implementation (openvswitch.org). + +OVSBridge: an Ethernet bridge implemented using Open vSwitch. + Supports STP. + +IVSSwitch: OpenFlow switch using the Indigo Virtual Switch. + +Controller: superclass for OpenFlow controllers. The default controller + is controller(8) from the reference implementation. + +OVSController: The test controller from Open vSwitch. + +NOXController: a controller node using NOX (noxrepo.org). + +Ryu: The Ryu controller (https://osrg.github.io/ryu/) + +RemoteController: a remote controller node, which may use any + arbitrary OpenFlow-compatible controller, and which is not + created or managed by Mininet. 
+ +Future enhancements: + +- Possibly make Node, Switch and Controller more abstract so that + they can be used for both local and remote nodes + +- Create proxy objects for remote nodes (Mininet: Cluster Edition) +""" + +import os +import pty +import re +import signal +import select +from re import findall +from subprocess import Popen, PIPE +from sys import exit # pylint: disable=redefined-builtin +from time import sleep + +from mininet.log import info, error, warn, debug +from mininet.util import ( quietRun, errRun, errFail, moveIntf, isShellBuiltin, + numCores, retry, mountCgroups, BaseString, decode, + encode, getincrementaldecoder, Python3, which, + StrictVersion ) +from mininet.moduledeps import moduleDeps, pathCheck, TUN +from mininet.link import Link, Intf, TCIntf, OVSIntf + + +# pylint: disable=too-many-arguments + + +class Node( object ): + """A virtual network node is simply a shell in a network namespace. + We communicate with it using pipes.""" + + portBase = 0 # Nodes always start with eth0/port0, even in OF 1.0 + + def __init__( self, name, inNamespace=True, **params ): + """name: name of node + inNamespace: in network namespace? + privateDirs: list of private directory strings or tuples + params: Node parameters (see config() for details)""" + + # Make sure class actually works + self.checkSetup() + + self.name = params.get( 'name', name ) + self.privateDirs = params.get( 'privateDirs', [] ) + self.inNamespace = params.get( 'inNamespace', inNamespace ) + + # Python 3 complains if we don't wait for shell exit + self.waitExited = params.get( 'waitExited', Python3 ) + + # Stash configuration parameters for future reference + self.params = params + + # dict of port numbers to interfaces + self.intfs = {} + + # dict of interfaces to port numbers + # todo: replace with Port objects, eventually ? + self.ports = {} + + self.nameToIntf = {} # dict of interface names to Intfs + + # Make pylint happy + ( self.shell, self.execed, self.pid, self.stdin, self.stdout, + self.lastPid, self.lastCmd, self.pollOut ) = ( + None, None, None, None, None, None, None, None ) + self.waiting = False + self.readbuf = '' + + # Incremental decoder for buffered reading + self.decoder = getincrementaldecoder() + + # Start command interpreter shell + self.master, self.slave = None, None # pylint + self.startShell() + self.mountPrivateDirs() + + # File descriptor to node mapping support + # Class variables and methods + + inToNode = {} # mapping of input fds to nodes + outToNode = {} # mapping of output fds to nodes + + @classmethod + def fdToNode( cls, fd ): + """Return node corresponding to given file descriptor. + fd: file descriptor + returns: node""" + node = cls.outToNode.get( fd ) + return node or cls.inToNode.get( fd ) + + # Command support via shell process in namespace + def startShell( self, mnopts=None ): + "Start a shell process for running commands" + if self.shell: + error( "%s: shell is already running\n" % self.name ) + return + # mnexec: (c)lose descriptors, (d)etach from tty, + # (p)rint pid, and run in (n)amespace + opts = '-cd' if mnopts is None else mnopts + if self.inNamespace: + opts += 'n' + # bash -i: force interactive + # -s: pass $* to shell, and make process easy to find in ps + # prompt is set to sentinel chr( 127 ) + cmd = [ 'mnexec', opts, 'env', 'PS1=' + chr( 127 ), + 'bash', '--norc', '--noediting', + '-is', 'mininet:' + self.name ] + + # Spawn a shell subprocess in a pseudo-tty, to disable buffering + # in the subprocess and insulate it from signals (e.g. 
SIGINT) + # received by the parent + self.master, self.slave = pty.openpty() + self.shell = self._popen( cmd, stdin=self.slave, stdout=self.slave, + stderr=self.slave, close_fds=False ) + # XXX BL: This doesn't seem right, and we should also probably + # close our files when we exit... + self.stdin = os.fdopen( self.master, 'r' ) + self.stdout = self.stdin + self.pid = self.shell.pid + self.pollOut = select.poll() + self.pollOut.register( self.stdout ) + # Maintain mapping between file descriptors and nodes + # This is useful for monitoring multiple nodes + # using select.poll() + self.outToNode[ self.stdout.fileno() ] = self + self.inToNode[ self.stdin.fileno() ] = self + self.execed = False + self.lastCmd = None + self.lastPid = None + self.readbuf = '' + # Wait for prompt + while True: + data = self.read( 1024 ) + if data[ -1 ] == chr( 127 ): + break + self.pollOut.poll() + self.waiting = False + # +m: disable job control notification + self.cmd( 'unset HISTFILE; stty -echo; set +m' ) + + def mountPrivateDirs( self ): + "mount private directories" + # Avoid expanding a string into a list of chars + assert not isinstance( self.privateDirs, BaseString ) + for directory in self.privateDirs: + if isinstance( directory, tuple ): + # mount given private directory + privateDir = directory[ 1 ] % self.__dict__ + mountPoint = directory[ 0 ] + self.cmd( 'mkdir -p %s' % privateDir ) + self.cmd( 'mkdir -p %s' % mountPoint ) + self.cmd( 'mount --bind %s %s' % + ( privateDir, mountPoint ) ) + else: + # mount temporary filesystem on directory + self.cmd( 'mkdir -p %s' % directory ) + self.cmd( 'mount -n -t tmpfs tmpfs %s' % directory ) + + def unmountPrivateDirs( self ): + "mount private directories" + for directory in self.privateDirs: + if isinstance( directory, tuple ): + self.cmd( 'umount ', directory[ 0 ] ) + else: + self.cmd( 'umount ', directory ) + + def _popen( self, cmd, **params ): + """Internal method: spawn and return a process + cmd: command to run (list) + params: parameters to Popen()""" + # Leave this is as an instance method for now + assert self + popen = Popen( cmd, **params ) # pylint: disable=consider-using-with + debug( '_popen', cmd, popen.pid ) + return popen + + def cleanup( self ): + "Help python collect its garbage." + # We used to do this, but it slows us down: + # Intfs may end up in root NS + # for intfName in self.intfNames(): + # if self.name in intfName: + # quietRun( 'ip link del ' + intfName ) + if self.shell: + # Close ptys + self.stdin.close() + os.close(self.slave) + if self.waitExited: + debug( 'waiting for', self.pid, 'to terminate\n' ) + self.shell.wait() + self.shell = None + + # Subshell I/O, commands and control + + def read( self, size=1024 ): + """Buffered read from node, potentially blocking. + size: maximum number of characters to return""" + count = len( self.readbuf ) + if count < size: + data = os.read( self.stdout.fileno(), size - count ) + self.readbuf += self.decoder.decode( data ) + if size >= len( self.readbuf ): + result = self.readbuf + self.readbuf = '' + else: + result = self.readbuf[ :size ] + self.readbuf = self.readbuf[ size: ] + return result + + def readline( self ): + """Buffered readline from node, potentially blocking. + returns: line (minus newline) or None""" + self.readbuf += self.read( 1024 ) + if '\n' not in self.readbuf: + return None + pos = self.readbuf.find( '\n' ) + line = self.readbuf[ 0: pos ] + self.readbuf = self.readbuf[ pos + 1: ] + return line + + def write( self, data ): + """Write data to node. 
+ data: string""" + os.write( self.stdin.fileno(), encode( data ) ) + + def terminate( self ): + "Send kill signal to Node and clean up after it." + self.unmountPrivateDirs() + if self.shell: + if self.shell.poll() is None: + os.killpg( self.shell.pid, signal.SIGHUP ) + self.cleanup() + + def stop( self, deleteIntfs=False ): + """Stop node. + deleteIntfs: delete interfaces? (False)""" + if deleteIntfs: + self.deleteIntfs() + self.terminate() + + def waitReadable( self, timeoutms=None ): + """Wait until node's output is readable. + timeoutms: timeout in ms or None to wait indefinitely. + returns: result of poll()""" + if len( self.readbuf ) == 0: + return self.pollOut.poll( timeoutms ) + return None + + def sendCmd( self, *args, **kwargs ): + """Send a command, followed by a command to echo a sentinel, + and return without waiting for the command to complete. + args: command and arguments, or string + printPid: print command's PID? (False)""" + assert self.shell and not self.waiting + printPid = kwargs.get( 'printPid', False ) + # Allow sendCmd( [ list ] ) + if len( args ) == 1 and isinstance( args[ 0 ], list ): + cmd = args[ 0 ] + # Allow sendCmd( cmd, arg1, arg2... ) + elif len( args ) > 0: + cmd = args + # Convert to string + if not isinstance( cmd, str ): + cmd = ' '.join( [ str( c ) for c in cmd ] ) + if not re.search( r'\w', cmd ): + # Replace empty commands with something harmless + cmd = 'echo -n' + self.lastCmd = cmd + # if a builtin command is backgrounded, it still yields a PID + if len( cmd ) > 0 and cmd[ -1 ] == '&': + # print ^A{pid}\n so monitor() can set lastPid + cmd += ' printf "\\001%d\\012" $! ' + elif printPid and not isShellBuiltin( cmd ): + cmd = 'mnexec -p ' + cmd + self.write( cmd + '\n' ) + self.lastPid = None + self.waiting = True + + def sendInt( self, intr=chr( 3 ) ): + "Interrupt running command." + debug( 'sendInt: writing chr(%d)\n' % ord( intr ) ) + self.write( intr ) + + def monitor( self, timeoutms=None, findPid=True ): + """Monitor and return the output of a command. + Set self.waiting to False if command has completed. + timeoutms: timeout in ms or None to wait indefinitely + findPid: look for PID from mnexec -p""" + ready = self.waitReadable( timeoutms ) + if not ready: + return '' + data = self.read( 1024 ) + pidre = r'\[\d+\] \d+\r\n' + # Look for PID + marker = chr( 1 ) + r'\d+\r\n' + if findPid and chr( 1 ) in data: + # suppress the job and PID of a backgrounded command + if re.findall( pidre, data ): + data = re.sub( pidre, '', data ) + # Marker can be read in chunks; continue until all of it is read + while not re.findall( marker, data ): + data += self.read( 1024 ) + markers = re.findall( marker, data ) + if markers: + self.lastPid = int( markers[ 0 ][ 1: ] ) + data = re.sub( marker, '', data ) + # Look for sentinel/EOF + if len( data ) > 0 and data[ -1 ] == chr( 127 ): + self.waiting = False + data = data[ :-1 ] + elif chr( 127 ) in data: + self.waiting = False + data = data.replace( chr( 127 ), '' ) + return data + + def waitOutput( self, verbose=False, findPid=True ): + """Wait for a command to complete. + Completion is signaled by a sentinel character, ASCII(127) + appearing in the output stream. Wait for the sentinel and return + the output, including trailing newline. 
+ verbose: print output interactively""" + log = info if verbose else debug + output = '' + while self.waiting: + data = self.monitor( findPid=findPid ) + output += data + log( data ) + return output + + def cmd( self, *args, **kwargs ): + """Send a command, wait for output, and return it. + cmd: string""" + verbose = kwargs.get( 'verbose', False ) + log = info if verbose else debug + log( '*** %s : %s\n' % ( self.name, args ) ) + if self.shell: + self.sendCmd( *args, **kwargs ) + return self.waitOutput( verbose ) + else: + warn( '(%s exited - ignoring cmd%s)\n' % ( self, args ) ) + return None + + def cmdPrint( self, *args): + """Call cmd and printing its output + cmd: string""" + return self.cmd( *args, **{ 'verbose': True } ) + + def popen( self, *args, **kwargs ): + """Return a Popen() object in our namespace + args: Popen() args, single list, or string + kwargs: Popen() keyword args""" + defaults = { 'stdout': PIPE, 'stderr': PIPE, + 'mncmd': + [ 'mnexec', '-da', str( self.pid ) ] } + defaults.update( kwargs ) + shell = defaults.pop( 'shell', False ) + if len( args ) == 1: + if isinstance( args[ 0 ], list ): + # popen([cmd, arg1, arg2...]) + cmd = args[ 0 ] + elif isinstance( args[ 0 ], BaseString ): + # popen("cmd arg1 arg2...") + cmd = [ args[ 0 ] ] if shell else args[ 0 ].split() + else: + raise Exception( 'popen() requires a string or list' ) + elif len( args ) > 0: + # popen( cmd, arg1, arg2... ) + cmd = list( args ) + if shell: + cmd = [ os.environ[ 'SHELL' ], '-c' ] + [ ' '.join( cmd ) ] + # Attach to our namespace using mnexec -a + cmd = defaults.pop( 'mncmd' ) + cmd + popen = self._popen( cmd, **defaults ) + return popen + + def pexec( self, *args, **kwargs ): + """Execute a command using popen + returns: out, err, exitcode""" + popen = self.popen( *args, stdin=PIPE, stdout=PIPE, stderr=PIPE, + **kwargs ) + # Warning: this can fail with large numbers of fds! + out, err = popen.communicate() + exitcode = popen.wait() + return decode( out ), decode( err ), exitcode + + # Interface management, configuration, and routing + + # BL notes: This might be a bit redundant or over-complicated. + # However, it does allow a bit of specialization, including + # changing the canonical interface names. It's also tricky since + # the real interfaces are created as veth pairs, so we can't + # make a single interface at a time. + + def newPort( self ): + "Return the next port number to allocate." + if len( self.ports ) > 0: + return max( self.ports.values() ) + 1 + return self.portBase + + def addIntf( self, intf, port=None, moveIntfFn=moveIntf ): + """Add an interface. 
+ intf: interface + port: port number (optional, typically OpenFlow port number) + moveIntfFn: function to move interface (optional)""" + if port is None: + port = self.newPort() + self.intfs[ port ] = intf + self.ports[ intf ] = port + self.nameToIntf[ intf.name ] = intf + debug( '\n' ) + debug( 'added intf %s (%d) to node %s\n' % ( + intf, port, self.name ) ) + if self.inNamespace: + debug( 'moving', intf, 'into namespace for', self.name, '\n' ) + moveIntfFn( intf.name, self ) + + def delIntf( self, intf ): + """Remove interface from Node's known interfaces + Note: to fully delete interface, call intf.delete() instead""" + port = self.ports.get( intf ) + if port is not None: + del self.intfs[ port ] + del self.ports[ intf ] + del self.nameToIntf[ intf.name ] + + def defaultIntf( self ): + "Return interface for lowest port" + ports = self.intfs.keys() + if ports: + return self.intfs[ min( ports ) ] + else: + warn( '*** defaultIntf: warning:', self.name, + 'has no interfaces\n' ) + return None + + def intf( self, intf=None ): + """Return our interface object with given string name, + default intf if name is falsy (None, empty string, etc). + or the input intf arg. + + Having this fcn return its arg for Intf objects makes it + easier to construct functions with flexible input args for + interfaces (those that accept both string names and Intf objects). + """ + if not intf: + return self.defaultIntf() + elif isinstance( intf, BaseString): + return self.nameToIntf[ intf ] + else: + return intf + + def connectionsTo( self, node): + "Return [ intf1, intf2... ] for all intfs that connect self to node." + # We could optimize this if it is important + connections = [] + for intf in self.intfList(): + link = intf.link + if link: + node1, node2 = link.intf1.node, link.intf2.node + if node1 == self and node2 == node: + connections += [ ( intf, link.intf2 ) ] + elif node1 == node and node2 == self: + connections += [ ( intf, link.intf1 ) ] + return connections + + def deleteIntfs( self, checkName=True ): + """Delete all of our interfaces. + checkName: only delete interfaces that contain our name""" + # In theory the interfaces should go away after we shut down. + # However, this takes time, so we're better off removing them + # explicitly so that we won't get errors if we run before they + # have been removed by the kernel. Unfortunately this is very slow, + # at least with Linux kernels before 2.6.33 + for intf in list( self.intfs.values() ): + # Protect against deleting hardware interfaces + if ( self.name in intf.name ) or ( not checkName ): + intf.delete() + info( '.' ) + + # Routing support + + def setARP( self, ip, mac ): + """Add an ARP entry. + ip: IP address as string + mac: MAC address as string""" + result = self.cmd( 'arp', '-s', ip, mac ) + return result + + def setHostRoute( self, ip, intf ): + """Add route to host. + ip: IP address as dotted decimal + intf: string, interface name""" + return self.cmd( 'route add -host', ip, 'dev', intf ) + + def setDefaultRoute( self, intf=None ): + """Set the default route to go through intf. + intf: Intf or {dev via ...}""" + # Note setParam won't call us if intf is none + if isinstance( intf, BaseString ) and ' ' in intf: + params = intf + else: + params = 'dev %s' % intf + # Do this in one line in case we're messing with the root namespace + self.cmd( 'ip route del default; ip route add default', params ) + + # Convenience and configuration methods + + def setMAC( self, mac, intf=None ): + """Set the MAC address for an interface. 
+ intf: intf or intf name + mac: MAC address as string""" + return self.intf( intf ).setMAC( mac ) + + def setIP( self, ip, prefixLen=8, intf=None, **kwargs ): + """Set the IP address for an interface. + intf: intf or intf name + ip: IP address as a string + prefixLen: prefix length, e.g. 8 for /8 or 16M addrs + kwargs: any additional arguments for intf.setIP""" + return self.intf( intf ).setIP( ip, prefixLen, **kwargs ) + + def IP( self, intf=None ): + "Return IP address of a node or specific interface." + return self.intf( intf ).IP() + + def MAC( self, intf=None ): + "Return MAC address of a node or specific interface." + return self.intf( intf ).MAC() + + def intfIsUp( self, intf=None ): + "Check if an interface is up." + return self.intf( intf ).isUp() + + # The reason why we configure things in this way is so + # That the parameters can be listed and documented in + # the config method. + # Dealing with subclasses and superclasses is slightly + # annoying, but at least the information is there! + + def setParam( self, results, method, **param ): + """Internal method: configure a *single* parameter + results: dict of results to update + method: config method name + param: arg=value (ignore if value=None) + value may also be list or dict""" + name, value = list( param.items() )[ 0 ] + if value is None: + return None + f = getattr( self, method, None ) + if not f: + return None + if isinstance( value, list ): + result = f( *value ) + elif isinstance( value, dict ): + result = f( **value ) + else: + result = f( value ) + results[ name ] = result + return result + + def config( self, mac=None, ip=None, + defaultRoute=None, lo='up', **_params ): + """Configure Node according to (optional) parameters: + mac: MAC address for default interface + ip: IP address for default interface + ifconfig: arbitrary interface configuration + Subclasses should override this method and call + the parent class's config(**params)""" + # If we were overriding this method, we would call + # the superclass config method here as follows: + # r = Parent.config( **_params ) + r = {} + self.setParam( r, 'setMAC', mac=mac ) + self.setParam( r, 'setIP', ip=ip ) + self.setParam( r, 'setDefaultRoute', defaultRoute=defaultRoute ) + # This should be examined + self.cmd( 'ifconfig lo ' + lo ) + return r + + def configDefault( self, **moreParams ): + "Configure with default parameters" + self.params.update( moreParams ) + self.config( **self.params ) + + # This is here for backward compatibility + def linkTo( self, node, link=Link ): + """(Deprecated) Link to another node + replace with Link( node1, node2)""" + return link( self, node ) + + # Other methods + + def intfList( self ): + "List of our interfaces sorted by port number" + return [ self.intfs[ p ] for p in sorted( self.intfs.keys() ) ] + + def intfNames( self ): + "The names of our interfaces sorted by port number" + return [ str( i ) for i in self.intfList() ] + + def __repr__( self ): + "More informative string representation" + intfs = ( ','.join( [ '%s:%s' % ( i.name, i.IP() ) + for i in self.intfList() ] ) ) + return '<%s %s: %s pid=%s> ' % ( + self.__class__.__name__, self.name, intfs, self.pid ) + + def __str__( self ): + "Abbreviated string representation" + return self.name + + # Automatic class setup support + + isSetup = False + + @classmethod + def checkSetup( cls ): + "Make sure our class and superclasses are set up" + clas = cls + while clas and not getattr( clas, 'isSetup', True ): + clas.setup() + clas.isSetup = True + # Make pylint happy + clas = 
getattr( type( clas ), '__base__', None ) + + @classmethod + def setup( cls ): + "Make sure our class dependencies are available" + pathCheck( 'mnexec', 'ifconfig', moduleName='Mininet') + +class Host( Node ): + "A host is simply a Node" + pass + +class CPULimitedHost( Host ): + + "CPU limited host" + + def __init__( self, name, sched='cfs', **params ): + Host.__init__( self, name, **params ) + # BL: Setting the correct period/quota is tricky, particularly + # for RT. RT allows very small quotas, but the overhead + # seems to be high. CFS has a minimum quota of 1 ms, but + # still does better with larger period values. + self.period_us = params.get( 'period_us', 100000 ) + self.sched = sched + self.cgroupsInited = False + self.cgroup, self.rtprio = None, None + + def initCgroups( self ): + "Deferred cgroup initialization" + if self.cgroupsInited: + return + # Initialize class if necessary + if not CPULimitedHost.inited: + CPULimitedHost.init() + # Create a cgroup and move shell into it + self.cgroup = 'cpu,cpuacct,cpuset:/' + self.name + errFail( 'cgcreate -g ' + self.cgroup ) + # We don't add ourselves to a cpuset because you must + # specify the cpu and memory placement first + errFail( 'cgclassify -g cpu,cpuacct:/%s %s' % ( self.name, self.pid ) ) + if self.sched == 'rt': + self.checkRtGroupSched() + self.rtprio = 20 + + def cgroupSet( self, param, value, resource='cpu' ): + "Set a cgroup parameter and return its value" + cmd = [ 'cgset', '-r', "%s.%s=%s" % ( + resource, param, value), '/' + self.name ] + errFail( cmd ) + nvalue = self.cgroupGet( param, resource ) + if nvalue != str( value ): + error( '*** error: cgroupSet: %s set to %s instead of %s\n' + % ( param, nvalue, value ) ) + return nvalue + + def cgroupGet( self, param, resource='cpu' ): + "Return value of cgroup parameter" + pname = '%s.%s' % ( resource, param ) + cmd = 'cgget -n -r %s /%s' % ( pname, self.name ) + return quietRun( cmd )[len(pname)+1:].strip() + + def cgroupDel( self ): + "Clean up our cgroup" + # info( '*** deleting cgroup', self.cgroup, '\n' ) + _out, _err, exitcode = errRun( 'cgdelete -r ' + self.cgroup ) + # Sometimes cgdelete returns a resource busy error but still + # deletes the group; next attempt will give "no such file" + return exitcode == 0 or ( 'no such file' in _err.lower() ) + + def popen( self, *args, **kwargs ): + """Return a Popen() object in node's namespace + args: Popen() args, single list, or string + kwargs: Popen() keyword args""" + # Tell mnexec to execute command in our cgroup + mncmd = kwargs.pop( 'mncmd', [ 'mnexec', '-g', self.name, + '-da', str( self.pid ) ] ) + # if our cgroup is not given any cpu time, + # we cannot assign the RR Scheduler. + if self.sched == 'rt': + if int( self.cgroupGet( 'rt_runtime_us', 'cpu' ) ) <= 0: + mncmd += [ '-r', str( self.rtprio ) ] + else: + debug( '*** error: not enough cpu time available for %s.' % + self.name, 'Using cfs scheduler for subprocess\n' ) + return Host.popen( self, *args, mncmd=mncmd, **kwargs ) + + def cleanup( self ): + "Clean up Node, then clean up our cgroup" + super( CPULimitedHost, self ).cleanup() + retry( retries=3, delaySecs=.1, fn=self.cgroupDel ) + + _rtGroupSched = False # internal class var: Is CONFIG_RT_GROUP_SCHED set? 
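# Illustrative use of the cgroup helpers above (the value is hypothetical):
#     host.cgroupSet( 'cfs_quota_us', 50000 )
# runs "cgset -r cpu.cfs_quota_us=50000 /<name>" and then reads the value
# back via cgroupGet() to verify it took effect.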
+ + @classmethod + def checkRtGroupSched( cls ): + "Check (Ubuntu,Debian) kernel config for CONFIG_RT_GROUP_SCHED for RT" + if not cls._rtGroupSched: + release = quietRun( 'uname -r' ).strip('\r\n') + output = quietRun( 'grep CONFIG_RT_GROUP_SCHED /boot/config-%s' % + release ) + if output == '# CONFIG_RT_GROUP_SCHED is not set\n': + error( '\n*** error: please enable RT_GROUP_SCHED ' + 'in your kernel\n' ) + exit( 1 ) + cls._rtGroupSched = True + + def chrt( self ): + "Set RT scheduling priority" + quietRun( 'chrt -p %s %s' % ( self.rtprio, self.pid ) ) + result = quietRun( 'chrt -p %s' % self.pid ) + firstline = result.split( '\n' )[ 0 ] + lastword = firstline.split( ' ' )[ -1 ] + if lastword != 'SCHED_RR': + error( '*** error: could not assign SCHED_RR to %s\n' % self.name ) + return lastword + + def rtInfo( self, f ): + "Internal method: return parameters for RT bandwidth" + pstr, qstr = 'rt_period_us', 'rt_runtime_us' + # RT uses wall clock time for period and quota + quota = int( self.period_us * f ) + return pstr, qstr, self.period_us, quota + + def cfsInfo( self, f ): + "Internal method: return parameters for CFS bandwidth" + pstr, qstr = 'cfs_period_us', 'cfs_quota_us' + if self.cgversion == 'cgroup2': + pstr, qstr = 'max', '' + # CFS uses wall clock time for period and CPU time for quota. + quota = int( self.period_us * f * numCores() ) + period = self.period_us + if f > 0 and quota < 1000: + debug( '(cfsInfo: increasing default period) ' ) + quota = 1000 + period = int( quota / f / numCores() ) + # Reset to unlimited on negative quota + if quota < 0: + quota = 'max' if self.cgversion == 'cgroup2' else -1 + return pstr, qstr, period, quota + + # BL comment: + # This may not be the right API, + # since it doesn't specify CPU bandwidth in "absolute" + # units the way link bandwidth is specified. + # We should use MIPS or SPECINT or something instead. + # Alternatively, we should change from system fraction + # to CPU seconds per second, essentially assuming that + # all CPUs are the same. 
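# Worked example (assuming the default period_us=100000 and a 4-core machine):
# for f=0.5, cfsInfo() computes quota = int( 100000 * 0.5 * 4 ) = 200000,
# i.e. 200 ms of CPU time per 100 ms wall-clock period, which is half of the
# machine overall. If the computed quota falls below the 1 ms CFS minimum,
# it is raised to 1000 and the period recomputed.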
+ + def setCPUFrac( self, f, sched=None ): + """Set overall CPU fraction for this host + f: CPU bandwidth limit (positive fraction, or -1 for cfs unlimited) + sched: 'rt' or 'cfs' + Note 'cfs' requires CONFIG_CFS_BANDWIDTH, + and 'rt' requires CONFIG_RT_GROUP_SCHED""" + if not sched: + sched = self.sched + if sched == 'rt': + if not f or f < 0: + raise Exception( 'Please set a positive CPU fraction' + ' for sched=rt\n' ) + pstr, qstr, period, quota = self.rtInfo( f ) + elif sched == 'cfs': + pstr, qstr, period, quota = self.cfsInfo( f ) + else: + return + # Set cgroup's period and quota + if self.cgversion == 'cgroup': + setPeriod = self.cgroupSet( pstr, period ) + setQuota = self.cgroupSet( qstr, quota ) + else: + setQuota, setPeriod = self.cgroupSet( + pstr, '%s %s' % (quota, period) ).split() + if sched == 'rt': + # Set RT priority if necessary + sched = self.chrt() + info( '(%s %s/%dus) ' % ( sched, setQuota, int( setPeriod ) ) ) + + def setCPUs( self, cores, mems=0 ): + "Specify (real) cores that our cgroup can run on" + if not cores: + return + if isinstance( cores, list ): + cores = ','.join( [ str( c ) for c in cores ] ) + self.cgroupSet( resource='cpuset', param='cpus', + value=cores ) + # Memory placement is probably not relevant, but we + # must specify it anyway + self.cgroupSet( resource='cpuset', param='mems', + value=mems) + # We have to do this here after we've specified + # cpus and mems + errFail( 'cgclassify -g cpuset:/%s %s' % ( + self.name, self.pid ) ) + + # pylint: disable=arguments-differ + def config( self, cpu=-1, cores=None, **params ): + """cpu: desired overall system CPU fraction + cores: (real) core(s) this host can run on + params: parameters for Node.config()""" + r = Node.config( self, **params ) + self.initCgroups() + # Was considering cpu={'cpu': cpu , 'sched': sched}, but + # that seems redundant + self.setParam( r, 'setCPUFrac', cpu=cpu ) + self.setParam( r, 'setCPUs', cores=cores ) + return r + + inited = False + cgversion = 'cgroup2' + + @classmethod + def init( cls ): + "Initialization for CPULimitedHost class" + cls.cgversion = mountCgroups() + cls.inited = True + + def unlimit( self ): + "Unlimit cpu for cfs" + if self.sched == 'cfs' and self.params.get( 'cpu', -1 ) != -1: + self.setCPUFrac( -1, sched=self.sched ) + + +# Some important things to note: +# +# The "IP" address which setIP() assigns to the switch is not +# an "IP address for the switch" in the sense of IP routing. +# Rather, it is the IP address for the control interface, +# on the control network, and it is only relevant to the +# controller. If you are running in the root namespace +# (which is the only way to run OVS at the moment), the +# control interface is the loopback interface, and you +# normally never want to change its IP address! +# +# In general, you NEVER want to attempt to use Linux's +# network stack (i.e. ifconfig) to "assign" an IP address or +# MAC address to a switch data port. Instead, you "assign" +# the IP and MAC addresses in the controller by specifying +# packets that you want to receive or send. The "MAC" address +# reported by ifconfig for a switch data port is essentially +# meaningless. It is important to understand this if you +# want to create a functional router using OpenFlow. + +class Switch( Node ): + """A Switch is a Node that is running (or has execed?) 
+ an OpenFlow switch.""" + + portBase = 1 # Switches start with port 1 in OpenFlow + dpidLen = 16 # digits in dpid passed to switch + + def __init__( self, name, dpid=None, opts='', listenPort=None, **params): + """dpid: dpid hex string (or None to derive from name, e.g. s1 -> 1) + opts: additional switch options + listenPort: port to listen on for dpctl connections""" + Node.__init__( self, name, **params ) + self.dpid = self.defaultDpid( dpid ) + self.opts = opts + self.listenPort = listenPort + if not self.inNamespace: + self.controlIntf = Intf( 'lo', self, port=0 ) + + def defaultDpid( self, dpid=None ): + "Return correctly formatted dpid from dpid or switch name (s1 -> 1)" + if dpid: + # Remove any colons and make sure it's a good hex number + dpid = dpid.replace( ':', '' ) + assert len( dpid ) <= self.dpidLen and int( dpid, 16 ) >= 0 + else: + # Use hex of the first number in the switch name + nums = re.findall( r'\d+', self.name ) + if nums: + dpid = hex( int( nums[ 0 ] ) )[ 2: ] + else: + self.terminate() # Python 3.6 crash workaround + raise Exception( 'Unable to derive default datapath ID - ' + 'please either specify a dpid or use a ' + 'canonical switch name such as s23.' ) + return '0' * ( self.dpidLen - len( dpid ) ) + dpid + + def defaultIntf( self ): + "Return control interface" + if self.controlIntf: + return self.controlIntf + else: + return Node.defaultIntf( self ) + + def sendCmd( self, *cmd, **kwargs ): + """Send command to Node. + cmd: string""" + kwargs.setdefault( 'printPid', False ) + if not self.execed: + return Node.sendCmd( self, *cmd, **kwargs ) + else: + error( '*** Error: %s has execed and cannot accept commands' % + self.name ) + return None + + def connected( self ): + "Is the switch connected to a controller? (override this method)" + # Assume that we are connected by default to whatever we need to + # be connected to. This should be overridden by any OpenFlow + # switch, but not by a standalone bridge. + debug( 'Assuming', repr( self ), 'is connected to a controller\n' ) + return True + + def stop( self, deleteIntfs=True ): + """Stop switch + deleteIntfs: delete interfaces? (True)""" + if deleteIntfs: + self.deleteIntfs() + + def __repr__( self ): + "More informative string representation" + intfs = ( ','.join( [ '%s:%s' % ( i.name, i.IP() ) + for i in self.intfList() ] ) ) + return '<%s %s: %s pid=%s> ' % ( + self.__class__.__name__, self.name, intfs, self.pid ) + + +class UserSwitch( Switch ): + "User-space switch." + + dpidLen = 12 + + def __init__( self, name, dpopts='--no-slicing', **kwargs ): + """Init. + name: name for the switch + dpopts: additional arguments to ofdatapath (--no-slicing)""" + Switch.__init__( self, name, **kwargs ) + pathCheck( 'ofdatapath', 'ofprotocol', + moduleName='the OpenFlow reference user switch' + + '(openflow.org)' ) + if self.listenPort: + self.opts += ' --listen=ptcp:%i ' % self.listenPort + else: + self.opts += ' --listen=punix:/tmp/%s.listen' % self.name + self.dpopts = dpopts + + @classmethod + def setup( cls ): + "Ensure any dependencies are loaded; if not, try to load them." + if not os.path.exists( '/dev/net/tun' ): + moduleDeps( add=TUN ) + + def dpctl( self, *args ): + "Run dpctl command" + listenAddr = None + if not self.listenPort: + listenAddr = 'unix:/tmp/%s.listen' % self.name + else: + listenAddr = 'tcp:127.0.0.1:%i' % self.listenPort + return self.cmd( 'dpctl ' + ' '.join( args ) + + ' ' + listenAddr ) + + def connected( self ): + "Is the switch connected to a controller?" 
+ status = self.dpctl( 'status' ) + return ( 'remote.is-connected=true' in status and + 'local.is-connected=true' in status ) + + @staticmethod + def TCReapply( intf ): + """Unfortunately user switch and Mininet are fighting + over tc queuing disciplines. To resolve the conflict, + we re-create the user switch's configuration, but as a + leaf of the TCIntf-created configuration.""" + if isinstance( intf, TCIntf ): + ifspeed = 10000000000 # 10 Gbps + minspeed = ifspeed * 0.001 + + res = intf.config( **intf.params ) + + if res is None: # link may not have TC parameters + return + + # Re-add qdisc, root, and default classes user switch created, but + # with new parent, as setup by Mininet's TCIntf + parent = res['parent'] + intf.tc( "%s qdisc add dev %s " + parent + + " handle 1: htb default 0xfffe" ) + intf.tc( "%s class add dev %s classid 1:0xffff parent 1: htb rate " + + str(ifspeed) ) + intf.tc( "%s class add dev %s classid 1:0xfffe parent 1:0xffff " + + "htb rate " + str(minspeed) + " ceil " + str(ifspeed) ) + + def start( self, controllers ): + """Start OpenFlow reference user datapath. + Log to /tmp/sN-{ofd,ofp}.log. + controllers: list of controller objects""" + # Add controllers + clist = ','.join( [ 'tcp:%s:%d' % ( c.IP(), c.port ) + for c in controllers ] ) + ofdlog = '/tmp/' + self.name + '-ofd.log' + ofplog = '/tmp/' + self.name + '-ofp.log' + intfs = [ str( i ) for i in self.intfList() if not i.IP() ] + self.cmd( 'ofdatapath -i ' + ','.join( intfs ) + + ' punix:/tmp/' + self.name + ' -d %s ' % self.dpid + + self.dpopts + + ' 1> ' + ofdlog + ' 2> ' + ofdlog + ' &' ) + self.cmd( 'ofprotocol unix:/tmp/' + self.name + + ' ' + clist + + ' --fail=closed ' + self.opts + + ' 1> ' + ofplog + ' 2>' + ofplog + ' &' ) + if "no-slicing" not in self.dpopts: + # Only TCReapply if slicing is enable + sleep(1) # Allow ofdatapath to start before re-arranging qdisc's + for intf in self.intfList(): + if not intf.IP(): + self.TCReapply( intf ) + + def stop( self, deleteIntfs=True ): + """Stop OpenFlow reference user datapath. + deleteIntfs: delete interfaces? (True)""" + self.cmd( 'kill %ofdatapath' ) + self.cmd( 'kill %ofprotocol' ) + super( UserSwitch, self ).stop( deleteIntfs ) + + +class OVSSwitch( Switch ): + "Open vSwitch switch. Depends on ovs-vsctl." + + def __init__( self, name, failMode='secure', datapath='kernel', + inband=False, protocols=None, + reconnectms=1000, stp=False, batch=False, **params ): + """name: name for switch + failMode: controller loss behavior (secure|standalone) + datapath: userspace or kernel mode (kernel|user) + inband: use in-band control (False) + protocols: use specific OpenFlow version(s) (e.g. 
OpenFlow13) + Unspecified (or old OVS version) uses OVS default + reconnectms: max reconnect timeout in ms (0/None for default) + stp: enable STP (False, requires failMode=standalone) + batch: enable batch startup (False)""" + Switch.__init__( self, name, **params ) + self.failMode = failMode + self.datapath = datapath + self.inband = inband + self.protocols = protocols + self.reconnectms = reconnectms + self.stp = stp + self._uuids = [] # controller UUIDs + self.batch = batch + self.commands = [] # saved commands for batch startup + + @classmethod + def setup( cls ): + "Make sure Open vSwitch is installed and working" + pathCheck( 'ovs-vsctl', + moduleName='Open vSwitch (openvswitch.org)') + # This should no longer be needed, and it breaks + # with OVS 1.7 which has renamed the kernel module: + # moduleDeps( subtract=OF_KMOD, add=OVS_KMOD ) + out, err, exitcode = errRun( 'ovs-vsctl -t 1 show' ) + if exitcode: + error( out + err + + 'ovs-vsctl exited with code %d\n' % exitcode + + '*** Error connecting to ovs-db with ovs-vsctl\n' + 'Make sure that Open vSwitch is installed, ' + 'that ovsdb-server is running, and that\n' + '"ovs-vsctl show" works correctly.\n' + 'You may wish to try ' + '"service openvswitch-switch start".\n' ) + exit( 1 ) + version = quietRun( 'ovs-vsctl --version' ) + cls.OVSVersion = findall( r'\d+\.\d+', version )[ 0 ] + + @classmethod + def isOldOVS( cls ): + "Is OVS ersion < 1.10?" + return ( StrictVersion( cls.OVSVersion ) < + StrictVersion( '1.10' ) ) + + def dpctl( self, *args ): + "Run ovs-ofctl command" + return self.cmd( 'ovs-ofctl', args[ 0 ], self, *args[ 1: ] ) + + def vsctl( self, *args, **kwargs ): + "Run ovs-vsctl command (or queue for later execution)" + if self.batch: + cmd = ' '.join( str( arg ).strip() for arg in args ) + self.commands.append( cmd ) + return None + else: + return self.cmd( 'ovs-vsctl', *args, **kwargs ) + + @staticmethod + def TCReapply( intf ): + """Unfortunately OVS and Mininet are fighting + over tc queuing disciplines. As a quick hack/ + workaround, we clear OVS's and reapply our own.""" + if isinstance( intf, TCIntf ): + intf.config( **intf.params ) + + def attach( self, intf ): + "Connect a data port" + self.vsctl( 'add-port', self, intf ) + self.cmd( 'ifconfig', intf, 'up' ) + self.TCReapply( intf ) + + def detach( self, intf ): + "Disconnect a data port" + self.vsctl( 'del-port', self, intf ) + + def controllerUUIDs( self, update=False ): + """Return ovsdb UUIDs for our controllers + update: update cached value""" + if not self._uuids or update: + controllers = self.cmd( 'ovs-vsctl -- get Bridge', self, + 'Controller' ).strip() + if controllers.startswith( '[' ) and controllers.endswith( ']' ): + controllers = controllers[ 1 : -1 ] + if controllers: + self._uuids = [ c.strip() + for c in controllers.split( ',' ) ] + return self._uuids + + def connected( self ): + "Are we connected to at least one of our controllers?" 
+ for uuid in self.controllerUUIDs(): + if 'true' in self.vsctl( '-- get Controller', + uuid, 'is_connected' ): + return True + return self.failMode == 'standalone' + + def intfOpts( self, intf ): + "Return OVS interface options for intf" + opts = '' + if not self.isOldOVS(): + # ofport_request is not supported on old OVS + opts += ' ofport_request=%s' % self.ports[ intf ] + # Patch ports don't work well with old OVS + if isinstance( intf, OVSIntf ): + intf1, intf2 = intf.link.intf1, intf.link.intf2 + peer = intf1 if intf1 != intf else intf2 + opts += ' type=patch options:peer=%s' % peer + return '' if not opts else ' -- set Interface %s' % intf + opts + + def bridgeOpts( self ): + "Return OVS bridge options" + opts = ( ' other_config:datapath-id=%s' % self.dpid + + ' fail_mode=%s' % self.failMode ) + if not self.inband: + opts += ' other-config:disable-in-band=true' + if self.datapath == 'user': + opts += ' datapath_type=netdev' + if self.protocols and not self.isOldOVS(): + opts += ' protocols=%s' % self.protocols + if self.stp and self.failMode == 'standalone': + opts += ' stp_enable=true' + opts += ' other-config:dp-desc=%s' % self.name + return opts + + def start( self, controllers ): + "Start up a new OVS OpenFlow switch using ovs-vsctl" + if self.inNamespace: + raise Exception( + 'OVS kernel switch does not work in a namespace' ) + int( self.dpid, 16 ) # DPID must be a hex string + # Command to add interfaces + intfs = ''.join( ' -- add-port %s %s' % ( self, intf ) + + self.intfOpts( intf ) + for intf in self.intfList() + if self.ports[ intf ] and not intf.IP() ) + # Command to create controller entries + clist = [ ( self.name + c.name, '%s:%s:%d' % + ( c.protocol, c.IP(), c.port ) ) + for c in controllers ] + if self.listenPort: + clist.append( ( self.name + '-listen', + 'ptcp:%s' % self.listenPort ) ) + ccmd = '-- --id=@%s create Controller target=\\"%s\\"' + if self.reconnectms: + ccmd += ' max_backoff=%d' % self.reconnectms + cargs = ' '.join( ccmd % ( name, target ) + for name, target in clist ) + # Controller ID list + cids = ','.join( '@%s' % name for name, _target in clist ) + # Try to delete any existing bridges with the same name + if not self.isOldOVS(): + cargs += ' -- --if-exists del-br %s' % self + # One ovs-vsctl command to rule them all! + self.vsctl( cargs + + ' -- add-br %s' % self + + ' -- set bridge %s controller=[%s]' % ( self, cids ) + + self.bridgeOpts() + + intfs ) + # If necessary, restore TC config overwritten by OVS + if not self.batch: + for intf in self.intfList(): + self.TCReapply( intf ) + + # This should be ~ int( quietRun( 'getconf ARG_MAX' ) ), + # but the real limit seems to be much lower + argmax = 128000 + + @classmethod + def batchStartup( cls, switches, run=errRun ): + """Batch startup for OVS + switches: switches to start up + run: function to run commands (errRun)""" + info( '...' ) + cmds = 'ovs-vsctl' + for switch in switches: + if switch.isOldOVS(): + # Ideally we'd optimize this also + run( 'ovs-vsctl del-br %s' % switch ) + for cmd in switch.commands: + cmd = cmd.strip() + # Don't exceed ARG_MAX + if len( cmds ) + len( cmd ) >= cls.argmax: + run( cmds, shell=True ) + cmds = 'ovs-vsctl' + cmds += ' ' + cmd + switch.cmds = [] + switch.batch = False + if cmds: + run( cmds, shell=True ) + # Reapply link config if necessary... + for switch in switches: + for intf in switch.intfs.values(): + if isinstance( intf, TCIntf ): + intf.config( **intf.params ) + return switches + + def stop( self, deleteIntfs=True ): + """Terminate OVS switch. 
+ deleteIntfs: delete interfaces? (True)""" + self.cmd( 'ovs-vsctl del-br', self ) + if self.datapath == 'user': + self.cmd( 'ip link del', self ) + super( OVSSwitch, self ).stop( deleteIntfs ) + + @classmethod + def batchShutdown( cls, switches, run=errRun ): + "Shut down a list of OVS switches" + delcmd = 'del-br %s' + if switches and not switches[ 0 ].isOldOVS(): + delcmd = '--if-exists ' + delcmd + # First, delete them all from ovsdb + run( 'ovs-vsctl ' + + ' -- '.join( delcmd % s for s in switches ) ) + # Next, shut down all of the processes + pids = ' '.join( str( switch.pid ) for switch in switches ) + run( 'kill -HUP ' + pids ) + for switch in switches: + switch.terminate() + return switches + + +OVSKernelSwitch = OVSSwitch + + +class OVSBridge( OVSSwitch ): + "OVSBridge is an OVSSwitch in standalone/bridge mode" + + def __init__( self, *args, **kwargs ): + """stp: enable Spanning Tree Protocol (False) + see OVSSwitch for other options""" + kwargs.update( failMode='standalone' ) + OVSSwitch.__init__( self, *args, **kwargs ) + + def start( self, controllers ): + "Start bridge, ignoring controllers argument" + OVSSwitch.start( self, controllers=[] ) + + def connected( self ): + "Are we forwarding yet?" + if self.stp: + status = self.dpctl( 'show' ) + return 'STP_FORWARD' in status and 'STP_LEARN' not in status + else: + return True + + +class IVSSwitch( Switch ): + "Indigo Virtual Switch" + + def __init__( self, name, verbose=False, **kwargs ): + Switch.__init__( self, name, **kwargs ) + self.verbose = verbose + + @classmethod + def setup( cls ): + "Make sure IVS is installed" + pathCheck( 'ivs-ctl', 'ivs', + moduleName="Indigo Virtual Switch (projectfloodlight.org)" ) + out, err, exitcode = errRun( 'ivs-ctl show' ) + if exitcode: + error( out + err + + 'ivs-ctl exited with code %d\n' % exitcode + + '*** The openvswitch kernel module might ' + 'not be loaded. Try modprobe openvswitch.\n' ) + exit( 1 ) + + @classmethod + def batchShutdown( cls, switches ): + "Kill each IVS switch, to be waited on later in stop()" + for switch in switches: + switch.cmd( 'kill %ivs' ) + return switches + + def start( self, controllers ): + "Start up a new IVS switch" + args = ['ivs'] + args.extend( ['--name', self.name] ) + args.extend( ['--dpid', self.dpid] ) + if self.verbose: + args.extend( ['--verbose'] ) + for intf in self.intfs.values(): + if not intf.IP(): + args.extend( ['-i', intf.name] ) + for c in controllers: + args.extend( ['-c', '%s:%d' % (c.IP(), c.port)] ) + if self.listenPort: + args.extend( ['--listen', '127.0.0.1:%i' % self.listenPort] ) + args.append( self.opts ) + + logfile = '/tmp/ivs.%s.log' % self.name + + self.cmd( ' '.join(args) + ' >' + logfile + ' 2>&1 on controller. + Log to /tmp/cN.log""" + pathCheck( self.command ) + cout = '/tmp/' + self.name + '.log' + if self.cdir is not None: + self.cmd( 'cd ' + self.cdir ) + self.cmd( self.command + ' ' + self.cargs % self.port + + ' 1>' + cout + ' 2>' + cout + ' &' ) + self.execed = False + + # pylint: disable=arguments-differ,signature-differs + def stop( self, *args, **kwargs ): + "Stop controller." 
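
OVSBridge above puts the switch into standalone (fail-open) mode, so it forwards on its own and needs no OpenFlow controller. A rough usage sketch, assuming a stock Mininet install (topology size and option values are illustrative, not part of this commit):

from mininet.net import Mininet
from mininet.node import OVSBridge
from mininet.topo import SingleSwitchTopo

# Two hosts on one OVS bridge; no controller is started or contacted
net = Mininet( topo=SingleSwitchTopo( k=2 ), switch=OVSBridge,
               controller=None )
net.start()
net.pingAll()
net.stop()
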
+ self.cmd( 'kill %' + self.command ) + self.cmd( 'wait %' + self.command ) + super( Controller, self ).stop( *args, **kwargs ) + + def IP( self, intf=None ): + "Return IP address of the Controller" + if self.intfs: + ip = Node.IP( self, intf ) + else: + ip = self.ip + return ip + + def __repr__( self ): + "More informative string representation" + return '<%s %s: %s:%s pid=%s> ' % ( + self.__class__.__name__, self.name, + self.IP(), self.port, self.pid ) + + @classmethod + def isAvailable( cls ): + "Is controller available?" + return which( 'controller' ) + + +class OVSController( Controller ): + "Open vSwitch controller" + def __init__( self, name, **kwargs ): + kwargs.setdefault( 'command', self.isAvailable() or + 'ovs-controller' ) + Controller.__init__( self, name, **kwargs ) + + @classmethod + def isAvailable( cls ): + return (which( 'ovs-controller' ) or + which( 'test-controller' ) or + which( 'ovs-testcontroller' )) + +class NOX( Controller ): + "Controller to run a NOX application." + + def __init__( self, name, *noxArgs, **kwargs ): + """Init. + name: name to give controller + noxArgs: arguments (strings) to pass to NOX""" + if not noxArgs: + warn( 'warning: no NOX modules specified; ' + 'running packetdump only\n' ) + noxArgs = [ 'packetdump' ] + elif not isinstance( noxArgs, ( list, tuple ) ): + noxArgs = [ noxArgs ] + + if 'NOX_CORE_DIR' not in os.environ: + exit( 'exiting; please set missing NOX_CORE_DIR env var' ) + noxCoreDir = os.environ[ 'NOX_CORE_DIR' ] + + Controller.__init__( self, name, + command=noxCoreDir + '/nox_core', + cargs='--libdir=/usr/local/lib -v ' + '-i ptcp:%s ' + + ' '.join( noxArgs ), + cdir=noxCoreDir, + **kwargs ) + +class Ryu( Controller ): + "Ryu OpenFlow Controller" + def __init__( self, name, ryuArgs='ryu.app.simple_switch', + command='ryu run', **kwargs ): + """Init. + name: name to give controller. + ryuArgs: modules to pass to Ryu (ryu.app.simple_switch) + command: command to run Ryu ('ryu run')""" + if isinstance( ryuArgs, ( list, tuple ) ): + ryuArgs = ' '.join( ryuArgs ) + cargs = kwargs.pop( + 'cargs', ryuArgs + ' --ofp-tcp-listen-port %s' ) + Controller.__init__( self, name, command=command, + cargs=cargs, **kwargs ) + + +class RemoteController( Controller ): + "Controller running outside of Mininet's control." + + def __init__( self, name, ip='127.0.0.1', + port=None, **kwargs): + """Init. + name: name to give controller + ip: the IP address where the remote controller is + listening + port: the port where the remote controller is listening""" + Controller.__init__( self, name, ip=ip, port=port, **kwargs ) + + def start( self ): + "Overridden to do nothing." + return + + # pylint: disable=arguments-differ + def stop( self ): + "Overridden to do nothing." 
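
RemoteController above simply records where an externally managed controller is listening, and checkListening() probes that address. Typical use looks roughly like the following sketch; the IP address and port are placeholders for wherever your controller actually runs.

from mininet.net import Mininet
from mininet.node import RemoteController
from mininet.topo import LinearTopo

net = Mininet( topo=LinearTopo( k=3 ), controller=None )
# Point the network at a controller that is already running elsewhere
net.addController( 'c0', controller=RemoteController,
                   ip='127.0.0.1', port=6653 )
net.start()
net.pingAll()
net.stop()
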
+ return + + def checkListening( self ): + "Warn if remote controller is not accessible" + if self.port is not None: + self.isListening( self.ip, self.port ) + else: + for port in 6653, 6633: + if self.isListening( self.ip, port ): + self.port = port + info( "Connecting to remote controller" + " at %s:%d\n" % ( self.ip, self.port )) + break + + if self.port is None: + self.port = 6653 + warn( "Setting remote controller" + " to %s:%d\n" % ( self.ip, self.port )) + + def isListening( self, ip, port ): + "Check if a remote controller is listening at a specific ip and port" + listening = self.cmd( "echo A | telnet -e A %s %d" % ( ip, port ) ) + if 'Connected' not in listening: + warn( "Unable to contact the remote controller" + " at %s:%d\n" % ( ip, port ) ) + return False + else: + return True + + +DefaultControllers = ( Controller, OVSController ) + +def findController( controllers=DefaultControllers ): + "Return first available controller from list, if any" + for controller in controllers: + if controller.isAvailable(): + return controller + return None + +def DefaultController( name, controllers=DefaultControllers, **kwargs ): + "Find a controller that is available and instantiate it" + controller = findController( controllers ) + if not controller: + raise Exception( 'Could not find a default OpenFlow controller' ) + return controller( name, **kwargs ) + +def NullController( *_args, **_kwargs ): + "Nonexistent controller - simply returns None" + return None diff --git a/mininet/nodelib.py b/mininet/nodelib.py new file mode 100644 index 0000000..44ee6d0 --- /dev/null +++ b/mininet/nodelib.py @@ -0,0 +1,154 @@ +""" +Node Library for Mininet + +This contains additional Node types which you may find to be useful. +""" + +from mininet.node import Node, Switch +from mininet.log import info, warn +from mininet.moduledeps import pathCheck +from mininet.util import quietRun + + +class LinuxBridge( Switch ): + "Linux Bridge (with optional spanning tree)" + + nextPrio = 100 # next bridge priority for spanning tree + + def __init__( self, name, stp=False, prio=None, **kwargs ): + """stp: use spanning tree protocol? (default False) + prio: optional explicit bridge priority for STP""" + self.stp = stp + if prio: + self.prio = prio + else: + self.prio = LinuxBridge.nextPrio + LinuxBridge.nextPrio += 1 + Switch.__init__( self, name, **kwargs ) + + def connected( self ): + "Are we forwarding yet?" + if self.stp: + return 'forwarding' in self.cmd( 'brctl showstp', self ) + else: + return True + + def start( self, _controllers ): + "Start Linux bridge" + self.cmd( 'ifconfig', self, 'down' ) + self.cmd( 'brctl delbr', self ) + self.cmd( 'brctl addbr', self ) + if self.stp: + self.cmd( 'brctl setbridgeprio', self.prio ) + self.cmd( 'brctl stp', self, 'on' ) + for i in self.intfList(): + if self.name in i.name: + self.cmd( 'brctl addif', self, i ) + self.cmd( 'ifconfig', self, 'up' ) + + def stop( self, deleteIntfs=True ): + """Stop Linux bridge + deleteIntfs: delete interfaces? (True)""" + self.cmd( 'ifconfig', self, 'down' ) + self.cmd( 'brctl delbr', self ) + super( LinuxBridge, self ).stop( deleteIntfs ) + + def dpctl( self, *args ): + "Run brctl command" + return self.cmd( 'brctl', *args ) + + @classmethod + def setup( cls ): + "Check dependencies and warn about firewalling" + pathCheck( 'brctl', moduleName='bridge-utils' ) + # Disable Linux bridge firewalling so that traffic can flow! 
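
LinuxBridge above provides a kernel-bridge alternative to OVS; with stp=True it also tolerates loops, which is why each instance is given its own bridge priority. A usage sketch (option values are illustrative, not part of this commit):

from functools import partial
from mininet.net import Mininet
from mininet.nodelib import LinuxBridge
from mininet.topo import SingleSwitchTopo

# Spanning tree enabled, so loopy topologies (e.g. a torus) will still converge
net = Mininet( topo=SingleSwitchTopo( k=2 ),
               switch=partial( LinuxBridge, stp=True ),
               controller=None )
net.start()
net.pingAll()
net.stop()
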
+ for table in 'arp', 'ip', 'ip6': + cmd = 'sysctl net.bridge.bridge-nf-call-%stables' % table + out = quietRun( cmd ).strip() + if out.endswith( '1' ): + warn( 'Warning: Linux bridge may not work with', out, '\n' ) + + +class NAT( Node ): + "NAT: Provides connectivity to external network" + + def __init__( self, name, subnet='10.0/8', + localIntf=None, flush=False, **params): + """Start NAT/forwarding between Mininet and external network + subnet: Mininet subnet (default 10.0/8) + flush: flush iptables before installing NAT rules""" + super( NAT, self ).__init__( name, **params ) + + self.subnet = subnet + self.localIntf = localIntf + self.flush = flush + self.forwardState = self.cmd( 'sysctl -n net.ipv4.ip_forward' ).strip() + + def setManualConfig( self, intf ): + """Prevent network-manager/networkd from messing with our interface + by specifying manual configuration in /etc/network/interfaces""" + cfile = '/etc/network/interfaces' + line = '\niface %s inet manual\n' % intf + try: + with open( cfile ) as f: + config = f.read() + except IOError: + config = '' + if ( line ) not in config: + info( '*** Adding "' + line.strip() + '" to ' + cfile + '\n' ) + with open( cfile, 'a' ) as f: + f.write( line ) + # Probably need to restart network manager to be safe - + # hopefully this won't disconnect you + self.cmd( 'service network-manager restart || netplan apply' ) + + # pylint: disable=arguments-differ + def config( self, **params ): + """Configure the NAT and iptables""" + + if not self.localIntf: + self.localIntf = self.defaultIntf() + + self.setManualConfig( self.localIntf ) + + # Now we can configure manually without interference + super( NAT, self).config( **params ) + + if self.flush: + self.cmd( 'sysctl net.ipv4.ip_forward=0' ) + self.cmd( 'iptables -F' ) + self.cmd( 'iptables -t nat -F' ) + # Create default entries for unmatched traffic + self.cmd( 'iptables -P INPUT ACCEPT' ) + self.cmd( 'iptables -P OUTPUT ACCEPT' ) + self.cmd( 'iptables -P FORWARD DROP' ) + + # Install NAT rules + self.cmd( 'iptables -I FORWARD', + '-i', self.localIntf, '-d', self.subnet, '-j DROP' ) + self.cmd( 'iptables -A FORWARD', + '-i', self.localIntf, '-s', self.subnet, '-j ACCEPT' ) + self.cmd( 'iptables -A FORWARD', + '-o', self.localIntf, '-d', self.subnet, '-j ACCEPT' ) + self.cmd( 'iptables -t nat -A POSTROUTING', + '-s', self.subnet, "'!'", '-d', self.subnet, + '-j MASQUERADE' ) + + # Instruct the kernel to perform forwarding + self.cmd( 'sysctl net.ipv4.ip_forward=1' ) + + def terminate( self ): + "Stop NAT/forwarding between Mininet and external network" + # Remote NAT rules + self.cmd( 'iptables -D FORWARD', + '-i', self.localIntf, '-d', self.subnet, '-j DROP' ) + self.cmd( 'iptables -D FORWARD', + '-i', self.localIntf, '-s', self.subnet, '-j ACCEPT' ) + self.cmd( 'iptables -D FORWARD', + '-o', self.localIntf, '-d', self.subnet, '-j ACCEPT' ) + self.cmd( 'iptables -t nat -D POSTROUTING', + '-s', self.subnet, '\'!\'', '-d', self.subnet, + '-j MASQUERADE' ) + # Put the forwarding state back to what it was + self.cmd( 'sysctl net.ipv4.ip_forward=%s' % self.forwardState ) + super( NAT, self ).terminate() diff --git a/mininet/term.py b/mininet/term.py new file mode 100644 index 0000000..769367d --- /dev/null +++ b/mininet/term.py @@ -0,0 +1,81 @@ +""" +Terminal creation and cleanup. +Utility functions to run a terminal (connected via socat(1)) on each host. + +Requires socat(1) and xterm(1). +Optionally uses gnome-terminal. 
+""" + +from os import environ + +from mininet.log import error +from mininet.util import quietRun, errRun + +def tunnelX11( node, display=None): + """Create an X11 tunnel from node:6000 to the root host + display: display on root host (optional) + returns: node $DISPLAY, Popen object for tunnel""" + if display is None and 'DISPLAY' in environ: + display = environ[ 'DISPLAY' ] + if display is None: + error( "Error: Cannot connect to display\n" ) + return None, None + host, screen = display.split( ':' ) + # Unix sockets should work + if not host or host == 'unix': + # GDM3 doesn't put credentials in .Xauthority, + # so allow root to just connect + quietRun( 'xhost +si:localuser:root' ) + return display, None + else: + # Create a tunnel for the TCP connection + port = 6000 + int( float( screen ) ) + connection = r'TCP\:%s\:%s' % ( host, port ) + cmd = [ "socat", "TCP-LISTEN:%d,fork,reuseaddr" % port, + "EXEC:'mnexec -a 1 socat STDIO %s'" % connection ] + return 'localhost:' + screen, node.popen( cmd ) + +def makeTerm( node, title='Node', term='xterm', display=None, cmd='bash'): + """Create an X11 tunnel to the node and start up a terminal. + node: Node object + title: base title + term: 'xterm' or 'gterm' + returns: two Popen objects, tunnel and terminal""" + title = '"%s: %s"' % ( title, node.name ) + if not node.inNamespace: + title += ' (root)' + cmds = { + 'xterm': [ 'xterm', '-title', title, '-display' ], + 'gterm': [ 'gnome-terminal', '--title', title, '--display' ] + } + if term not in cmds: + error( 'invalid terminal type: %s' % term ) + return None + display, tunnel = tunnelX11( node, display ) + if display is None: + return [] + term = node.popen( cmds[ term ] + + [ display, '-e', 'env TERM=ansi %s' % cmd ] ) + return [ tunnel, term ] if tunnel else [ term ] + +def runX11( node, cmd ): + "Run an X11 client on a node" + _display, tunnel = tunnelX11( node ) + if _display is None: + return [] + popen = node.popen( cmd ) + return [ tunnel, popen ] + +def cleanUpScreens(): + "Remove moldy socat X11 tunnels." + errRun( "pkill -9 -f mnexec.*socat" ) + +def makeTerms( nodes, title='Node', term='xterm' ): + """Create terminals. 
+ nodes: list of Node objects + title: base title for each + returns: list of created tunnel/terminal processes""" + terms = [] + for node in nodes: + terms += makeTerm( node, title, term ) + return terms diff --git a/mininet/test/runner.py b/mininet/test/runner.py new file mode 100755 index 0000000..4540599 --- /dev/null +++ b/mininet/test/runner.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python + +""" +Run all mininet core tests + -v : verbose output + -quick : skip tests that take more than ~30 seconds +""" + +from unittest import defaultTestLoader, TextTestRunner +import os +import sys +from mininet.util import ensureRoot +from mininet.clean import cleanup +from mininet.log import setLogLevel + +def runTests( testDir, verbosity=1 ): + "discover and run all tests in testDir" + # ensure root and cleanup before starting tests + ensureRoot() + cleanup() + # discover all tests in testDir + testSuite = defaultTestLoader.discover( testDir ) + # run tests + success = ( TextTestRunner( verbosity=verbosity ) + .run( testSuite ).wasSuccessful() ) + sys.exit( 0 if success else 1 ) + + +if __name__ == '__main__': + setLogLevel( 'warning' ) + # get the directory containing example tests + thisdir = os.path.dirname( os.path.realpath( __file__ ) ) + vlevel = 2 if '-v' in sys.argv else 1 + runTests( testDir=thisdir, verbosity=vlevel ) diff --git a/mininet/test/test_hifi.py b/mininet/test/test_hifi.py new file mode 100755 index 0000000..e1cf980 --- /dev/null +++ b/mininet/test/test_hifi.py @@ -0,0 +1,269 @@ +#!/usr/bin/env python + +"""Package: mininet + Test creation and pings for topologies with link and/or CPU options.""" + +import unittest +import sys +from functools import partial + +from mininet.net import Mininet +from mininet.node import OVSSwitch, UserSwitch, IVSSwitch +from mininet.node import CPULimitedHost +from mininet.link import TCLink +from mininet.topo import Topo +from mininet.log import setLogLevel +from mininet.util import quietRun +from mininet.clean import cleanup + +# Number of hosts for each test +N = 2 + + +class SingleSwitchOptionsTopo(Topo): + "Single switch connected to n hosts." + def __init__(self, n=2, hopts=None, lopts=None): + if not hopts: + hopts = {} + if not lopts: + lopts = {} + Topo.__init__(self, hopts=hopts, lopts=lopts) + switch = self.addSwitch('s1') + for h in range(n): + host = self.addHost('h%s' % (h + 1)) + self.addLink(host, switch) + +# Tell pylint not to complain about calls to other class +# pylint: disable=E1101 + +class testOptionsTopoCommon( object ): + """Verify ability to create networks with host and link options + (common code).""" + + switchClass = None # overridden in subclasses + + @staticmethod + def tearDown(): + "Clean up if necessary" + if sys.exc_info() != ( None, None, None ): + cleanup() + + def runOptionsTopoTest( self, n, msg, hopts=None, lopts=None ): + "Generic topology-with-options test runner." 
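
The options topology above exists so the tests can hand per-host CPU limits (hopts) and per-link shaping parameters (lopts) to CPULimitedHost and TCLink. Outside the test harness the same pattern looks roughly like this sketch; all parameter values are illustrative.

from mininet.net import Mininet
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from mininet.topo import SingleSwitchTopo

# hopts/lopts are applied to every host/link the topology adds
topo = SingleSwitchTopo( k=2, hopts={ 'cpu': 0.1 },
                         lopts={ 'bw': 10, 'delay': '5ms', 'use_htb': True } )
net = Mininet( topo=topo, host=CPULimitedHost, link=TCLink )
net.start()
net.pingAll()
net.stop()
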
+ mn = Mininet( topo=SingleSwitchOptionsTopo( n=n, hopts=hopts, + lopts=lopts ), + host=CPULimitedHost, link=TCLink, + switch=self.switchClass, waitConnected=True ) + dropped = mn.run( mn.ping ) + hoptsStr = ', '.join( '%s: %s' % ( opt, value ) + for opt, value in hopts.items() ) + loptsStr = ', '.join( '%s: %s' % ( opt, value ) + for opt, value in lopts.items() ) + msg += ( '%s%% of pings were dropped during mininet.ping().\n' + 'Topo = SingleSwitchTopo, %s hosts\n' + 'hopts = %s\n' + 'lopts = %s\n' + 'host = CPULimitedHost\n' + 'link = TCLink\n' + 'Switch = %s\n' + % ( dropped, n, hoptsStr, loptsStr, self.switchClass ) ) + + self.assertEqual( dropped, 0, msg=msg ) + + def assertWithinTolerance( self, measured, expected, tolerance_frac, msg ): + """Check that a given value is within a tolerance of expected + tolerance_frac: less-than-1.0 value; 0.8 would yield 20% tolerance. + """ + upperBound = ( float( expected ) + ( 1 - tolerance_frac ) * + float( expected ) ) + lowerBound = float( expected ) * tolerance_frac + info = ( 'measured value is out of bounds\n' + 'expected value: %s\n' + 'measured value: %s\n' + 'failure tolerance: %s\n' + 'upper bound: %s\n' + 'lower bound: %s\n' + % ( expected, measured, tolerance_frac, + upperBound, lowerBound ) ) + msg += info + + self.assertGreaterEqual( float( measured ), lowerBound, msg=msg ) + self.assertLessEqual( float( measured ), upperBound, msg=msg ) + + def testCPULimits( self ): + "Verify topology creation with CPU limits set for both schedulers." + CPU_FRACTION = 0.1 + CPU_TOLERANCE = 0.8 # CPU fraction below which test should fail + hopts = { 'cpu': CPU_FRACTION } + # self.runOptionsTopoTest( N, hopts=hopts ) + + mn = Mininet( SingleSwitchOptionsTopo( n=N, hopts=hopts ), + host=CPULimitedHost, switch=self.switchClass, + waitConnected=True ) + mn.start() + results = mn.runCpuLimitTest( cpu=CPU_FRACTION ) + mn.stop() + hostUsage = '\n'.join( 'h%s: %s' % + ( n + 1, + results[ (n - 1) * 5 : (n * 5) - 1 ] ) + for n in range( N ) ) + hoptsStr = ', '.join( '%s: %s' % ( opt, value ) + for opt, value in hopts.items() ) + msg = ( '\nTesting cpu limited to %d%% of cpu per host\n' + 'cpu usage percent per host:\n%s\n' + 'Topo = SingleSwitchTopo, %s hosts\n' + 'hopts = %s\n' + 'host = CPULimitedHost\n' + 'Switch = %s\n' + % ( CPU_FRACTION * 100, hostUsage, N, hoptsStr, + self.switchClass ) ) + for pct in results: + # divide cpu by 100 to convert from percentage to fraction + self.assertWithinTolerance( pct/100, CPU_FRACTION, + CPU_TOLERANCE, msg ) + + def testLinkBandwidth( self ): + "Verify that link bandwidths are accurate within a bound." + if self.switchClass is UserSwitch: + self.skipTest( 'UserSwitch has very poor performance -' + ' skipping for now' ) + BW = 5 # Mbps + BW_TOLERANCE = 0.8 # BW fraction below which test should fail + # Verify ability to create limited-link topo first; + lopts = { 'bw': BW, 'use_htb': True } + # Also verify correctness of limit limiting within a bound. 
+ mn = Mininet( SingleSwitchOptionsTopo( n=N, lopts=lopts ), + link=TCLink, switch=self.switchClass, + waitConnected=True ) + bw_strs = mn.run( mn.iperf, fmt='m' ) + loptsStr = ', '.join( '%s: %s' % ( opt, value ) + for opt, value in lopts.items() ) + msg = ( '\nTesting link bandwidth limited to %d Mbps per link\n' + 'iperf results[ client, server ]: %s\n' + 'Topo = SingleSwitchTopo, %s hosts\n' + 'Link = TCLink\n' + 'lopts = %s\n' + 'host = default\n' + 'switch = %s\n' + % ( BW, bw_strs, N, loptsStr, self.switchClass ) ) + + # On the client side, iperf doesn't wait for ACKs - it simply + # reports how long it took to fill up the TCP send buffer. + # As long as the kernel doesn't wait a long time before + # delivering bytes to the iperf server, its reported data rate + # should be close to the actual receive rate. + serverRate, _clientRate = bw_strs + bw = float( serverRate.split(' ')[0] ) + self.assertWithinTolerance( bw, BW, BW_TOLERANCE, msg ) + + def testLinkDelay( self ): + "Verify that link delays are accurate within a bound." + DELAY_MS = 15 + DELAY_TOLERANCE = 0.8 # Delay fraction below which test should fail + REPS = 3 + lopts = { 'delay': '%sms' % DELAY_MS, 'use_htb': True } + mn = Mininet( SingleSwitchOptionsTopo( n=N, lopts=lopts ), + link=TCLink, switch=self.switchClass, autoStaticArp=True, + waitConnected=True ) + mn.start() + for _ in range( REPS ): + ping_delays = mn.pingFull() + mn.stop() + test_outputs = ping_delays[0] + # Ignore unused variables below + # pylint: disable=W0612 + node, dest, ping_outputs = test_outputs + sent, received, rttmin, rttavg, rttmax, rttdev = ping_outputs + pingFailMsg = 'sent %s pings, only received %s' % ( sent, received ) + self.assertEqual( sent, received, msg=pingFailMsg ) + # pylint: enable=W0612 + loptsStr = ', '.join( '%s: %s' % ( opt, value ) + for opt, value in lopts.items() ) + msg = ( '\nTesting Link Delay of %s ms\n' + 'ping results across 4 links:\n' + '(Sent, Received, rttmin, rttavg, rttmax, rttdev)\n' + '%s\n' + 'Topo = SingleSwitchTopo, %s hosts\n' + 'Link = TCLink\n' + 'lopts = %s\n' + 'host = default' + 'switch = %s\n' + % ( DELAY_MS, ping_outputs, N, loptsStr, self.switchClass ) ) + + for rttval in [rttmin, rttavg, rttmax]: + # Multiply delay by 4 to cover there & back on two links + self.assertWithinTolerance( rttval, DELAY_MS * 4.0, + DELAY_TOLERANCE, msg ) + + def testLinkLoss( self ): + "Verify that we see packet drops with a high configured loss rate." + LOSS_PERCENT = 99 + REPS = 1 + lopts = { 'loss': LOSS_PERCENT, 'use_htb': True } + mn = Mininet( topo=SingleSwitchOptionsTopo( n=N, lopts=lopts ), + host=CPULimitedHost, link=TCLink, + switch=self.switchClass, + waitConnected=True ) + # Drops are probabilistic, but the chance of no dropped packets is + # 1 in 100 million with 4 hops for a link w/99% loss. + dropped_total = 0 + mn.start() + for _ in range(REPS): + dropped_total += mn.ping(timeout='1') + mn.stop() + + loptsStr = ', '.join( '%s: %s' % ( opt, value ) + for opt, value in lopts.items() ) + msg = ( '\nTesting packet loss with %d%% loss rate\n' + 'number of dropped pings during mininet.ping(): %s\n' + 'expected number of dropped packets: 1\n' + 'Topo = SingleSwitchTopo, %s hosts\n' + 'Link = TCLink\n' + 'lopts = %s\n' + 'host = default\n' + 'switch = %s\n' + % ( LOSS_PERCENT, dropped_total, N, loptsStr, + self.switchClass ) ) + + self.assertGreater( dropped_total, 0, msg ) + + def testMostOptions( self ): + "Verify topology creation with most link options and CPU limits." 
+ lopts = { 'bw': 10, 'delay': '5ms', 'use_htb': True } + hopts = { 'cpu': 0.5 / N } + msg = '\nTesting many cpu and link options\n' + self.runOptionsTopoTest( N, msg, hopts=hopts, lopts=lopts ) + +# pylint: enable=E1101 + +class testOptionsTopoOVSKernel( testOptionsTopoCommon, unittest.TestCase ): + """Verify ability to create networks with host and link options + (OVS kernel switch).""" + longMessage = True + switchClass = OVSSwitch + +@unittest.skip( 'Skipping OVS user switch test for now' ) +class testOptionsTopoOVSUser( testOptionsTopoCommon, unittest.TestCase ): + """Verify ability to create networks with host and link options + (OVS user switch).""" + longMessage = True + switchClass = partial( OVSSwitch, datapath='user' ) + +@unittest.skipUnless( quietRun( 'which ivs-ctl' ), 'IVS is not installed' ) +class testOptionsTopoIVS( testOptionsTopoCommon, unittest.TestCase ): + "Verify ability to create networks with host and link options (IVS)." + longMessage = True + switchClass = IVSSwitch + +@unittest.skipUnless( quietRun( 'which ofprotocol' ), + 'Reference user switch is not installed' ) +class testOptionsTopoUserspace( testOptionsTopoCommon, unittest.TestCase ): + """Verify ability to create networks with host and link options + (UserSwitch).""" + longMessage = True + switchClass = UserSwitch + + +if __name__ == '__main__': + setLogLevel( 'warning' ) + unittest.main() diff --git a/mininet/test/test_nets.py b/mininet/test/test_nets.py new file mode 100755 index 0000000..98048ec --- /dev/null +++ b/mininet/test/test_nets.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python + +"""Package: mininet + Test creation and all-pairs ping for each included mininet topo type.""" + +import unittest +import sys +from functools import partial + +from mininet.net import Mininet +from mininet.node import Host, Controller +from mininet.node import UserSwitch, OVSSwitch, IVSSwitch +from mininet.topo import SingleSwitchTopo, LinearTopo +from mininet.log import setLogLevel +from mininet.util import quietRun +from mininet.clean import cleanup + +# Tell pylint not to complain about calls to other class +# pylint: disable=E1101 + +class testSingleSwitchCommon( object ): + "Test ping with single switch topology (common code)." + + switchClass = None # overridden in subclasses + + @staticmethod + def tearDown(): + "Clean up if necessary" + if sys.exc_info() != ( None, None, None ): + cleanup() + + def testMinimal( self ): + "Ping test on minimal topology" + mn = Mininet( SingleSwitchTopo(), self.switchClass, Host, Controller, + waitConnected=True ) + dropped = mn.run( mn.ping ) + self.assertEqual( dropped, 0 ) + + def testSingle5( self ): + "Ping test on 5-host single-switch topology" + mn = Mininet( SingleSwitchTopo( k=5 ), self.switchClass, Host, + Controller, waitConnected=True ) + dropped = mn.run( mn.ping ) + self.assertEqual( dropped, 0 ) + +# pylint: enable=E1101 + +class testSingleSwitchOVSKernel( testSingleSwitchCommon, unittest.TestCase ): + "Test ping with single switch topology (OVS kernel switch)." + switchClass = OVSSwitch + +class testSingleSwitchOVSUser( testSingleSwitchCommon, unittest.TestCase ): + "Test ping with single switch topology (OVS user switch)." + switchClass = partial( OVSSwitch, datapath='user' ) + +@unittest.skipUnless( quietRun( 'which ivs-ctl' ), 'IVS is not installed' ) +class testSingleSwitchIVS( testSingleSwitchCommon, unittest.TestCase ): + "Test ping with single switch topology (IVS switch)." 
+ switchClass = IVSSwitch + +@unittest.skipUnless( quietRun( 'which ofprotocol' ), + 'Reference user switch is not installed' ) +class testSingleSwitchUserspace( testSingleSwitchCommon, unittest.TestCase ): + "Test ping with single switch topology (Userspace switch)." + switchClass = UserSwitch + + +# Tell pylint not to complain about calls to other class +# pylint: disable=E1101 + +class testLinearCommon( object ): + "Test all-pairs ping with LinearNet (common code)." + + switchClass = None # overridden in subclasses + + def testLinear5( self ): + "Ping test on a 5-switch topology" + mn = Mininet( LinearTopo( k=5 ), self.switchClass, Host, + Controller, waitConnected=True ) + dropped = mn.run( mn.ping ) + self.assertEqual( dropped, 0 ) + +# pylint: enable=E1101 + + +class testLinearOVSKernel( testLinearCommon, unittest.TestCase ): + "Test all-pairs ping with LinearNet (OVS kernel switch)." + switchClass = OVSSwitch + +class testLinearOVSUser( testLinearCommon, unittest.TestCase ): + "Test all-pairs ping with LinearNet (OVS user switch)." + switchClass = partial( OVSSwitch, datapath='user' ) + +@unittest.skipUnless( quietRun( 'which ivs-ctl' ), 'IVS is not installed' ) +class testLinearIVS( testLinearCommon, unittest.TestCase ): + "Test all-pairs ping with LinearNet (IVS switch)." + switchClass = IVSSwitch + +@unittest.skipUnless( quietRun( 'which ofprotocol' ), + 'Reference user switch is not installed' ) +class testLinearUserspace( testLinearCommon, unittest.TestCase ): + "Test all-pairs ping with LinearNet (Userspace switch)." + switchClass = UserSwitch + + +if __name__ == '__main__': + setLogLevel( 'warning' ) + unittest.main() diff --git a/mininet/test/test_ptyleak.py b/mininet/test/test_ptyleak.py new file mode 100755 index 0000000..813be76 --- /dev/null +++ b/mininet/test/test_ptyleak.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python + +""" +Regression test for pty leak in Node() +""" + +import unittest + +from mininet.net import Mininet +from mininet.clean import cleanup +from mininet.topo import SingleSwitchTopo + +class TestPtyLeak( unittest.TestCase ): + "Verify that there is no pty leakage" + + @staticmethod + def testPtyLeak(): + "Test for pty leakage" + net = Mininet( SingleSwitchTopo() ) + net.start() + host = net[ 'h1' ] + for _ in range( 0, 10 ): + oldptys = host.slave, host.master + net.delHost( host ) + host = net.addHost( 'h1' ) + assert ( host.slave, host.master ) == oldptys + net.stop() + + +if __name__ == '__main__': + unittest.main() + cleanup() diff --git a/mininet/test/test_switchdpidassignment.py b/mininet/test/test_switchdpidassignment.py new file mode 100755 index 0000000..83a2e45 --- /dev/null +++ b/mininet/test/test_switchdpidassignment.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python + +"""Package: mininet + Regression tests for switch dpid assignment.""" + +import unittest +import sys + +from mininet.net import Mininet +from mininet.node import Host, Controller +from mininet.node import ( UserSwitch, OVSSwitch, IVSSwitch ) +from mininet.topo import Topo +from mininet.log import setLogLevel +from mininet.util import quietRun +from mininet.clean import cleanup + + +class TestSwitchDpidAssignmentOVS( unittest.TestCase ): + "Verify Switch dpid assignment." 
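
The dpid-assignment tests beginning here pin down how a datapath ID is derived: from the digits in the switch name unless one is passed explicitly. In user code that looks roughly like the sketch below (values are illustrative, not part of this commit).

from mininet.net import Mininet
from mininet.topo import Topo

net = Mininet( topo=Topo(), controller=None )
s1 = net.addSwitch( 's1' )                            # dpid derived from the '1' in 's1'
s2 = net.addSwitch( 's2', dpid='000000000000abcd' )   # explicit 16-hex-digit dpid
print( s1.dpid, s2.dpid )
net.stop()
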
+ + switchClass = OVSSwitch # overridden in subclasses + + def tearDown( self ): + "Clean up if necessary" + # satisfy pylint + assert self + if sys.exc_info() != ( None, None, None ): + cleanup() + + def testDefaultDpid( self ): + """Verify that the default dpid is assigned using a valid provided + canonical switchname if no dpid is passed in switch creation.""" + net = Mininet( Topo(), self.switchClass, Host, Controller ) + switch = net.addSwitch( 's1' ) + self.assertEqual( switch.defaultDpid(), switch.dpid ) + net.stop() + + def dpidFrom( self, num ): + "Compute default dpid from number" + fmt = ( '%0' + str( self.switchClass.dpidLen ) + 'x' ) + return fmt % num + + def testActualDpidAssignment( self ): + """Verify that Switch dpid is the actual dpid assigned if dpid is + passed in switch creation.""" + dpid = self.dpidFrom( 0xABCD ) + net = Mininet( Topo(), self.switchClass, Host, Controller ) + switch = net.addSwitch( 's1', dpid=dpid ) + self.assertEqual( switch.dpid, dpid ) + net.stop() + + def testDefaultDpidAssignmentFailure( self ): + """Verify that Default dpid assignment raises an Exception if the + name of the switch does not contain a digit. Also verify the + exception message.""" + net = Mininet( Topo(), self.switchClass, Host, Controller ) + with self.assertRaises( Exception ) as raises_cm: + net.addSwitch( 'A' ) + self.assertTrue( 'Unable to derive ' + 'default datapath ID - please either specify a dpid ' + 'or use a canonical switch name such as s23.' + in str( raises_cm.exception ) ) + net.stop() + + def testDefaultDpidLen( self ): + """Verify that Default dpid length is 16 characters consisting of + 16 - len(hex of first string of contiguous digits passed in switch + name) 0's followed by hex of first string of contiguous digits passed + in switch name.""" + net = Mininet( Topo(), self.switchClass, Host, Controller ) + switch = net.addSwitch( 's123' ) + self.assertEqual( switch.dpid, self.dpidFrom( 123 ) ) + net.stop() + + +class OVSUser( OVSSwitch): + "OVS User Switch convenience class" + def __init__( self, *args, **kwargs ): + kwargs.update( datapath='user' ) + OVSSwitch.__init__( self, *args, **kwargs ) + +class testSwitchOVSUser( TestSwitchDpidAssignmentOVS ): + "Test dpid assignment of OVS User Switch." + switchClass = OVSUser + +@unittest.skipUnless( quietRun( 'which ivs-ctl' ), + 'IVS switch is not installed' ) +class testSwitchIVS( TestSwitchDpidAssignmentOVS ): + "Test dpid assignment of IVS switch." + switchClass = IVSSwitch + +@unittest.skipUnless( quietRun( 'which ofprotocol' ), + 'Reference user switch is not installed' ) +class testSwitchUserspace( TestSwitchDpidAssignmentOVS ): + "Test dpid assignment of Userspace switch." 
+ switchClass = UserSwitch + + +if __name__ == '__main__': + setLogLevel( 'warning' ) + unittest.main() + cleanup() diff --git a/mininet/test/test_util.py b/mininet/test/test_util.py new file mode 100755 index 0000000..5e480b7 --- /dev/null +++ b/mininet/test/test_util.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python + +"""Package: mininet + Test functions defined in mininet.util.""" + +import unittest + +from mininet.util import quietRun + +class testQuietRun( unittest.TestCase ): + """Test quietRun that runs a command and returns its merged output from + STDOUT and STDIN""" + + @staticmethod + def getEchoCmd( n ): + "Return a command that will print n characters" + return "echo -n " + "x" * n + + def testEmpty( self ): + "Run a command that prints nothing" + output = quietRun(testQuietRun.getEchoCmd( 0 ) ) + self.assertEqual( 0, len( output ) ) + + def testOneRead( self ): + """Run a command whose output is entirely read on the first call if + each call reads at most 1024 characters + """ + for n in [ 42, 1024 ]: + output = quietRun( testQuietRun.getEchoCmd( n ) ) + self.assertEqual( n, len( output ) ) + + def testMultipleReads( self ): + "Run a command whose output is not entirely read on the first read" + for n in [ 1025, 4242 ]: + output = quietRun(testQuietRun.getEchoCmd( n ) ) + self.assertEqual( n, len( output ) ) + + +if __name__ == "__main__": + unittest.main() diff --git a/mininet/test/test_walkthrough.py b/mininet/test/test_walkthrough.py new file mode 100755 index 0000000..39be95f --- /dev/null +++ b/mininet/test/test_walkthrough.py @@ -0,0 +1,392 @@ +#!/usr/bin/env python + +""" +Tests for the Mininet Walkthrough + +TODO: missing xterm test +""" + +import os +import re +import unittest + +from sys import stdout + +from mininet.util import quietRun, pexpect, StrictVersion +from mininet.clean import cleanup + + +def tsharkVersion(): + "Return tshark version" + versionStr = quietRun( 'tshark -v' ) + versionMatch = re.findall( r'TShark[^\d]*(\d+.\d+.\d+)', versionStr ) + return versionMatch[ 0 ] + +# pylint doesn't understand pexpect.match, unfortunately! 
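
quietRun, exercised by the tests above, just runs a shell command and returns its merged output as a string, which is why the test classes can also use it as an install probe. A purely illustrative example:

from mininet.util import quietRun

# Empty output means the binary is not on PATH, so this doubles as a feature test
if quietRun( 'which ovs-vsctl' ):
    print( quietRun( 'ovs-vsctl --version' ).splitlines()[ 0 ] )
else:
    print( 'Open vSwitch tools not installed' )
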
+# pylint:disable=maybe-no-member + +class testWalkthrough( unittest.TestCase ): + "Test Mininet walkthrough" + + prompt = 'mininet>' + + @staticmethod + def setup(): + "Be paranoid and run cleanup() before each test" + cleanup() + + # PART 1 + def testHelp( self ): + "Check the usage message" + p = pexpect.spawn( 'mn -h' ) + index = p.expect( [ 'Usage: mn', pexpect.EOF ] ) + self.assertEqual( index, 0 ) + + def testWireshark( self ): + "Use tshark to test the of dissector" + # Satisfy pylint + assert self + if StrictVersion( tsharkVersion() ) < StrictVersion( '1.12.0' ): + tshark = pexpect.spawn( 'tshark -i lo -R of' ) + else: + tshark = pexpect.spawn( 'tshark -i lo -Y openflow_v1' ) + tshark.expect( [ 'Capturing on lo', "Capturing on 'Loopback" ] ) + mn = pexpect.spawn( 'mn --test pingall' ) + mn.expect( '0% dropped' ) + tshark.expect( [ '74 Hello', '74 of_hello', '74 Type: OFPT_HELLO' ] ) + tshark.sendintr() + mn.expect( pexpect.EOF ) + tshark.expect( 'aptured' ) # 'xx packets captured' + tshark.expect( pexpect.EOF ) + + def testBasic( self ): + "Test basic CLI commands (help, nodes, net, dump)" + p = pexpect.spawn( 'mn -w' ) + p.expect( self.prompt ) + # help command + p.sendline( 'help' ) + index = p.expect( [ 'commands', self.prompt ] ) + self.assertEqual( index, 0, 'No output for "help" command') + # nodes command + p.sendline( 'nodes' ) + p.expect( r'([chs]\d ?){4}' ) + nodes = p.match.group( 0 ).split() + self.assertEqual( len( nodes ), 4, 'No nodes in "nodes" command') + p.expect( self.prompt ) + # net command + p.sendline( 'net' ) + expected = list( nodes ) + while len( expected ) > 0: + index = p.expect( expected ) + node = p.match.group( 0 ) + expected.remove( node ) + p.expect( '\n' ) + self.assertEqual( len( expected ), 0, '"nodes" and "net" differ') + p.expect( self.prompt ) + # dump command + p.sendline( 'dump' ) + expected = [ r'<\w+ (%s)' % n for n in nodes ] + actual = [] + for _ in nodes: + index = p.expect( expected ) + node = p.match.group( 1 ) + actual.append( node ) + p.expect( '\n' ) + self.assertEqual( actual.sort(), nodes.sort(), + '"nodes" and "dump" differ' ) + p.expect( self.prompt ) + p.sendline( 'exit' ) + p.wait() + + def testHostCommands( self ): + "Test ifconfig and ps on h1 and s1" + p = pexpect.spawn( 'mn -w' ) + p.expect( self.prompt ) + # Third pattern is a local interface beginning with 'eth' or 'en' + interfaces = [ r'h1-eth0[:\s]', r's1-eth1[:\s]', + r'[^-](eth|en)\w*\d[:\s]', r'lo[:\s]', + self.prompt ] + # h1 ifconfig + p.sendline( 'h1 ifconfig -a' ) + ifcount = 0 + while True: + index = p.expect( interfaces ) + if index in (0, 3): + ifcount += 1 + elif index == 1: + self.fail( 's1 interface displayed in "h1 ifconfig"' ) + elif index == 2: + self.fail( 'eth0 displayed in "h1 ifconfig"' ) + else: + break + self.assertEqual( ifcount, 2, 'Missing interfaces on h1') + # s1 ifconfig + p.sendline( 's1 ifconfig -a' ) + ifcount = 0 + while True: + index = p.expect( interfaces ) + if index == 0: + self.fail( 'h1 interface displayed in "s1 ifconfig"' ) + elif index in (1, 2, 3): + ifcount += 1 + else: + break + self.assertTrue( ifcount >= 3, 'Missing interfaces on s1') + # h1 ps + p.sendline( "h1 ps -a | egrep -v 'ps|grep'" ) + p.expect( self.prompt ) + h1Output = p.before + # s1 ps + p.sendline( "s1 ps -a | egrep -v 'ps|grep'" ) + p.expect( self.prompt ) + s1Output = p.before + # strip command from ps output and compute diffs + h1Output = h1Output.split( '\n' )[ 1: ] + s1Output = s1Output.split( '\n' )[ 1: ] + diffs = set( h1Output ).difference( set( 
s1Output ) ) + # allow up to two diffs to account for daemons, etc. + self.assertTrue( len( diffs ) <= 2, + 'h1 and s1 "ps" output differ too much: %s' % diffs ) + p.sendline( 'exit' ) + p.wait() + + def testConnectivity( self ): + "Test ping and pingall" + p = pexpect.spawn( 'mn -w' ) + p.expect( self.prompt ) + p.sendline( 'h1 ping -c 1 h2' ) + p.expect( '1 packets transmitted, 1 received' ) + p.expect( self.prompt ) + p.sendline( 'pingall' ) + p.expect( '0% dropped' ) + p.expect( self.prompt ) + p.sendline( 'exit' ) + p.wait() + + def testSimpleHTTP( self ): + "Start an HTTP server on h1 and wget from h2" + if 'Python 2' in quietRun( 'python --version' ): + httpserver = 'SimpleHTTPServer' + else: + httpserver = 'http.server' + p = pexpect.spawn( 'mn -w', logfile=stdout ) + p.expect( self.prompt ) + p.sendline( 'h1 python -m %s 80 >& /dev/null &' % httpserver ) + p.expect( self.prompt ) + # The walkthrough doesn't specify a delay here, and + # we also don't read the output (also a possible problem), + # but for now let's wait a number of seconds to make + # it less likely to fail due to the race condition. + p.sendline( 'px from mininet.util import waitListening;' + 'waitListening(h1, port=80, timeout=30)' ) + p.expect( self.prompt ) + p.sendline( ' h2 wget -O - h1' ) + p.expect( '200 OK' ) + p.expect( self.prompt ) + p.sendline( 'h1 kill %python' ) + p.expect( self.prompt ) + p.sendline( 'exit' ) + p.wait() + + # PART 2 + def testRegressionRun( self ): + "Test pingpair (0% drop) and iperf (bw > 0) regression tests" + # test pingpair + p = pexpect.spawn( 'mn --test pingpair' ) + p.expect( '0% dropped' ) + p.expect( pexpect.EOF ) + # test iperf + p = pexpect.spawn( 'mn --test iperf' ) + p.expect( r"Results: \['([\d\.]+) .bits/sec'," ) + bw = float( p.match.group( 1 ) ) + self.assertTrue( bw > 0 ) + p.expect( pexpect.EOF ) + + def testTopoChange( self ): + "Test pingall on single,3 and linear,4 topos" + # testing single,3 + p = pexpect.spawn( 'mn --test pingall --topo single,3' ) + p.expect( r'(\d+)/(\d+) received') + received = int( p.match.group( 1 ) ) + sent = int( p.match.group( 2 ) ) + self.assertEqual( sent, 6, 'Wrong number of pings sent in single,3' ) + self.assertEqual( sent, received, 'Dropped packets in single,3') + p.expect( pexpect.EOF ) + # testing linear,4 + p = pexpect.spawn( 'mn --test pingall --topo linear,4' ) + p.expect( r'(\d+)/(\d+) received') + received = int( p.match.group( 1 ) ) + sent = int( p.match.group( 2 ) ) + self.assertEqual( sent, 12, 'Wrong number of pings sent in linear,4' ) + self.assertEqual( sent, received, 'Dropped packets in linear,4') + p.expect( pexpect.EOF ) + + def testLinkChange( self ): + "Test TCLink bw and delay" + p = pexpect.spawn( 'mn -w --link tc,bw=10,delay=10ms' ) + p.expect( self.prompt ) + p.sendline( 'h1 route && ping -c1 h2' ) + # test bw + p.expect( self.prompt ) + p.sendline( 'iperf' ) + p.expect( r"Results: \['([\d\.]+) Mbits/sec'," ) + bw = float( p.match.group( 1 ) ) + self.assertTrue( bw < 10.1, 'Bandwidth %.2f >= 10.1 Mb/s' % bw ) + self.assertTrue( bw > 9.0, 'Bandwidth %.2f <= 9 Mb/s' % bw ) + p.expect( self.prompt ) + # test delay + p.sendline( 'h1 ping -c 4 h2' ) + p.expect( r'rtt min/avg/max/mdev = ' + r'([\d\.]+)/([\d\.]+)/([\d\.]+)/([\d\.]+) ms' ) + delay = float( p.match.group( 2 ) ) + self.assertTrue( delay >= 40, 'Delay < 40ms' ) + self.assertTrue( delay <= 50, 'Delay > 50ms' ) + p.expect( self.prompt ) + p.sendline( 'exit' ) + p.wait() + + def testVerbosity( self ): + "Test debug and output verbosity" + # test 
output + p = pexpect.spawn( 'mn -v output' ) + p.expect( self.prompt ) + self.assertEqual( len( p.before ), 0, 'Too much output for "output"' ) + p.sendline( 'exit' ) + p.wait() + # test debug + p = pexpect.spawn( 'mn -v debug --test none' ) + p.expect( pexpect.EOF ) + lines = p.before.split( '\n' ) + self.assertTrue( len( lines ) > 70, "Debug output is too short" ) + + def testCustomTopo( self ): + "Start Mininet using a custom topo, then run pingall" + # Satisfy pylint + assert self + custom = os.path.dirname( os.path.realpath( __file__ ) ) + custom = os.path.join( custom, '../../custom/topo-2sw-2host.py' ) + custom = os.path.normpath( custom ) + p = pexpect.spawn( + 'mn --custom %s --topo mytopo --test pingall' % custom ) + p.expect( '0% dropped' ) + p.expect( pexpect.EOF ) + + def testStaticMAC( self ): + "Verify that MACs are set to easy to read numbers" + p = pexpect.spawn( 'mn --mac' ) + p.expect( self.prompt ) + for i in range( 1, 3 ): + p.sendline( 'h%d ifconfig' % i ) + p.expect( r'\s00:00:00:00:00:0%d\s' % i ) + p.expect( self.prompt ) + p.sendline( 'exit' ) + p.expect( pexpect.EOF ) + + def testSwitches( self ): + "Run iperf test using user and ovsk switches" + switches = [ 'user', 'ovsk' ] + for sw in switches: + p = pexpect.spawn( 'mn --switch %s --test iperf' % sw ) + p.expect( r"Results: \['([\d\.]+) .bits/sec'," ) + bw = float( p.match.group( 1 ) ) + self.assertTrue( bw > 0 ) + p.expect( pexpect.EOF ) + + def testBenchmark( self ): + "Run benchmark and verify that it takes less than 2 seconds" + p = pexpect.spawn( 'mn --test none' ) + p.expect( r'completed in ([\d\.]+) seconds' ) + time = float( p.match.group( 1 ) ) + self.assertTrue( time < 2, 'Benchmark takes more than 2 seconds' ) + + def testOwnNamespace( self ): + "Test running user switch in its own namespace" + p = pexpect.spawn( 'mn --innamespace --switch user' ) + p.expect( self.prompt ) + interfaces = [ r'h1-eth0[:\s]', r's1-eth1[:\s]', + r'[^-](eth|en)\w*\d[:\s]', r'lo[:\s]', + self.prompt ] + p.sendline( 's1 ifconfig -a' ) + ifcount = 0 + while True: + index = p.expect( interfaces ) + if index in (1, 3): + ifcount += 1 + elif index == 0: + self.fail( 'h1 interface displayed in "s1 ifconfig"' ) + elif index == 2: + self.fail( 'eth0 displayed in "s1 ifconfig"' ) + else: + break + self.assertEqual( ifcount, 2, 'Missing interfaces on s1' ) + # verify that all hosts a reachable + p.sendline( 'pingall' ) + p.expect( r'(\d+)% dropped' ) + dropped = int( p.match.group( 1 ) ) + self.assertEqual( dropped, 0, 'pingall failed') + p.expect( self.prompt ) + p.sendline( 'exit' ) + p.wait() + + # PART 3 + def testPythonInterpreter( self ): + "Test py and px by checking IP for h1 and adding h3" + p = pexpect.spawn( 'mn -w' ) + p.expect( self.prompt ) + # test host IP + p.sendline( 'py h1.IP()' ) + p.expect( '10.0.0.1' ) + p.expect( self.prompt ) + # test adding host + p.sendline( "px net.addHost('h3')" ) + p.expect( self.prompt ) + p.sendline( "px net.addLink(s1, h3)" ) + p.expect( self.prompt ) + p.sendline( 'net' ) + p.expect( 'h3' ) + p.expect( self.prompt ) + p.sendline( 'py h3.MAC()' ) + p.expect( '([a-f0-9]{2}:?){6}' ) + p.expect( self.prompt ) + p.sendline( 'exit' ) + p.wait() + + def testLink( self ): + "Test link CLI command using ping" + p = pexpect.spawn( 'mn -w' ) + p.expect( self.prompt ) + p.sendline( 'link s1 h1 down' ) + p.expect( self.prompt ) + p.sendline( 'h1 ping -c 1 h2' ) + p.expect( 'unreachable' ) + p.expect( self.prompt ) + p.sendline( 'link s1 h1 up' ) + p.expect( self.prompt ) + p.sendline( 'h1 ping 
-c 1 h2' ) + p.expect( '0% packet loss' ) + p.expect( self.prompt ) + p.sendline( 'exit' ) + p.wait() + + @unittest.skipUnless( os.path.exists( '/tmp/pox' ) or + '1 received' in quietRun( 'ping -c 1 github.com' ), + 'Github is not reachable; cannot download Pox' ) + def testRemoteController( self ): + "Test Mininet using Pox controller" + # Satisfy pylint + assert self + if not os.path.exists( '/tmp/pox' ): + p = pexpect.spawn( + 'git clone https://github.com/noxrepo/pox.git /tmp/pox' ) + p.expect( pexpect.EOF ) + pox = pexpect.spawn( '/tmp/pox/pox.py forwarding.l2_learning' ) + net = pexpect.spawn( + 'mn --controller=remote,ip=127.0.0.1,port=6633 --test pingall' ) + net.expect( '0% dropped' ) + net.expect( pexpect.EOF ) + pox.sendintr() + pox.wait() + + +if __name__ == '__main__': + unittest.main() diff --git a/mininet/topo.py b/mininet/topo.py new file mode 100644 index 0000000..f79fe23 --- /dev/null +++ b/mininet/topo.py @@ -0,0 +1,358 @@ +#!/usr/bin/env python +"""@package topo + +Network topology creation. + +@author Brandon Heller (brandonh@stanford.edu) + +This package includes code to represent network topologies. + +A Topo object can be a topology database for NOX, can represent a physical +setup for testing, and can even be emulated with the Mininet package. +""" + +from mininet.util import irange, natural, naturalSeq + +# pylint: disable=too-many-arguments + + +class MultiGraph( object ): + "Utility class to track nodes and edges - replaces networkx.MultiGraph" + + def __init__( self ): + self.node = {} + self.edge = {} + + def add_node( self, node, attr_dict=None, **attrs): + """Add node to graph + attr_dict: attribute dict (optional) + attrs: more attributes (optional) + warning: updates attr_dict with attrs""" + attr_dict = {} if attr_dict is None else attr_dict + attr_dict.update( attrs ) + self.node[ node ] = attr_dict + + def add_edge( self, src, dst, key=None, attr_dict=None, **attrs ): + """Add edge to graph + key: optional key + attr_dict: optional attribute dict + attrs: more attributes + warning: updates attr_dict with attrs""" + attr_dict = {} if attr_dict is None else attr_dict + attr_dict.update( attrs ) + self.node.setdefault( src, {} ) + self.node.setdefault( dst, {} ) + self.edge.setdefault( src, {} ) + self.edge.setdefault( dst, {} ) + self.edge[ src ].setdefault( dst, {} ) + entry = self.edge[ dst ][ src ] = self.edge[ src ][ dst ] + # If no key, pick next ordinal number + if key is None: + keys = [ k for k in entry.keys() if isinstance( k, int ) ] + key = max( [ 0 ] + keys ) + 1 + entry[ key ] = attr_dict + return key + + def nodes( self, data=False): + """Return list of graph nodes + data: return list of ( node, attrs)""" + return self.node.items() if data else self.node.keys() + + def edges_iter( self, data=False, keys=False ): + "Iterator: return graph edges, optionally with data and keys" + for src, entry in self.edge.items(): + for dst, entrykeys in entry.items(): + if src > dst: + # Skip duplicate edges + continue + for k, attrs in entrykeys.items(): + if data: + if keys: + yield( src, dst, k, attrs ) + else: + yield( src, dst, attrs ) + else: + if keys: + yield( src, dst, k ) + else: + yield( src, dst ) + + def edges( self, data=False, keys=False ): + "Return list of graph edges" + return list( self.edges_iter( data=data, keys=keys ) ) + + def __getitem__( self, node ): + "Return link dict for given src node" + return self.edge[ node ] + + def __len__( self ): + "Return the number of nodes" + return len( self.node ) + + def convertTo( self, cls, 
data=False, keys=False ): + """Convert to a new object of networkx.MultiGraph-like class cls + data: include node and edge data + keys: include edge keys as well as edge data""" + g = cls() + g.add_nodes_from( self.nodes( data=data ) ) + g.add_edges_from( self.edges( data=( data or keys ), keys=keys ) ) + return g + + +class Topo( object ): + "Data center network representation for structured multi-trees." + + def __init__( self, *args, **params ): + """Topo object. + Optional named parameters: + hinfo: default host options + sopts: default switch options + lopts: default link options + calls build()""" + self.g = MultiGraph() + self.hopts = params.pop( 'hopts', {} ) + self.sopts = params.pop( 'sopts', {} ) + self.lopts = params.pop( 'lopts', {} ) + # ports[src][dst][sport] is port on dst that connects to src + self.ports = {} + self.build( *args, **params ) + + def build( self, *args, **params ): + "Override this method to build your topology." + pass + + def addNode( self, name, **opts ): + """Add Node to graph. + name: name + opts: node options + returns: node name""" + self.g.add_node( name, **opts ) + return name + + def addHost( self, name, **opts ): + """Convenience method: Add host to graph. + name: host name + opts: host options + returns: host name""" + if not opts and self.hopts: + opts = self.hopts + return self.addNode( name, **opts ) + + def addSwitch( self, name, **opts ): + """Convenience method: Add switch to graph. + name: switch name + opts: switch options + returns: switch name""" + if not opts and self.sopts: + opts = self.sopts + result = self.addNode( name, isSwitch=True, **opts ) + return result + + def addLink( self, node1, node2, port1=None, port2=None, + key=None, **opts ): + """node1, node2: nodes to link together + port1, port2: ports (optional) + opts: link options (optional) + returns: link info key""" + if not opts and self.lopts: + opts = self.lopts + port1, port2 = self.addPort( node1, node2, port1, port2 ) + opts = dict( opts ) + opts.update( node1=node1, node2=node2, port1=port1, port2=port2 ) + return self.g.add_edge(node1, node2, key, opts ) + + def nodes( self, sort=True ): + "Return nodes in graph" + if sort: + return self.sorted( self.g.nodes() ) + else: + return self.g.nodes() + + def isSwitch( self, n ): + "Returns true if node is a switch." + return self.g.node[ n ].get( 'isSwitch', False ) + + def switches( self, sort=True ): + """Return switches. + sort: sort switches alphabetically + returns: dpids list of dpids""" + return [ n for n in self.nodes( sort ) if self.isSwitch( n ) ] + + def hosts( self, sort=True ): + """Return hosts. 
+ sort: sort hosts alphabetically + returns: list of hosts""" + return [ n for n in self.nodes( sort ) if not self.isSwitch( n ) ] + + def iterLinks( self, withKeys=False, withInfo=False ): + """Return links (iterator) + withKeys: return link keys + withInfo: return link info + returns: list of ( src, dst [,key, info ] )""" + for _src, _dst, key, info in self.g.edges_iter( data=True, keys=True ): + node1, node2 = info[ 'node1' ], info[ 'node2' ] + if withKeys: + if withInfo: + yield( node1, node2, key, info ) + else: + yield( node1, node2, key ) + else: + if withInfo: + yield( node1, node2, info ) + else: + yield( node1, node2 ) + + def links( self, sort=False, withKeys=False, withInfo=False ): + """Return links + sort: sort links alphabetically, preserving (src, dst) order + withKeys: return link keys + withInfo: return link info + returns: list of ( src, dst [,key, info ] )""" + links = list( self.iterLinks( withKeys, withInfo ) ) + if not sort: + return links + # Ignore info when sorting + tupleSize = 3 if withKeys else 2 + return sorted( links, key=( lambda l: naturalSeq( l[ :tupleSize ] ) ) ) + + # This legacy port management mechanism is clunky and will probably + # be removed at some point. + + def addPort( self, src, dst, sport=None, dport=None ): + """Generate port mapping for new edge. + src: source switch name + dst: destination switch name""" + # Initialize if necessary + ports = self.ports + ports.setdefault( src, {} ) + ports.setdefault( dst, {} ) + # New port: number of outlinks + base + if sport is None: + src_base = 1 if self.isSwitch( src ) else 0 + sport = len( ports[ src ] ) + src_base + if dport is None: + dst_base = 1 if self.isSwitch( dst ) else 0 + dport = len( ports[ dst ] ) + dst_base + ports[ src ][ sport ] = ( dst, dport ) + ports[ dst ][ dport ] = ( src, sport ) + return sport, dport + + def port( self, src, dst ): + """Get port numbers. + src: source switch name + dst: destination switch name + sport: optional source port (otherwise use lowest src port) + returns: tuple (sport, dport), where + sport = port on source switch leading to the destination switch + dport = port on destination switch leading to the source switch + Note that you can also look up ports using linkInfo()""" + # A bit ugly and slow vs. single-link implementation ;-( + ports = [ ( sport, entry[ 1 ] ) + for sport, entry in self.ports[ src ].items() + if entry[ 0 ] == dst ] + return ports if len( ports ) != 1 else ports[ 0 ] + + def _linkEntry( self, src, dst, key=None ): + "Helper function: return link entry and key" + entry = self.g[ src ][ dst ] + if key is None: + key = min( entry ) + return entry, key + + def linkInfo( self, src, dst, key=None ): + "Return link metadata dict" + entry, key = self._linkEntry( src, dst, key ) + return entry[ key ] + + def setlinkInfo( self, src, dst, info, key=None ): + "Set link metadata dict" + entry, key = self._linkEntry( src, dst, key ) + entry[ key ] = info + + def nodeInfo( self, name ): + "Return metadata (dict) for node" + return self.g.node[ name ] + + def setNodeInfo( self, name, info ): + "Set metadata (dict) for node" + self.g.node[ name ] = info + + def convertTo( self, cls, data=True, keys=True ): + """Convert to a new object of networkx.MultiGraph-like class cls + data: include node and edge data (default True) + keys: include edge keys as well as edge data (default True)""" + return self.g.convertTo( cls, data=data, keys=keys ) + + @staticmethod + def sorted( items ): + "Items sorted in natural (i.e. 
alphabetical) order" + return sorted( items, key=natural ) + + +# Our idiom defines additional parameters in build(param...) +# pylint: disable=arguments-differ + +class SingleSwitchTopo( Topo ): + "Single switch connected to k hosts." + + def build( self, k=2, **_opts ): + "k: number of hosts" + self.k = k + switch = self.addSwitch( 's1' ) + for h in irange( 1, k ): + host = self.addHost( 'h%s' % h ) + self.addLink( host, switch ) + + +class SingleSwitchReversedTopo( Topo ): + """Single switch connected to k hosts, with reversed ports. + The lowest-numbered host is connected to the highest-numbered port. + Useful to verify that Mininet properly handles custom port + numberings.""" + + def build( self, k=2 ): + "k: number of hosts" + self.k = k + switch = self.addSwitch( 's1' ) + for h in irange( 1, k ): + host = self.addHost( 'h%s' % h ) + self.addLink( host, switch, + port1=0, port2=( k - h + 1 ) ) + + +class MinimalTopo( SingleSwitchTopo ): + "Minimal topology with two hosts and one switch" + def build( self ): + return SingleSwitchTopo.build( self, k=2 ) + + +class LinearTopo( Topo ): + "Linear topology of k switches, with n hosts per switch." + + def build( self, k=2, n=1, **_opts): + """k: number of switches + n: number of hosts per switch""" + self.k = k + self.n = n + + if n == 1: + def genHostName( i, _j ): + return 'h%s' % i + else: + def genHostName( i, j ): + return 'h%ss%d' % ( j, i ) + + lastSwitch = None + for i in irange( 1, k ): + # Add switch + switch = self.addSwitch( 's%s' % i ) + # Add hosts to switch + for j in irange( 1, n ): + host = self.addHost( genHostName( i, j ) ) + self.addLink( host, switch ) + # Connect switch to previous + if lastSwitch: + self.addLink( switch, lastSwitch ) + lastSwitch = switch + +# pylint: enable=arguments-differ diff --git a/mininet/topolib.py b/mininet/topolib.py new file mode 100644 index 0000000..d60e59a --- /dev/null +++ b/mininet/topolib.py @@ -0,0 +1,84 @@ +"Library of potentially useful topologies for Mininet" + +from mininet.topo import Topo +from mininet.net import Mininet + +# The build() method is expected to do this: +# pylint: disable=arguments-differ + +class TreeTopo( Topo ): + "Topology for a tree network with a given depth and fanout." + + def build( self, depth=1, fanout=2 ): + # Numbering: h1..N, s1..M + self.hostNum = 1 + self.switchNum = 1 + # Build topology + self.addTree( depth, fanout ) + + def addTree( self, depth, fanout ): + """Add a subtree starting with node n. + returns: last node added""" + isSwitch = depth > 0 + if isSwitch: + node = self.addSwitch( 's%s' % self.switchNum ) + self.switchNum += 1 + for _ in range( fanout ): + child = self.addTree( depth - 1, fanout ) + self.addLink( node, child ) + else: + node = self.addHost( 'h%s' % self.hostNum ) + self.hostNum += 1 + return node + + +def TreeNet( depth=1, fanout=2, **kwargs ): + "Convenience function for creating tree networks." + topo = TreeTopo( depth, fanout ) + return Mininet( topo, **kwargs ) + + +class TorusTopo( Topo ): + """2-D Torus topology + WARNING: this topology has LOOPS and WILL NOT WORK + with the default controller or any Ethernet bridge + without STP turned on! 
It can be used with STP, e.g.: + # mn --topo torus,3,3 --switch lxbr,stp=1 --test pingall""" + + def build( self, x, y, n=1 ): + """x: dimension of torus in x-direction + y: dimension of torus in y-direction + n: number of hosts per switch""" + if x < 3 or y < 3: + raise Exception( 'Please use 3x3 or greater for compatibility ' + 'with 2.1' ) + if n == 1: + def genHostName( loc, _k ): + return 'h%s' % ( loc ) + else: + def genHostName( loc, k ): + return 'h%sx%d' % ( loc, k ) + + hosts, switches, dpid = {}, {}, 0 + # Create and wire interior + for i in range( 0, x ): + for j in range( 0, y ): + loc = '%dx%d' % ( i + 1, j + 1 ) + # dpid cannot be zero for OVS + dpid = ( i + 1 ) * 256 + ( j + 1 ) + switch = switches[ i, j ] = self.addSwitch( + 's' + loc, dpid='%x' % dpid ) + for k in range( 0, n ): + host = hosts[ i, j, k ] = self.addHost( + genHostName( loc, k + 1 ) ) + self.addLink( host, switch ) + # Connect switches + for i in range( 0, x ): + for j in range( 0, y ): + sw1 = switches[ i, j ] + sw2 = switches[ i, ( j + 1 ) % y ] + sw3 = switches[ ( i + 1 ) % x, j ] + self.addLink( sw1, sw2 ) + self.addLink( sw1, sw3 ) + +# pylint: enable=arguments-differ diff --git a/mininet/util.py b/mininet/util.py new file mode 100644 index 0000000..e776e9f --- /dev/null +++ b/mininet/util.py @@ -0,0 +1,741 @@ +"Utility functions for Mininet." + +import codecs +import os +import re +import sys + +from collections import namedtuple +from fcntl import fcntl, F_GETFL, F_SETFL +from functools import partial +from os import O_NONBLOCK +from resource import getrlimit, setrlimit, RLIMIT_NPROC, RLIMIT_NOFILE +from select import poll, POLLIN, POLLHUP +from subprocess import call, check_call, Popen, PIPE, STDOUT +from sys import exit # pylint: disable=redefined-builtin +from time import sleep + +from mininet.log import output, info, error, warn, debug + +# pylint: disable=too-many-arguments + + +# Python 2/3 compatibility + +Python3 = sys.version_info[0] == 3 +BaseString = str if Python3 else getattr( str, '__base__' ) +Encoding = 'utf-8' if Python3 else None +class NullCodec( object ): + "Null codec for Python 2" + @staticmethod + def decode( buf ): + "Null decode" + return buf + + @staticmethod + def encode( buf ): + "Null encode" + return buf + + +if Python3: + def decode( buf ): + "Decode buffer for Python 3" + return buf.decode( Encoding ) + + def encode( buf ): + "Encode buffer for Python 3" + return buf.encode( Encoding ) + getincrementaldecoder = codecs.getincrementaldecoder( Encoding ) + +else: + decode, encode = NullCodec.decode, NullCodec.encode + + def getincrementaldecoder(): + "Return null codec for Python 2" + return NullCodec + +try: + import packaging.version # replacement for distutils.version + StrictVersion = packaging.version.parse +except ImportError: # python2.7 lacks ModuleNotFoundError + import distutils.version # pylint: disable=deprecated-module + StrictVersion = distutils.version.StrictVersion + +try: + oldpexpect = None + import pexpect as oldpexpect # pylint: disable=import-error + + class Pexpect( object ): + "Custom pexpect that is compatible with str" + @staticmethod + def spawn( *args, **kwargs): + "pexpect.spawn that is compatible with str" + if Python3 and 'encoding' not in kwargs: + kwargs.update( encoding='utf-8' ) + return oldpexpect.spawn( *args, **kwargs ) + + def __getattr__( self, name ): + return getattr( oldpexpect, name ) + pexpect = Pexpect() +except ImportError: + pass + + +# Command execution support + +def run( cmd ): + """Simple interface to subprocess.call() + 
cmd: list of command params""" + return call( cmd.split( ' ' ) ) + +def checkRun( cmd ): + """Simple interface to subprocess.check_call() + cmd: list of command params""" + return check_call( cmd.split( ' ' ) ) + +# pylint doesn't understand explicit type checking +# pylint: disable=maybe-no-member + +def oldQuietRun( *cmd ): + """Run a command, routing stderr to stdout, and return the output. + cmd: list of command params""" + if len( cmd ) == 1: + cmd = cmd[ 0 ] + if isinstance( cmd, BaseString ): + cmd = cmd.split( ' ' ) + out = '' + popen = Popen( # pylint: disable=consider-using-with + cmd, stdout=PIPE, stderr=STDOUT ) + # We can't use Popen.communicate() because it uses + # select(), which can't handle + # high file descriptor numbers! poll() can, however. + readable = poll() + readable.register( popen.stdout ) + while True: + while readable.poll(): + data = popen.stdout.read( 1024 ) + if len( data ) == 0: + break + out += data + popen.poll() + if popen.returncode is not None: + break + return out + + +# This is a bit complicated, but it enables us to +# monitor command output as it is happening + +CmdResult = namedtuple( 'CmdResult', 'out err ret' ) + +# pylint: disable=too-many-branches,too-many-statements +def errRun( *cmd, **kwargs ): + """Run a command and return stdout, stderr and return code + cmd: string or list of command and args + stderr: STDOUT to merge stderr with stdout + shell: run command using shell + echo: monitor output to console""" + # By default we separate stderr, don't run in a shell, and don't echo + stderr = kwargs.get( 'stderr', PIPE ) + shell = kwargs.get( 'shell', False ) + echo = kwargs.get( 'echo', False ) + if echo: + # cmd goes to stderr, output goes to stdout + info( cmd, '\n' ) + if len( cmd ) == 1: + cmd = cmd[ 0 ] + # Allow passing in a list or a string + if isinstance( cmd, BaseString ) and not shell: + cmd = cmd.split( ' ' ) + cmd = [ str( arg ) for arg in cmd ] + elif isinstance( cmd, list ) and shell: + cmd = " ".join( arg for arg in cmd ) + debug( '*** errRun:', cmd, '\n' ) + # pylint: disable=consider-using-with + popen = Popen( cmd, stdout=PIPE, stderr=stderr, shell=shell ) + # We use poll() because select() doesn't work with large fd numbers, + # and thus communicate() doesn't work either + out, err = '', '' + poller = poll() + poller.register( popen.stdout, POLLIN ) + fdToFile = { popen.stdout.fileno(): popen.stdout } + fdToDecoder = { popen.stdout.fileno(): getincrementaldecoder() } + outDone, errDone = False, True + if popen.stderr: + fdToFile[ popen.stderr.fileno() ] = popen.stderr + fdToDecoder[ popen.stderr.fileno() ] = getincrementaldecoder() + poller.register( popen.stderr, POLLIN ) + errDone = False + while not outDone or not errDone: + readable = poller.poll() + for fd, event in readable: + f = fdToFile[ fd ] + decoder = fdToDecoder[ fd ] + if event & ( POLLIN | POLLHUP ): + data = decoder.decode( f.read( 1024 ) ) + if echo: + output( data ) + if f == popen.stdout: + out += data + if data == '': + outDone = True + elif f == popen.stderr: + err += data + if data == '': + errDone = True + else: # something unexpected + if f == popen.stdout: + outDone = True + elif f == popen.stderr: + errDone = True + poller.unregister( fd ) + + returncode = popen.wait() + # Python 3 complains if we don't explicitly close these + popen.stdout.close() + if stderr == PIPE: + popen.stderr.close() + debug( out, err, returncode ) + return CmdResult( out, err, returncode ) + +# pylint: enable=too-many-branches + +def errFail( *cmd, **kwargs ): + "Run a 
command using errRun and raise exception on nonzero exit" + out, err, ret = errRun( *cmd, **kwargs ) + if ret: + raise Exception( "errFail: %s failed with return code %s: %s" + % ( cmd, ret, err ) ) + return CmdResult( out, err, ret ) + +def quietRun( cmd, **kwargs ): + "Run a command and return merged stdout and stderr" + return errRun( cmd, stderr=STDOUT, **kwargs ).out + +def which(cmd, **kwargs ): + "Run a command and return merged stdout and stderr" + out, _, ret = errRun( ["which", cmd], stderr=STDOUT, **kwargs ) + return out.rstrip() if ret == 0 else None + +# pylint: enable=maybe-no-member + +def isShellBuiltin( cmd ): + "Return True if cmd is a bash builtin." + if isShellBuiltin.builtIns is None: + isShellBuiltin.builtIns = set(quietRun( 'bash -c enable' ).split()) + space = cmd.find( ' ' ) + if space > 0: + cmd = cmd[ :space] + return cmd in isShellBuiltin.builtIns + + +isShellBuiltin.builtIns = None + + +# Interface management +# +# Interfaces are managed as strings which are simply the +# interface names, of the form 'nodeN-ethM'. +# +# To connect nodes, we create a pair of veth interfaces, and then place them +# in the pair of nodes that we want to communicate. We then update the node's +# list of interfaces and connectivity map. +# +# For the kernel datapath, switch interfaces +# live in the root namespace and thus do not have to be +# explicitly moved. + +def makeIntfPair( intf1, intf2, addr1=None, addr2=None, node1=None, node2=None, + deleteIntfs=True, runCmd=None ): + """Make a veth pair connnecting new interfaces intf1 and intf2 + intf1: name for interface 1 + intf2: name for interface 2 + addr1: MAC address for interface 1 (optional) + addr2: MAC address for interface 2 (optional) + node1: home node for interface 1 (optional) + node2: home node for interface 2 (optional) + deleteIntfs: delete intfs before creating them + runCmd: function to run shell commands (quietRun) + raises Exception on failure""" + if not runCmd: + runCmd = quietRun if not node1 else node1.cmd + runCmd2 = quietRun if not node2 else node2.cmd + if deleteIntfs: + # Delete any old interfaces with the same names + runCmd( 'ip link del ' + intf1 ) + runCmd2( 'ip link del ' + intf2 ) + # Create new pair + netns = 1 if not node2 else node2.pid + if addr1 is None and addr2 is None: + cmdOutput = runCmd( 'ip link add name %s ' + 'type veth peer name %s ' + 'netns %s' % ( intf1, intf2, netns ) ) + else: + cmdOutput = runCmd( 'ip link add name %s ' + 'address %s ' + 'type veth peer name %s ' + 'address %s ' + 'netns %s' % + ( intf1, addr1, intf2, addr2, netns ) ) + if cmdOutput: + raise Exception( "Error creating interface pair (%s,%s): %s " % + ( intf1, intf2, cmdOutput ) ) + +def retry( retries, delaySecs, fn, *args, **keywords ): + """Try something several times before giving up. + n: number of times to retry + delaySecs: wait this long between tries + fn: function to call + args: args to apply to function call""" + tries = 0 + while not fn( *args, **keywords ) and tries < retries: + sleep( delaySecs ) + tries += 1 + if tries >= retries: + error( "*** gave up after %i retries\n" % tries ) + exit( 1 ) + +def moveIntfNoRetry( intf, dstNode, printError=False ): + """Move interface to node, without retrying. 
+ intf: string, interface + dstNode: destination Node + printError: if true, print error""" + intf = str( intf ) + cmd = 'ip link set %s netns %s' % ( intf, dstNode.pid ) + cmdOutput = quietRun( cmd ) + # If ip link set does not produce any output, then we can assume + # that the link has been moved successfully. + if cmdOutput: + if printError: + error( '*** Error: moveIntf: ' + intf + + ' not successfully moved to ' + dstNode.name + ':\n', + cmdOutput ) + return False + return True + +def moveIntf( intf, dstNode, printError=True, + retries=3, delaySecs=0.001 ): + """Move interface to node, retrying on failure. + intf: string, interface + dstNode: destination Node + printError: if true, print error""" + retry( retries, delaySecs, moveIntfNoRetry, intf, dstNode, + printError=printError ) + +# Support for dumping network + +def dumpNodeConnections( nodes ): + "Dump connections to/from nodes." + + def dumpConnections( node ): + "Helper function: dump connections to node" + for intf in node.intfList(): + output( ' %s:' % intf ) + if intf.link: + intfs = [ intf.link.intf1, intf.link.intf2 ] + intfs.remove( intf ) + output( intfs[ 0 ] ) + else: + output( ' ' ) + + for node in nodes: + output( node.name ) + dumpConnections( node ) + output( '\n' ) + +def dumpNetConnections( net ): + "Dump connections in network" + nodes = net.controllers + net.switches + net.hosts + dumpNodeConnections( nodes ) + +def dumpPorts( switches ): + "dump interface to openflow port mappings for each switch" + for switch in switches: + output( '%s ' % switch.name ) + for intf in switch.intfList(): + port = switch.ports[ intf ] + output( '%s:%d ' % ( intf, port ) ) + output( '\n' ) + +# IP and Mac address formatting and parsing + +def _colonHex( val, bytecount ): + """Generate colon-hex string. + val: input as unsigned int + bytecount: number of bytes to convert + returns: chStr colon-hex string""" + pieces = [] + for i in range( bytecount - 1, -1, -1 ): + piece = ( ( 0xff << ( i * 8 ) ) & val ) >> ( i * 8 ) + pieces.append( '%02x' % piece ) + chStr = ':'.join( pieces ) + return chStr + +def macColonHex( mac ): + """Generate MAC colon-hex string from unsigned int. + mac: MAC address as unsigned int + returns: macStr MAC colon-hex string""" + return _colonHex( mac, 6 ) + +def ipStr( ip ): + """Generate IP address string from an unsigned int. + ip: unsigned int of form w << 24 | x << 16 | y << 8 | z + returns: ip address string w.x.y.z""" + w = ( ip >> 24 ) & 0xff + x = ( ip >> 16 ) & 0xff + y = ( ip >> 8 ) & 0xff + z = ip & 0xff + return "%i.%i.%i.%i" % ( w, x, y, z ) + +def ipNum( w, x, y, z ): + """Generate unsigned int from components of IP address + returns: w << 24 | x << 16 | y << 8 | z""" + return ( w << 24 ) | ( x << 16 ) | ( y << 8 ) | z + +def ipAdd( i, prefixLen=8, ipBaseNum=0x0a000000 ): + """Return IP address string from ints + i: int to be added to ipbase + prefixLen: optional IP prefix length + ipBaseNum: option base IP address as int + returns IP address as string""" + imax = 0xffffffff >> prefixLen + assert i <= imax, 'Not enough IP addresses in the subnet' + mask = 0xffffffff ^ imax + ipnum = ( ipBaseNum & mask ) + i + return ipStr( ipnum ) + +def ipParse( ip ): + "Parse an IP address and return an unsigned int." + args = [ int( arg ) for arg in ip.split( '.' 
) ] + while len(args) < 4: + args.insert( len(args) - 1, 0 ) + return ipNum( *args ) + +def netParse( ipstr ): + """Parse an IP network specification, returning + address and prefix len as unsigned ints""" + prefixLen = 0 + if '/' in ipstr: + ip, pf = ipstr.split( '/' ) + prefixLen = int( pf ) + # if no prefix is specified, set the prefix to 24 + else: + ip = ipstr + prefixLen = 24 + return ipParse( ip ), prefixLen + +def checkInt( s ): + "Check if input string is an int" + try: + int( s ) + return True + except ValueError: + return False + +def checkFloat( s ): + "Check if input string is a float" + try: + float( s ) + return True + except ValueError: + return False + +def makeNumeric( s ): + "Convert string to int or float if numeric." + if checkInt( s ): + return int( s ) + elif checkFloat( s ): + return float( s ) + else: + return s + +# Popen support + +def pmonitor(popens, timeoutms=500, readline=True, + readmax=1024 ): + """Monitor dict of hosts to popen objects + a line at a time + timeoutms: timeout for poll() + readline: return single line of output + yields: host, line/output (if any) + terminates: when all EOFs received""" + poller = poll() + fdToHost = {} + fdToDecoder = {} + for host, popen in popens.items(): + fd = popen.stdout.fileno() + fdToHost[ fd ] = host + fdToDecoder[ fd ] = getincrementaldecoder() + poller.register( fd, POLLIN ) + flags = fcntl( fd, F_GETFL ) + fcntl( fd, F_SETFL, flags | O_NONBLOCK ) + # pylint: disable=too-many-nested-blocks + while popens: + fds = poller.poll( timeoutms ) + if fds: + for fd, event in fds: + host = fdToHost[ fd ] + decoder = fdToDecoder[ fd ] + popen = popens[ host ] + if event & ( POLLIN | POLLHUP ): + while True: + try: + f = popen.stdout + line = decoder.decode( f.readline() if readline + else f.read( readmax ) ) + except IOError: + line = '' + if line == '': + break + yield host, line + if event & POLLHUP: + poller.unregister( fd ) + del popens[ host ] + else: + yield None, '' + +# Other stuff we use +def sysctlTestAndSet( name, limit ): + "Helper function to set sysctl limits" + # convert non-directory names into directory names + if '/' not in name: + name = '/proc/sys/' + name.replace( '.', '/' ) + # read limit + with open( name, 'r' ) as readFile: + oldLimit = readFile.readline() + if isinstance( limit, int ): + # compare integer limits before overriding + if int( oldLimit ) < limit: + with open( name, 'w' ) as writeFile: + writeFile.write( "%d" % limit ) + else: + # overwrite non-integer limits + with open( name, 'w' ) as writeFile: + writeFile.write( limit ) + +def rlimitTestAndSet( name, limit ): + "Helper function to set rlimits" + soft, hard = getrlimit( name ) + if soft < limit: + hardLimit = hard if limit < hard else limit + setrlimit( name, ( limit, hardLimit ) ) + +def fixLimits(): + "Fix ridiculously small resource limits." 
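# Raise the nproc/nofile rlimits and a handful of kernel sysctls (open files,
# socket buffers, ARP cache, routing table size, PTY count) so that large
# emulated topologies don't exhaust per-process or system-wide limits; any
# failure below is caught and only reported as a warning.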
+ debug( "*** Setting resource limits\n" ) + try: + rlimitTestAndSet( RLIMIT_NPROC, 8192 ) + rlimitTestAndSet( RLIMIT_NOFILE, 16384 ) + # Increase open file limit + sysctlTestAndSet( 'fs.file-max', 10000 ) + # Increase network buffer space + sysctlTestAndSet( 'net.core.wmem_max', 16777216 ) + sysctlTestAndSet( 'net.core.rmem_max', 16777216 ) + sysctlTestAndSet( 'net.ipv4.tcp_rmem', '10240 87380 16777216' ) + sysctlTestAndSet( 'net.ipv4.tcp_wmem', '10240 87380 16777216' ) + sysctlTestAndSet( 'net.core.netdev_max_backlog', 5000 ) + # Increase arp cache size + sysctlTestAndSet( 'net.ipv4.neigh.default.gc_thresh1', 4096 ) + sysctlTestAndSet( 'net.ipv4.neigh.default.gc_thresh2', 8192 ) + sysctlTestAndSet( 'net.ipv4.neigh.default.gc_thresh3', 16384 ) + # Increase routing table size + sysctlTestAndSet( 'net.ipv4.route.max_size', 32768 ) + # Increase number of PTYs for nodes + sysctlTestAndSet( 'kernel.pty.max', 20000 ) + # pylint: disable=broad-except + except Exception: + warn( "*** Error setting resource limits. " + "Mininet's performance may be affected.\n" ) + # pylint: enable=broad-except + +def mountCgroups( cgcontrol='cpu cpuacct cpuset' ): + """Mount cgroupfs if needed and return cgroup version + cgcontrol: cgroup controllers to check ('cpu cpuacct cpuset') + Returns: 'cgroup' | 'cgroup2' """ + # Try to read the cgroup controllers in cgcontrol + cglist = cgcontrol.split() + paths = ' '.join( '-g ' + c for c in cglist ) + cmd = 'cgget -n %s /' % paths + result = errRun( cmd ) + # If it failed, mount cgroupfs and retry + if result.ret or result.err or any( + c not in result.out for c in cglist ): + errFail( 'cgroupfs-mount' ) + result = errRun( cmd ) + errFail( cmd ) + # cpu.cfs_period_us is used for cgroup but not cgroup2 + if 'cpu.cfs_period_us' in result.out: + return 'cgroup' + return 'cgroup2' + +def natural( text ): + "To sort sanely/alphabetically: sorted( l, key=natural )" + def num( s ): + "Convert text segment to int if necessary" + return int( s ) if s.isdigit() else s + return [ num( s ) for s in re.split( r'(\d+)', str( text ) ) ] + +def naturalSeq( t ): + "Natural sort key function for sequences" + return [ natural( x ) for x in t ] + +def numCores(): + "Returns number of CPU cores based on /proc/cpuinfo" + if hasattr( numCores, 'ncores' ): + return numCores.ncores + try: + numCores.ncores = int( quietRun('grep -c processor /proc/cpuinfo') ) + except ValueError: + return 0 + return numCores.ncores + +def irange(start, end): + """Inclusive range from start to end (vs. Python insanity.) + irange(1,5) -> 1, 2, 3, 4, 5""" + return range( start, end + 1 ) + +def custom( cls, **params ): + "Returns customized constructor for class cls." + # Note: we may wish to see if we can use functools.partial() here + # and in customConstructor + def customized( *args, **kwargs): + "Customized constructor" + kwargs = kwargs.copy() + kwargs.update( params ) + return cls( *args, **kwargs ) + customized.__name__ = 'custom(%s,%s)' % ( cls, params ) + return customized + +def splitArgs( argstr ): + """Split argument string into usable python arguments + argstr: argument string with format fn,arg2,kw1=arg3... + returns: fn, args, kwargs""" + split = argstr.split( ',' ) + fn = split[ 0 ] + params = split[ 1: ] + # Convert int and float args; removes the need for function + # to be flexible with input arg formats. 
+ args = [ makeNumeric( s ) for s in params if '=' not in s ] + kwargs = {} + for s in [ p for p in params if '=' in p ]: + key, val = s.split( '=', 1 ) + kwargs[ key ] = makeNumeric( val ) + return fn, args, kwargs + +def customClass( classes, argStr ): + """Return customized class based on argStr + The args and key/val pairs in argStr will be automatically applied + when the generated class is later used. + """ + cname, args, kwargs = splitArgs( argStr ) + cls = classes.get( cname, None ) + if not cls: + raise Exception( "error: %s is unknown - please specify one of %s" % + ( cname, classes.keys() ) ) + if not args and not kwargs: + return cls + + return specialClass( cls, append=args, defaults=kwargs ) + +def specialClass( cls, prepend=None, append=None, + defaults=None, override=None ): + """Like functools.partial, but it returns a class + prepend: arguments to prepend to argument list + append: arguments to append to argument list + defaults: default values for keyword arguments + override: keyword arguments to override""" + + if prepend is None: + prepend = [] + + if append is None: + append = [] + + if defaults is None: + defaults = {} + + if override is None: + override = {} + + class CustomClass( cls ): + "Customized subclass with preset args/params" + def __init__( self, *args, **params ): + newparams = defaults.copy() + newparams.update( params ) + newparams.update( override ) + cls.__init__( self, *( list( prepend ) + list( args ) + + list( append ) ), + **newparams ) + + CustomClass.__name__ = '%s%s' % ( cls.__name__, defaults ) + return CustomClass + + +def buildTopo( topos, topoStr ): + """Create topology from string with format (object, arg1, arg2,...). + input topos is a dict of topo names to constructors, possibly w/args. + """ + topo, args, kwargs = splitArgs( topoStr ) + if topo not in topos: + raise Exception( 'Invalid topo name %s' % topo ) + return topos[ topo ]( *args, **kwargs ) + +def ensureRoot(): + """Ensure that we are running as root. + + Probably we should only sudo when needed as per Big Switch's patch. + """ + if os.getuid() != 0: + error( '*** Mininet must run as root.\n' ) + exit( 1 ) + +def waitListening( client=None, server='127.0.0.1', port=80, timeout=None ): + """Wait until server is listening on port. + returns True if server is listening""" + runCmd = ( client.cmd if client else + partial( quietRun, shell=True ) ) + if not runCmd( 'which telnet' ): + raise Exception('Could not find telnet' ) + # pylint: disable=maybe-no-member + serverIP = server if isinstance( server, BaseString ) else server.IP() + cmd = ( 'echo A | telnet -e A %s %s' % ( serverIP, port ) ) + time = 0 + result = runCmd( cmd ) + while 'Connected' not in result: + if 'No route' in result: + rtable = runCmd( 'route' ) + error( 'no route to %s:\n%s' % ( server, rtable ) ) + return False + if timeout and time >= timeout: + error( 'could not connect to %s on port %d\n' % ( server, port ) ) + return False + debug( 'waiting for', server, 'to listen on port', port, '\n' ) + info( '.' 
) + sleep( .5 ) + time += .5 + result = runCmd( cmd ) + return True + +def unitScale( num, prefix='' ): + "Return unit scale prefix and factor" + scale = 'kMGTP' + if prefix: + pos = scale.lower().index( prefix.lower() ) + return prefix, float( 10**(3*(pos+1)) ) + num, prefix, factor = float( num ), '', 1 + for i, c in enumerate(scale, start=1): + f = 10**(3*i) + if num < f: + break + prefix, factor = c, f + return prefix, float( factor ) + +def fmtBps( bps, prefix='', fmt='%.1f %sbits/sec' ): + """Return bps as iperf-style formatted rate string + prefix: lock to specific prefix (k, M, G, ...) + fmt: default format string for bps, prefix""" + bps = float( bps ) + prefix, factor = unitScale( bps, prefix ) + bps /= factor + return fmt % ( bps, prefix) diff --git a/project.py b/project.py new file mode 100644 index 0000000..a01a19f --- /dev/null +++ b/project.py @@ -0,0 +1,60 @@ + +from time import sleep +from mininet.cli import CLI +from mininet.net import Mininet +from mininet.util import dumpNodeConnections +from mininet.log import setLogLevel, info +from mininet.link import TCLink + +from project.node import BATMANRouter, LinuxRouter, OLSRRouter +from project.topo import GraphmlTopo + +def monitor_test(net): + for host in net.hosts: + host.cmd('./routing-table-monitor.sh') + enable_olsrd(net) + sleep(60) + for host in net.hosts: + host.cmd('kill %bash') + + +def do_enable_olsrd(self, line): + net = self.mn + enable_olsrd(net) + +def enable_olsrd(net): + for host in net.hosts: + host.cmd('olsrd -i ' + ' '.join(host.intfNames())) + + + +hosts = {'linuxrouter': LinuxRouter, 'olsrrouter': OLSRRouter, 'batmanrouter': BATMANRouter} +topos = {'gmltopo': GraphmlTopo} + +CLI.do_enable_olsrd = do_enable_olsrd + + +def perfTest(): + + topo = GraphmlTopo(filename='rural-gephi.graphml') + net = Mininet(topo=topo, link=TCLink, host=LinuxRouter) + net.start() + + info("Dumping host connections\n") + dumpNodeConnections(net.hosts) + + info("monitoring routing tables") + monitor_test(net) + + + info("Dumping host connections\n") + dumpNodeConnections(net.hosts) + + CLI(net) + net.stop() + + +if __name__ == '__main__': + setLogLevel('info') + # Prevent test_simpleperf from failing due to packet loss + perfTest() diff --git a/project/node.py b/project/node.py new file mode 100644 index 0000000..c61f8cf --- /dev/null +++ b/project/node.py @@ -0,0 +1,34 @@ +from mininet.node import Node + +class LinuxRouter(Node): + + def config(self, **params): + self.cmd('sysctl net.ipv4.ip_forward=1') + + def terminate(self): + self.cmd('sysctl net.ipv4.ip_forward=0') + super(LinuxRouter, self).terminate() + + +class OLSRRouter(LinuxRouter): + + def config(self, **params): + super(OLSRRouter, self).config(**params) + self.servicePid = self.cmd('olsrd -nofork -i ' + ' '.join(self.intfNames()) + ' >/dev/null 2>&1 & echo $!') + print(self.servicePid) + + def terminate(self): + self.cmd('kill ', self.servicePid) + super(OLSRRouter, self).terminate() + + +class BATMANRouter(LinuxRouter): + + def config(self, **params): + super(BATMANRouter, self).config(**params) + self.servicePid = self.cmd('batmand ' + ' '.join(self.intfNames()) + '& echo $!') + print(self.servicePid) + + def terminate(self): + self.cmd('kill ' + self.servicePid) + super(BATMANRouter, self).terminate() diff --git a/project/topo.py b/project/topo.py new file mode 100644 index 0000000..e54bb4b --- /dev/null +++ b/project/topo.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python + + +from sys import argv + +import xml.etree.ElementTree as ET + +from mininet.topo import 
Topo +from mininet.link import TCLink + + +# It would be nice if we didn't have to do this: +# pylint: disable=arguments-differ + + +class GraphmlTopo(Topo): + + def build(self, filename='topology.graphml'): + + positions = dict() + + try: + graph = ET.parse(filename).getroot() + except Exception as error: + print('oops: ', error) + exit(1) + + for node in graph.iter('{http://graphml.graphdrawing.org/xmlns}node'): + node_id = int(node.get('id')) + 1 + privateDirs = ['/var/log','/var/run'] + + self.addHost('h%i' % node_id, privateDirs=privateDirs) #, + #cls=LinuxRouter) + + x_pos = node.find('.//data[@key="x"]') + y_pos = node.find('.//data[@key="y"]') + positions[node_id] = (x_pos, y_pos) + + for link in graph.iter('{http://graphml.graphdrawing.org/xmlns}edge'): + + link_id = int(link.get('id')) + 1 + + source = int(link.get('source')) + 1 + target = int(link.get('target')) + 1 + + intfName_s, addr1, params_s = self.format_intf_params(link_id, source) + intfName_t, addr2, params_t = self.format_intf_params(link_id, target) + + linkopts = dict() # dict(bw=10, delay='5ms', loss=20, max_queue_size=1000, use_htb=True) + # to implement a function which from nodes positions return linkopts + self.addLink('h%i' % source, 'h%i' % target, key=link_id, cls=TCLink, + intfName1=intfName_s, addr1=addr1, params1=params_s, + intfName2=intfName_t, addr2=addr2 ,params2=params_t, + **linkopts) + + + def format_intf_params(self, link_id, node_id): + intf_name = 'h%i-eth%i' % (node_id, link_id) + addr = '00:00:00:00:%02i:%02i' % (link_id, node_id) + ip = '10.0.%i.%i/24' % (link_id, node_id) + params = {'ip': ip} + return intf_name, addr, params + + diff --git a/routing-table-monitor.sh b/routing-table-monitor.sh new file mode 100644 index 0000000..fcc83be --- /dev/null +++ b/routing-table-monitor.sh @@ -0,0 +1,38 @@ +name="$(ifconfig | awk -F- '$2 ~ /eth/ { print $1; exit;}')" +log_directory="./logs/" +log_file="${name}_routingtable.log" +log_path="${log_directory}${log_file}" + +echo > "$log_path" + +function parse_diff () { + + str=$(echo "$1" | sed '1,5d') + + add_line=$(echo "$str" | grep -E '^\+') + rem_line=$(echo "$str" | grep -E '^-') + + [ -n "$rem_line" ] && log "$(echo "$rem_line" | cut -c 2-)" "del:" + [ -n "$add_line" ] && log "$(echo "$add_line" | cut -c 2-)" "add:" +} + +function log () { + while IFS= read -r line; do + echo -e "$(date +"%Y-%m-%d_%H-%M-%S")\t$2\t$line" >> ${log_path} + done <<< "$1" +} + + +curr=$(route -n) +atstart="$(echo "$curr" | sed '1,2d')" +log "$atstart" "add:" + + +while true; +do + prev="$curr" + curr=$(route -n) + + diff_out=$(diff -u <(echo "$prev") <(echo "$curr")) + [ $? 
-eq 0 ] || parse_diff "$diff_out" +done diff --git a/whole-project.py b/whole-project.py new file mode 100644 index 0000000..09c5d9f --- /dev/null +++ b/whole-project.py @@ -0,0 +1,272 @@ +#!/usr/bin/env python + +from math import sqrt +import os +from signal import SIGINT +from time import sleep +from mininet.cli import CLI +from mininet.net import Mininet +from mininet.util import dumpNodeConnections, moveIntf, pmonitor +from mininet.log import setLogLevel, info +from mininet.link import TCLink + +from mininet.node import Node +from sys import argv +import xml.etree.ElementTree as ET +from mininet.topo import Topo +from mininet.link import TCLink + + +# It would be nice if we didn't have to do this: +# pylint: disable=arguments-differ + + +class GraphmlTopo(Topo): + + ip_counter = (0,1) + + def increment_ip_counter(self): + b3, b4 = GraphmlTopo.ip_counter + if b4 < 254: + b4 += 1 + else: + b3 += 1 + b4 = 1 + GraphmlTopo.ip_counter = b3, b4 + + def build(self, filename='topology.graphml', subnet=True): + + positions = dict() + + try: + graph = ET.parse(filename).getroot() + except Exception as error: + print('oops: ', error) + exit(1) + + for node in graph.iter('{http://graphml.graphdrawing.org/xmlns}node'): + node_id = int(node.get('id')) + 1 + privateDirs = ['/var/log','/var/run'] + + self.addHost('h%i' % node_id, privateDirs=privateDirs) #, + #cls=LinuxRouter) + + x_pos = node.find('.//data[@key="x"]') + y_pos = node.find('.//data[@key="y"]') + positions[node_id] = (x_pos, y_pos) + + for link in graph.iter('{http://graphml.graphdrawing.org/xmlns}edge'): + + link_id = int(link.get('id')) + 1 + + source = int(link.get('source')) + 1 + target = int(link.get('target')) + 1 + + if subnet: + intfName_s, addr1, params_s = self.format_intf_params(link_id, source) + intfName_t, addr2, params_t = self.format_intf_params(link_id, target) + else: + intfName_s, addr1, params_s = self.format_intf_params_incremental(source, 16) + intfName_t, addr2, params_t = self.format_intf_params_incremental(target, 16) + + linkopts = dict() # dict(bw=10, delay='5ms', loss=20, max_queue_size=1000, use_htb=True) + # to implement a function which from nodes positions return linkopts + self.addLink('h%i' % source, 'h%i' % target, key=link_id, cls=TCLink, + intfName1=intfName_s, addr1=addr1, params1=params_s, + intfName2=intfName_t, addr2=addr2 ,params2=params_t, + **linkopts) + + def format_intf_params_incremental(self, node_id, subnet_mask_cidr): + b3, b4 = GraphmlTopo.ip_counter + self.increment_ip_counter() + addr = '00:00:00:00:00:%02i' % (node_id) + params = {'ip': '10.0.%i.%i/%i' % (b3, b4, subnet_mask_cidr)} + return None, addr, params + + + def format_intf_params(self, link_id, node_id): + intf_name = 'h%i-eth%i' % (node_id, link_id) + addr = '00:00:00:00:%02i:%02i' % (link_id, node_id) + ip = '10.0.%i.%i/24' % (link_id, node_id) + params = {'ip': ip} + return intf_name, addr, params + + +def calculate_link_params(pos_1, pos_2, x=2): + distance = sqrt((pos_1['x'] + pos_2['x'])^2 - (pos_1['y'] + pos_2['y'])^2) + Tx_W = 2 + Rx_W = 2 + +class LinuxRouter(Node): + + def config(self, **params): + self.cmd('sysctl net.ipv4.ip_forward=1') + + def terminate(self): + self.cmd('sysctl net.ipv4.ip_forward=0') + super(LinuxRouter, self).terminate() + + def startRTMonitor(self): + self.popenRTMonitor = self.popen('bash routing-table-monitor.sh') + + def stopRTMonitor(self): + self.popenRTMonitor.send_signal(SIGINT) + self.popenRTMonitor.wait() + + +class OLSRRouter(LinuxRouter): + + def config(self, **params): + 
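# First enable IPv4 forwarding through LinuxRouter.config(), then start olsrd
# in the foreground (-nofork) on all of this router's interfaces, keeping the
# Popen handle so terminate() can later stop the daemon with SIGINT.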
super(OLSRRouter, self).config(**params) + self.popenOLSR = self.popen('olsrd', '-nofork' , '-i', *self.intfNames()) + + def terminate(self): + self.popenOLSR.send_signal(SIGINT) + self.popenOLSR.wait() + super(OLSRRouter, self).terminate() + + +class OLSRRouterMonitored(OLSRRouter): + + def config(self, **params): + self.startRTMonitor() + super(OLSRRouterMonitored, self).config(**params) + + def terminate(self): + if self.popenRTMonitor: + self.popenRTMonitor.send_signal(SIGINT) + super(OLSRRouterMonitored, self).terminate() + + + +class BATMANDRouter(LinuxRouter): + + def config(self, **params): + super(BATMANDRouter, self).config(**params) + batman_param=[]#sum([['-a', i.IP()+'/32'] for i in self.intfList()],[]) + batman_param.extend(self.intfNames()) + self.cmd('batmand', *batman_param) + + def terminate(self): + # self.popenBATMAND.send_signal(SIGINT) + # self.popenBATMAND.wait() + super(BATMANDRouter, self).terminate() + + +class BATMANDRouterMonitored(BATMANDRouter): + + def config(self, **params): + self.startRTMonitor() + super(BATMANDRouterMonitored, self).config(**params) + + def terminate(self): + if self.popenRTMonitor: + self.popenRTMonitor.send_signal(SIGINT) + super(BATMANDRouterMonitored, self).terminate() + + +class BATMANADVRouter(LinuxRouter): + + def config(self, **params): + super(BATMANADVRouter, self).config(**params) + if self.cmd('lsmod', '|', 'grep', '-q', 'batman') is not 0: + self.cmd('modprobe', 'batman-adv') + + self.batman_intf = '%s-bat0' % self.name + self.cmd() + + for intf in self.intfNames(): + self.cmd('batctl', '-m', self.batman_intf, 'if', 'add', intf) + + moveIntf(self.batman_intf, self) + id = int(self.name[1:]) + self.cmd('ip', 'address', 'add', '192.168.123.%i/24' % id, 'dev', self.batman_intf) + self.cmd('ip', 'link', 'set', 'up', 'dev', self.batman_intf) + + def terminate(self): + self.cmd('batctl', '-m', self.batman_intf, 'if', 'destroy') + super(BATMANADVRouter, self).terminate() + + +def routing_table_monitor_test(net): + info("monitoring routing tables\n") + + while (net.pingAll() != 0): + sleep(5) + + info("routing tables converged\n") + + log("stop link h1-h14") + disable_link(net['h1'], net['h14']) + + + #net['h11'].stop() + + # sleep(60) + + # for host in net.hosts: + # host.cmd('kill %bash') + +def log(event): + os.system('echo "{}:\t\t$(date +"%T %N")" >> logs/global.log'.format(event)) + + +def do_disable_link(self, line): + net = self.mn + h1, h2 = line.split(" ") + disable_link(net[h1], net[h2]) + +def disable_link(host1, host2): + intf1, intf2 = host1.connectionsTo(host2)[0] + host1.cmd('iptables -A INPUT -m mac --mac-source', intf2.MAC(), '-j DROP') + +def do_enable_olsrd(self, line): + net = self.mn + enable_olsrd(net) + +def enable_olsrd(net): + for host in net.hosts: + host.cmd('olsrd -i ' + ' '.join(host.intfNames())) + + + +hosts = {'linuxrouter': LinuxRouter, + 'olsrrouter': OLSRRouter, + 'batmanadvrouter': BATMANADVRouter, + 'batmandrouter': BATMANDRouter} + +topos = {'gmltopo': GraphmlTopo} + +CLI.do_enable_olsrd = do_enable_olsrd +CLI.do_disable_link = do_disable_link + +def perfTest(): + + topo = GraphmlTopo(filename='rural2.graphml') + net = Mininet(topo=topo, link=TCLink, host=OLSRRouterMonitored) + net.start() + + info("Dumping host connections\n") + dumpNodeConnections(net.hosts) + + CLI(net) + net.stop() + +def perfTestBatman(): + + topo = GraphmlTopo(filename='rural2.graphml', subnet=False) + net = Mininet(topo=topo, link=TCLink, host=BATMANDRouterMonitored) + net.start() + + routing_table_monitor_test(net) + 
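# routing_table_monitor_test() above keeps calling net.pingAll() until packet
# loss reaches zero (i.e. batmand's routing tables have converged), then
# simulates a failure of the h1-h14 link by dropping frames from h14's MAC
# address on h1 via iptables.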
info("Dumping host connections\n") + dumpNodeConnections(net.hosts) + CLI(net) + net.stop() + + +if __name__ == '__main__': + setLogLevel('info') + # Prevent test_simpleperf from failing due to packet loss + perfTest() + #perfTestBatman()