Example #1
File: globals.go  Project: prydeep/tegu
/*
	Sets up the global variables needed by the whole package. This should be invoked by the
	main tegu function (main/tegu.go).

	CAUTION:  this is not implemented as an init() function because we must pass information
			from main into this package.
*/
func Initialise(cfg_fname *string, ver *string, nwch chan *ipc.Chmsg, rmch chan *ipc.Chmsg, rmluch chan *ipc.Chmsg, osifch chan *ipc.Chmsg, fqch chan *ipc.Chmsg, amch chan *ipc.Chmsg) (err error) {
	err = nil

	def_log_dir := "."
	log_dir := &empty_str

	nw_ch = nwch
	rmgr_ch = rmch
	rmgrlu_ch = rmluch
	osif_ch = osifch
	fq_ch = fqch
	am_ch = amch

	if ver != nil {
		version = *ver
	}

	tegu_sheep = bleater.Mk_bleater(1, os.Stderr) // the main (parent) bleater used by libraries and as master 'volume' control
	tegu_sheep.Set_prefix("tegu")

	pid = os.Getpid() // used to keep reservation names unique across invocations

	tklr = ipc.Mk_tickler(30)                   // shouldn't need more than 30 different tickle spots
	tklr.Add_spot(2, rmgr_ch, REQ_NOOP, nil, 1) // a quick burst tickle to prevent a long block if the first goroutine to schedule a tickle schedules a long wait

	if cfg_fname != nil {
		cfg_data, err = config.Parse2strs(nil, *cfg_fname) // capture config data as strings -- referenced as cfg_data["sect"]["key"]
		if err != nil {
			err = fmt.Errorf("unable to parse config file %s: %s", *cfg_fname, err)
			return
		}

		if p := cfg_data["default"]["shell"]; p != nil {
			shell_cmd = *p
		}
		if p := cfg_data["default"]["verbose"]; p != nil {
			tegu_sheep.Set_level(uint(clike.Atoi(*p)))
		}
		if log_dir = cfg_data["default"]["log_dir"]; log_dir == nil {
			log_dir = &def_log_dir
		}
	} else {
		cfg_data = nil
	}

	tegu_sheep.Add_child(gizmos.Get_sheep()) // since we don't directly initialise the gizmo environment we ask for its sheep
	if *log_dir != "stderr" {                // if overridden in config
		lfn := tegu_sheep.Mk_logfile_nm(log_dir, 86400)
		tegu_sheep.Baa(1, "switching to log file: %s", *lfn)
		tegu_sheep.Append_target(*lfn, false)      // switch bleaters to the log file rather than stderr
		go tegu_sheep.Sheep_herder(log_dir, 86400) // start the function that will roll the log now and again
	}

	return
}
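
The cfg_data lookups above rely on config.Parse2strs() producing a two-level map of *string values in which a missing section or key yields nil, so every reference is guarded before dereferencing. A minimal sketch of that defensive pattern as a standalone helper (the helper name, section, and key below are hypothetical, not part of tegu):

// minimal sketch, assuming the map[string]map[string]*string shape used by cfg_data above
func lookup_cfg(cfg map[string]map[string]*string, sect string, key string, def string) string {
	if cfg == nil || cfg[sect] == nil { // no config at all, or section absent
		return def
	}
	if p := cfg[sect][key]; p != nil { // key present; safe to dereference
		return *p
	}
	return def // fall back to the caller's default
}

A call such as lookup_cfg(cfg_data, "default", "log_dir", ".") would mirror the log_dir handling in Initialise.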
Example #2
File: msgrtr.go  Project: prydeep/gopkgs
/*
	Initialises the message router and returns the channel on which it will
	accept requests (ipc structs), allowing the user thread(s) to
	register for messages.  Port is the port that the http listener should
	camp on, and url is the url string that should be used.  Port may take either of
	these two forms:
		interface:port
		port

	If interface is supplied, then the listener will be started only on that interface/port
	combination. If interface is omitted, then the listener will listen on all interfaces.
	This function may be invoked multiple times, with different ports, but be advised that
	all messages are funneled to the same handler.  Multiple invocations only serve to
	establish different interfaces and/or ports.
*/
func Start(port string, url string, usr_sheep *bleater.Bleater) chan *ipc.Chmsg {
	sheep = bleater.Mk_bleater(0, os.Stderr) // create our bleater
	sheep.Set_prefix("msgrtr")
	if usr_sheep != nil {
		usr_sheep.Add_child(sheep) // we become a child if given to us so that if the master vol is adjusted we'll react too
	} else {
		sheep.Set_level(1)
	}

	if disp_ch == nil {
		disp_ch = make(chan *ipc.Chmsg, 1024)
		go dispatcher(disp_ch)
	}

	go listen(url, port)

	return disp_ch
}
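
Start's port argument accepts both forms described in the header comment, and repeated calls hand back the same dispatcher channel (disp_ch is created only once). A hedged usage sketch; the port numbers, interface address, url, and parent_sheep bleater are placeholders:

// minimal sketch: one listener on every interface, a second bound to one interface only.
// Both calls return the same dispatcher channel, so all messages reach one handler.
disp_ch := msgrtr.Start("29080", "/tegu/events", parent_sheep)         // port-only form
disp_ch = msgrtr.Start("10.0.0.5:29081", "/tegu/events", parent_sheep) // interface:port form
// user threads later send ipc.Chmsg registration requests on disp_ch (format not shown here)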
Example #3
File: res_mgr.go  Project: att/tegu
/*
	Executes as a goroutine to drive the reservation manager portion of tegu.
*/
func Res_manager( my_chan chan *ipc.Chmsg, cookie *string ) {

	var (
		inv	*Inventory
		msg	*ipc.Chmsg
		ckptd	string
		last_qcheck	int64 = 0			// time that the last queue check was made to set window
		last_chkpt	int64 = 0			// time that the last checkpoint was written
		retry_chkpt bool = false		// checkpoint needs to be retried because of a timing issue
		queue_gen_type = REQ_GEN_EPQMAP
		alt_table = DEF_ALT_TABLE		// table number where meta marking happens
		all_sys_up	bool = false;		// set when we receive the all_up message; some functions (chkpt) must wait for this
		hto_limit 	int = 3600 * 18		// OVS has a size limit to the hard timeout value, this caps it just under the OVS limit
		res_refresh	int64 = 0			// next time when we must force all reservations to refresh flow-mods (hto_limit nonzero)
		rr_rate		int = 3600			// refresh rate (1 hour)
		favour_v6 bool = true			// favour ipv6 addresses if a host has both defined.
	)

	super_cookie = cookie				// global for all methods

	rm_sheep = bleater.Mk_bleater( 0, os.Stderr )		// allocate our bleater and attach it to the master
	rm_sheep.Set_prefix( "res_mgr" )
	tegu_sheep.Add_child( rm_sheep )					// we become a child so that if the master vol is adjusted we'll react too

	p := cfg_data["default"]["queue_type"]				// lives in default b/c used by fq-mgr too
	if p != nil {
		if *p == "endpoint" {
			queue_gen_type = REQ_GEN_EPQMAP
		} else {
			queue_gen_type = REQ_GEN_QMAP
		}
	}

	p = cfg_data["default"]["alttable"]				// alt table for meta marking
	if p != nil {
		alt_table = clike.Atoi( *p )
	}

	p = cfg_data["default"]["favour_ipv6"]
	if p != nil {
		favour_v6 = *p == "true"
	}

	if cfg_data["resmgr"] != nil {
		cdp := cfg_data["resmgr"]["chkpt_dir"]
		if cdp == nil {
			ckptd = "/var/lib/tegu/resmgr"							// default directory and prefix
		} else {
			ckptd = *cdp + "/resmgr"							// add prefix to directory in config
		}

		p = cfg_data["resmgr"]["verbose"]
		if p != nil {
			rm_sheep.Set_level(  uint( clike.Atoi( *p ) ) )
		}

		/*
		p = cfg_data["resmgr"]["set_vlan"]
		if p != nil {
			set_vlan = *p == "true"
		}
		*/

		p = cfg_data["resmgr"]["super_cookie"]
		if p != nil {
			super_cookie = p
			rm_sheep.Baa( 1, "super-cookie was set from config file" )
		}

		p = cfg_data["resmgr"]["hto_limit"]					// if OVS or whatever has a max timeout we can ensure it's not surpassed
		if p != nil {
			hto_limit = clike.Atoi( *p )
		}

		p = cfg_data["resmgr"]["res_refresh"]				// rate that reservations are refreshed if hto_limit is non-zero
		if p != nil {
			rr_rate = clike.Atoi( *p )
			if rr_rate < 900 {
				if rr_rate < 120 {
					rm_sheep.Baa( 0, "NOTICE: reservation refresh rate in config is insanely low (%ds) and was changed to 1800s", rr_rate )
					rr_rate = 1800
				} else {
					rm_sheep.Baa( 0, "NOTICE: reservation refresh rate in config is too low: %ds", rr_rate )
				}
			}
		}
	}

	send_meta_counter := 200;										// send meta f-mods only now and again
	rm_sheep.Baa( 1, "ovs table number %d used for metadata marking", alt_table )

	res_refresh = time.Now().Unix() + int64( rr_rate )				// set first refresh in an hour (ignored if hto_limit not set)
	inv = Mk_inventory( )
	inv.chkpt = chkpt.Mk_chkpt( ckptd, 10, 90 )

	last_qcheck = time.Now().Unix()

	tkl_ch := make( chan *ipc.Chmsg, 5 )								// special, short buffer, channel for tickles allows 5 to queue before blocking sender
	tklr.Add_spot( 2, tkl_ch, REQ_PUSH, nil, ipc.FOREVER )				// push reservations to agent just before they go live
	tklr.Add_spot( 1, tkl_ch, REQ_SETQUEUES, nil, ipc.FOREVER )			// drives us to see if queues need to be adjusted
	tklr.Add_spot( 5, tkl_ch, REQ_RTRY_CHKPT, nil, ipc.FOREVER )		// ensures that we retried any missed checkpoints
	tklr.Add_spot( 60, tkl_ch, REQ_VET_RETRY, nil, ipc.FOREVER )		// run the retry queue if it has size

	go rm_lookup( rmgrlu_ch, inv )

	rm_sheep.Baa( 3, "res_mgr is running  %x", my_chan )
	for {
		select {									// select next ready message on either channel
			case msg = <- tkl_ch:					// msg available on tickle channel
				msg.State = nil						// nil state is OK, no error
				my_chan <- msg;						// just pass it through; tkl_ch has a small buffer (blocks quickly) and this prevents filling the main queue w/ tickles if we get busy	

			case msg = <- my_chan:					// process message from the main channel
				rm_sheep.Baa( 3, "processing message: %d", msg.Msg_type )
				switch msg.Msg_type {
					case REQ_NOOP:			// just ignore

					case REQ_ADD:
						msg.State = inv.Add_res( msg.Req_data )			// add will determine the pledge type and do the right thing
						msg.Response_data = nil


					case REQ_ALLUP:			// signals that all initialisation is complete (chkpting etc. can go)
						all_sys_up = true
						// periodic checkpointing turned off with the introduction of tegu_ha
						//tklr.Add_spot( 180, my_chan, REQ_CHKPT, nil, ipc.FOREVER )		// tickle spot to drive us every 180 seconds to checkpoint

					case REQ_RTRY_CHKPT:									// called to attempt to send a queued checkpoint request
						if all_sys_up {
							if retry_chkpt {
								rm_sheep.Baa( 3, "invoking checkpoint (retry)" )
								retry_chkpt, last_chkpt = inv.write_chkpt( last_chkpt )
							}
						}

					case REQ_CHKPT:											// external thread has requested checkpoint
						if all_sys_up {
							rm_sheep.Baa( 3, "invoking checkpoint" )
							retry_chkpt, last_chkpt = inv.write_chkpt( last_chkpt )
						}

					case REQ_DEL:											// user initiated delete -- requires cookie
						data := msg.Req_data.( []*string )					// assume pointers to name and cookie
						if data[0] != nil  &&  *data[0] == "all" {
							inv.Del_all_res( data[1] )
							msg.State = nil
						} else {
							msg.State = inv.Del_res( data[0], data[1] )
						}

						inv.push_reservations( my_chan, alt_table, int64( hto_limit ), favour_v6 )			// must force a push to push augmented (shortened) reservations
						msg.Response_data = nil

					case REQ_DUPCHECK:
						if msg.Req_data != nil {
							msg.Response_data, msg.State = inv.dup_check(  msg.Req_data.( *gizmos.Pledge ) )
						}

					case REQ_GET:											// user initiated get -- requires cookie
						data := msg.Req_data.( []*string )					// assume pointers to name and cookie
						msg.Response_data, msg.State = inv.Get_res( data[0], data[1] )

					case REQ_LIST:											// list reservations	(for a client)
						msg.Response_data, msg.State = inv.res2json( )

					case REQ_LOAD:								// load from a checkpoint file
						data := msg.Req_data.( *string )		// assume pointers to name and cookie
						msg.State = inv.load_chkpt( data )
						msg.Response_data = nil
						rm_sheep.Baa( 1, "checkpoint file loaded" )

					case REQ_PAUSE:
						msg.State = nil							// right now this cannot fail in ways we know about
						msg.Response_data = ""
						inv.pause_on()
						res_refresh = 0;						// must force a push of everything on next push tickle
						rm_sheep.Baa( 1, "pausing..." )

					case REQ_RESUME:
						msg.State = nil							// right now this cannot fail in ways we know about
						msg.Response_data = ""
						res_refresh = 0;						// must force a push of everything on next push tickle
						inv.pause_off()

					case REQ_SETQUEUES:							// driven about every second to reset the queues if a reservation state has changed
						now := time.Now().Unix()
						if now > last_qcheck  &&  inv.any_concluded( now - last_qcheck ) || inv.any_commencing( now - last_qcheck, 0 ) {
							rm_sheep.Baa( 1, "channel states: rm=%d rmlu=%d fq=%d net=%d agent=%d", len( rmgr_ch ), len( rmgrlu_ch ), len( fq_ch ), len( nw_ch ), len( am_ch ) )
							rm_sheep.Baa( 1, "reservation state change detected, requesting queue map from net-mgr" )
							tmsg := ipc.Mk_chmsg( )
							tmsg.Send_req( nw_ch, my_chan, queue_gen_type, time.Now().Unix(), nil )		// get a queue map; when it arrives we'll push to fqmgr and trigger flow-mod push
						}
						last_qcheck = now

					case REQ_PUSH:								// driven every few seconds to check for need to refresh because of switch max timeout setting
						if hto_limit > 0 {						// if reservation flow-mods are capped with a hard timeout limit
							now := time.Now().Unix()
							if now > res_refresh {
								rm_sheep.Baa( 2, "refreshing all reservations" )
								inv.reset_push()							// reset pushed flag on all reservations to cause active ones to be pushed again
								res_refresh = now + int64( rr_rate )		// push everything again in an hour

								inv.push_reservations( my_chan, alt_table, int64( hto_limit ), favour_v6 )			// force a push of all
							}
						}


					case REQ_PLEDGE_LIST:						// generate a list of pledges that are related to the given VM
						msg.Response_data, msg.State = inv.pledge_list(  msg.Req_data.( *string ) )

					case REQ_SETULCAP:							// user link capacity; expect array of two string pointers (name and value)
						data := msg.Req_data.( []*string )
						inv.add_ulcap( data[0], data[1] )
						retry_chkpt, last_chkpt = inv.write_chkpt( last_chkpt )

					// CAUTION: the requests below come back as asynch responses rather than as initial message
					case REQ_IE_RESERVE:						// an IE reservation failed
						msg.Response_ch = nil					// immediately disable to prevent loop
						inv.failed_push( msg )					// suss out the pledge and mark it unpushed

					case REQ_GEN_QMAP:							// response caries the queue map that now should be sent to fq-mgr to drive a queue update
						fallthrough

					case REQ_GEN_EPQMAP:
						rm_sheep.Baa( 1, "received queue map from network manager" )

						qlist := msg.Response_data.( []string )							// get the queue list for our use first
						if send_meta_counter >= 200 {
							send_meta_fmods( qlist, alt_table )								// push meta rules
							send_meta_counter = 0
						} else {
							send_meta_counter++
						}

						msg.Response_ch = nil											// immediately disable to prevent loop
						fq_data := make( []interface{}, 1 )
						fq_data[FQ_QLIST] = msg.Response_data
						tmsg := ipc.Mk_chmsg( )
						tmsg.Send_req( fq_ch, nil, REQ_SETQUEUES, fq_data, nil )		// send the queue list to fq manager to deal with

						inv.push_reservations( my_chan, alt_table, int64( hto_limit ), favour_v6 )			// now safe to push reservations if any activated

					case REQ_VET_RETRY:
						if inv != nil && len( inv.retry ) > 0 {
							inv.vet_retries( )
						}

					case REQ_YANK_RES:										// yank a reservation from the inventory returning the pledge and allowing flow-mods to purge
						if msg.Response_ch != nil {
							msg.Response_data, msg.State = inv.yank_res( msg.Req_data.( *string ) )
						}

					/* deprecated -- moved to rm_lookup
					case REQ_GET_MIRRORS:									// user initiated get list of mirrors
						t := inv.Get_mirrorlist()
						msg.Response_data = &t;
					*/

					default:
						rm_sheep.Baa( 0, "WRN: res_mgr: unknown message: %d [TGURMG001]", msg.Msg_type )
						msg.Response_data = nil
						msg.State = fmt.Errorf( "res_mgr: unknown message (%d)", msg.Msg_type )
						msg.Response_ch = nil				// we don't respond to these.
				}	// end main channel case

		}		// end select

		rm_sheep.Baa( 3, "processing message complete: %d", msg.Msg_type )
		if msg.Response_ch != nil {			// if a response channel was provided
			msg.Response_ch <- msg			// send our result back to the requester
		}
	}
}
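
Clients of Res_manager drive it with the Chmsg request/response pattern visible in example #5 below: allocate a message, Send_req it on rmgr_ch with a private response channel, then block on that channel. A condensed sketch of a synchronous "list reservations" call, reusing the channel and constant names as they appear inside the managers package:

// minimal sketch, assuming the rmgr_ch channel and REQ_LIST constant shown in these examples
my_ch := make(chan *ipc.Chmsg)                   // private channel for the reply
req := ipc.Mk_chmsg()
req.Send_req(rmgr_ch, my_ch, REQ_LIST, nil, nil) // ask res_mgr to list reservations
req = <-my_ch                                    // block until res_mgr responds
if req.State == nil {
	jlist := req.Response_data                   // res2json() output, per the REQ_LIST case above
	_ = jlist
}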
Example #4
File: osif.go  Project: krjoshi/tegu
/*
	Executed as a goroutine, this loops waiting for messages from the tickler and takes
	action based on what is needed.
*/
func Osif_mgr(my_chan chan *ipc.Chmsg) {

	var (
		msg           *ipc.Chmsg
		os_list       string                    = ""
		os_sects      []string                       // sections in the config file
		os_refs       map[string]*ostack.Ostack      // creds for each project we need to request info from
		os_projects   map[string]*osif_project       // list of project info (maps)
		os_admin      *ostack.Ostack                 // admin creds
		refresh_delay int                       = 15 // config file can override
		id2pname      map[string]*string             // project id/name translation maps
		pname2id      map[string]*string
		req_token     bool    = false // if set to true in config file the token _must_ be present when called to validate
		def_passwd    *string         // defaults and what we assume are the admin creds
		def_usr       *string
		def_url       *string
		def_project   *string
		def_region    *string
	)

	osif_sheep = bleater.Mk_bleater(0, os.Stderr) // allocate our bleater and attach it to the master
	osif_sheep.Set_prefix("osif_mgr")
	tegu_sheep.Add_child(osif_sheep) // we become a child so that if the master vol is adjusted we'll react too

	//ostack.Set_debugging( 0 );

	// ---- pick up configuration file things of interest --------------------------

	if cfg_data["osif"] != nil { // cannot imagine that this section is missing, but don't fail if it is
		def_passwd = cfg_data["osif"]["passwd"] // defaults applied if non-section given in list, or info omitted from the section
		def_usr = cfg_data["osif"]["usr"]
		def_url = cfg_data["osif"]["url"]
		def_project = cfg_data["osif"]["project"]

		p := cfg_data["osif"]["refresh"]
		if p != nil {
			refresh_delay = clike.Atoi(*p)
			if refresh_delay < 15 {
				osif_sheep.Baa(1, "refresh was too small (%ds), setting to 15", refresh_delay)
				refresh_delay = 15
			}
		}

		p = cfg_data["osif"]["debug"]
		if p != nil {
			v := clike.Atoi(*p)
			if v > -5 {
				ostack.Set_debugging(v)
			}
		}

		p = cfg_data["osif"]["region"]
		if p != nil {
			def_region = p
		}

		p = cfg_data["osif"]["ostack_list"] // preferred placement in osif section
		if p == nil {
			p = cfg_data["default"]["ostack_list"] // originally in default, so backwards compatible
		}
		if p != nil {
			os_list = *p
		}

		p = cfg_data["osif"]["require_token"]
		if p != nil && *p == "true" {
			req_token = true
		}

		p = cfg_data["osif"]["verbose"]
		if p != nil {
			osif_sheep.Set_level(uint(clike.Atoi(*p)))
		}
	}

	if os_list == " " || os_list == "" || os_list == "off" {
		osif_sheep.Baa(0, "osif disabled: no openstack list (ostack_list) defined in configuration file or setting is 'off'")
	} else {
		// TODO -- investigate getting id2pname maps from each specific set of creds defined if an overarching admin name is not given

		os_admin = get_admin_creds(def_url, def_usr, def_passwd, def_project, def_region) // this will block until we authenticate
		if os_admin != nil {
			osif_sheep.Baa(1, "admin creds generated, mapping tenants")
			pname2id, id2pname, _ = os_admin.Map_tenants() // list only projects we belong to
			for k, v := range pname2id {
				osif_sheep.Baa(1, "project known: %s %s", k, *v) // useful to see in log what projects we can see
			}
		} else {
			id2pname = make(map[string]*string) // empty maps and we'll never generate a translation from project name to tenant ID since there are no default admin creds
			pname2id = make(map[string]*string)
			if def_project != nil {
				osif_sheep.Baa(0, "WRN: unable to use admin information (%s, proj=%s, reg=%s) to authorise with openstack  [TGUOSI009]", def_usr, def_project, def_region)
			} else {
				osif_sheep.Baa(0, "WRN: unable to use admin information (%s, proj=no-project, reg=%s) to authorise with openstack  [TGUOSI009]", def_usr, def_region) // YES msg ids are duplicated here
			}
		}

		if os_list == "all" {
			os_refs, _ = refresh_creds(os_admin, os_refs, id2pname) // for each project in id2pname get current ostack struct (auth)
			for k := range os_refs {
				osif_sheep.Baa(1, "initial os_list member: %s", k)
			}
		} else {
			if strings.Index(os_list, ",") > 0 {
				os_sects = strings.Split(os_list, ",")
			} else {
				os_sects = strings.Split(os_list, " ")
			}

			os_refs = make(map[string]*ostack.Ostack, len(os_sects)*2) // length is a guideline, not a hard value
			for i := 0; i < len(os_sects); i++ {
				osif_sheep.Baa(1, "creating openstack interface for %s", os_sects[i])
				url := def_url
				usr := def_usr
				passwd := def_passwd
				project := &os_sects[i]

				if cfg_data[os_sects[i]] != nil { // section name supplied, override defaults with information from the section
					if cfg_data[os_sects[i]]["url"] != nil {
						url = cfg_data[os_sects[i]]["url"]
					}
					if cfg_data[os_sects[i]]["usr"] != nil {
						usr = cfg_data[os_sects[i]]["usr"]
					}
					if cfg_data[os_sects[i]]["passwd"] != nil {
						passwd = cfg_data[os_sects[i]]["passwd"]
					}
					if cfg_data[os_sects[i]]["project"] != nil {
						project = cfg_data[os_sects[i]]["project"]
					}
				}
				os_refs[*project] = ostack.Mk_ostack(url, usr, passwd, project)
				os_refs["_ref_"] = os_refs[*project] // a quick access reference when any one will do
			}
		}

		os_projects = make(map[string]*osif_project)
		add2projects(os_projects, os_refs, pname2id, 0) // add references to the projects list
	}

	// ---------------- end config parsing ----------------------------------------

	if os_admin != nil { // only if we are using openstack as a database
		//tklr.Add_spot( 3, my_chan, REQ_GENCREDS, nil, 1 )						// add tickle spot to drive us once in 3s and then another to drive us based on config refresh rate
		tklr.Add_spot(int64(180), my_chan, REQ_GENCREDS, nil, ipc.FOREVER)
	}

	osif_sheep.Baa(2, "osif manager is running  %x", my_chan)
	for {
		msg = <-my_chan // wait for next message from tickler
		msg.State = nil // default to all OK

		osif_sheep.Baa(3, "processing request: %d", msg.Msg_type)
		switch msg.Msg_type {
		case REQ_GENMAPS: // driven by tickler
			// deprecated with switch to lazy update

		case REQ_GENCREDS: // driven by tickler now and then
			if os_admin != nil {
				os_refs, pname2id, id2pname = update_project(os_admin, os_refs, os_projects, pname2id, id2pname, os_list == "all")
			}

			/* ---- before lite ----
			case REQ_VM2IP:														// driven by tickler; gen a new vm translation map and push to net mgr
				m := mapvm2ip( os_refs )
				if m != nil {
					count := 0;
					msg := ipc.Mk_chmsg( )
					msg.Send_req( nw_ch, nil, REQ_VM2IP, m, nil )					// send new map to network as it is managed there
					osif_sheep.Baa( 2, "VM2IP mapping updated from openstack" )
					for k, v := range m {
						osif_sheep.Baa( 3, "VM mapped: %s ==> %s", k, *v )
						count++;
					}
					osif_sheep.Baa( 2, "mapped %d VM names/IDs from openstack (verbose 3 for debug list)", count )
				}
			*/

		case REQ_IP2MACMAP: // generate an ip to mac map and send to those who need it (fq_mgr at this point)
			freq := ipc.Mk_chmsg() // need a new request to pass to fq_mgr
			data, err := get_ip2mac(os_projects)
			if err == nil {
				osif_sheep.Baa(2, "sending ip2mac map to fq_mgr")
				freq.Send_req(fq_ch, nil, REQ_IP2MACMAP, data, nil) // request data forward
				msg.State = nil                                     // response ok back to requestor
			} else {
				msg.State = err // error goes back to requesting process
			}

		case REQ_CHOSTLIST:
			if msg.Response_ch != nil { // no sense going off to ostack if no place to send the list
				osif_sheep.Baa(2, "starting list host")
				msg.Response_data, msg.State = get_hosts(os_refs)
				osif_sheep.Baa(2, "finishing list host")
			} else {
				osif_sheep.Baa(0, "WRN: no response channel for host list request  [TGUOSI012]")
			}

			/* ======= don't think these are needed but holding ======
			case REQ_PROJNAME2ID:					// translate a project name (tenant) to ID
				if msg.Response_ch != nil {
					pname := msg.Req_data.( *string )
					if s, ok := pname2id[*pname]; ok {			// translate if there, else assume it's in its "final" form
						msg.Response_data = s
					} else {
						msg.Response_data = pname
					}
				}

			*/

		case REQ_VALIDATE_TOKEN: // given token/tenant validate it and translate tenant name to ID if given; returns just ID
			if msg.Response_ch != nil {
				s := msg.Req_data.(*string)
				*s += "/"                                 // add trailing slant to simulate "data"
				if !have_project(s, pname2id, id2pname) { // ensure that we have creds for this project, if not attempt to get
					os_refs, pname2id, id2pname = update_project(os_admin, os_refs, os_projects, pname2id, id2pname, os_list == "all")
				}
				msg.Response_data, msg.State = validate_token(s, os_refs, pname2id, req_token)
			}

		case REQ_GET_HOSTINFO: // dig out all of the bits of host info for a single host from openstack and return in a network update struct
			if msg.Response_ch != nil {
				go get_os_hostinfo(msg, os_refs, os_projects, id2pname, pname2id) // do it asynch and return the result on the message channel
				msg = nil                                                         // prevent early response
			}

		case REQ_GET_PROJ_HOSTS:
			if msg.Response_ch != nil {
				go get_all_osvm_info(msg, os_refs, os_projects, id2pname, pname2id) // do it asynch and return the result on the message channel
				msg = nil                                                           // prevent response from this function
			}

		case REQ_GET_DEFGW: // dig out the default gateway for a project
			if msg.Response_ch != nil {
				go get_os_defgw(msg, os_refs, os_projects, id2pname, pname2id) // do it asynch and return the result on the message channel
				msg = nil                                                      // prevent early response
			}

		case REQ_VALIDATE_HOST: // validate and translate a [token/]project-name/host  string
			if msg.Response_ch != nil {
				if !have_project(msg.Req_data.(*string), pname2id, id2pname) { // ensure that we have creds for this project, if not attempt to get
					os_refs, pname2id, id2pname = update_project(os_admin, os_refs, os_projects, pname2id, id2pname, os_list == "all")
				}
				msg.Response_data, msg.State = validate_token(msg.Req_data.(*string), os_refs, pname2id, req_token)
			}

		case REQ_XLATE_HOST: // accepts a [token/][project/]host name and translate project to an ID
			if msg.Response_ch != nil {
				if !have_project(msg.Req_data.(*string), pname2id, id2pname) { // ensure that we have creds for this project, if not attempt to get
					os_refs, pname2id, id2pname = update_project(os_admin, os_refs, os_projects, pname2id, id2pname, os_list == "all")
				}
				msg.Response_data, msg.State = validate_token(msg.Req_data.(*string), os_refs, pname2id, false) // same process as validation but token not required
			}

		case REQ_VALIDATE_TEGU_ADMIN: // validate that the token is for the tegu user
			if msg.Response_ch != nil {
				if !have_project(msg.Req_data.(*string), pname2id, id2pname) { // ensure that we have creds for this project, if not attempt to get
					os_refs, pname2id, id2pname = update_project(os_admin, os_refs, os_projects, pname2id, id2pname, os_list == "all")
				}
				msg.State = validate_admin_token(os_admin, msg.Req_data.(*string), def_usr)
				msg.Response_data = ""
			}

		case REQ_HAS_ANY_ROLE: // given a token and list of roles, returns true if any role listed is listed by openstack for the token
			if msg.Response_ch != nil {
				d := msg.Req_data.(*string)
				dtoks := strings.Split(*d, " ") // data assumed to be token <space> role[,role...]
				if len(dtoks) > 1 {
					msg.Response_data, msg.State = has_any_role(os_refs, os_admin, &dtoks[0], &dtoks[1])
				} else {
					msg.State = fmt.Errorf("has_any_role: bad input data")
					msg.Response_data = false
				}

			}

		case REQ_PNAME2ID: // user, project, tenant (what ever) name to ID
			if msg.Response_ch != nil {
				msg.Response_data = pname2id[*(msg.Req_data.(*string))]
				if msg.Response_data.(*string) == nil { // maybe it was an ID that came in
					if id2pname[*(msg.Req_data.(*string))] != nil { // if in id map, then return the string (the id) they passed (#202)
						msg.Response_data = msg.Req_data.(*string)
					} else {
						msg.Response_data = nil // couldn't translate
					}
				}
			}

		default:
			osif_sheep.Baa(1, "unknown request: %d", msg.Msg_type)
			msg.Response_data = nil
			if msg.Response_ch != nil {
				msg.State = fmt.Errorf("osif: unknown request (%d)", msg.Msg_type)
			}
		}

		if msg != nil { // if msg wasn't passed off to a go routine
			osif_sheep.Baa(3, "processing request complete: %d", msg.Msg_type)

			if msg.Response_ch != nil { // if a response channel was provided
				msg.Response_ch <- msg // send our result back to the requestor
			}
		}
	}
}
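
Several of the cases above expect their payload packed into a single string; REQ_HAS_ANY_ROLE, for instance, wants "token<space>role[,role...]" and answers with a boolean. A hedged sketch of a caller building such a request (the token and role names are placeholders; the channel and constant names are those used inside the managers package):

// minimal sketch: ask osif whether a token carries any of the listed roles
data := "SOME-KEYSTONE-TOKEN admin,tegu_admin"             // token, a space, then a comma separated role list
my_ch := make(chan *ipc.Chmsg)
req := ipc.Mk_chmsg()
req.Send_req(osif_ch, my_ch, REQ_HAS_ANY_ROLE, &data, nil) // osif expects a *string payload
req = <-my_ch                                              // block for the reply on our private channel
if req.State == nil {
	has_role := req.Response_data                          // boolean, per the REQ_HAS_ANY_ROLE case above
	_ = has_role
}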
Example #5
File: tegu.go  Project: dhanunjaya/tegu
func main() {
	var (
		version      string  = "v3.1.8/13076"
		cfg_file     *string = nil
		api_port     *string // command line option vars must be pointers
		verbose      *bool
		needs_help   *bool
		fl_host      *string
		super_cookie *string
		chkpt_file   *string

		// various comm channels for threads -- we declare them here so they can be passed to managers that need them
		nw_ch     chan *ipc.Chmsg // network graph manager
		rmgr_ch   chan *ipc.Chmsg // reservation manager
		rmgrlu_ch chan *ipc.Chmsg // reservation manager lookup channel
		osif_ch   chan *ipc.Chmsg // openstack interface
		fq_ch     chan *ipc.Chmsg // flow queue manager
		am_ch     chan *ipc.Chmsg // agent manager channel

		wgroup sync.WaitGroup
	)

	sheep = bleater.Mk_bleater(1, os.Stderr)
	sheep.Set_prefix("main/3.1")

	needs_help = flag.Bool("?", false, "show usage")

	chkpt_file = flag.String("c", "", "check-point-file")
	cfg_file = flag.String("C", "", "configuration-file")
	fl_host = flag.String("f", "", "floodlight_host:port")
	api_port = flag.String("p", "29444", "api_port")
	super_cookie = flag.String("s", "", "admin-cookie")
	verbose = flag.Bool("v", false, "verbose")

	flag.Parse() // actually parse the commandline

	if *needs_help {
		usage(version)
		os.Exit(0)
	}

	if *verbose {
		sheep.Set_level(1)
	}
	sheep.Baa(1, "tegu %s started", version)
	sheep.Baa(1, "http api is listening on: %s", *api_port)

	if *super_cookie == "" { // must have something and if not supplied this is probably not guessable without the code
		x := "20030217"
		super_cookie = &x
	}

	nw_ch = make(chan *ipc.Chmsg, 128)      // create the channels that the threads will listen to
	fq_ch = make(chan *ipc.Chmsg, 1024)     // reqmgr will spew requests expecting a response (asynch) only if there is an error, so channel must be buffered
	am_ch = make(chan *ipc.Chmsg, 1024)     // agent manager channel
	rmgr_ch = make(chan *ipc.Chmsg, 2048)   // resmgr main channel for most requests
	rmgrlu_ch = make(chan *ipc.Chmsg, 1024) // special channel for reservation look-ups (RMLU_ requests)
	osif_ch = make(chan *ipc.Chmsg, 1024)

	err := managers.Initialise(cfg_file, &version, nw_ch, rmgr_ch, rmgrlu_ch, osif_ch, fq_ch, am_ch) // specific things that must be initialised with data from main so init() doesn't work
	if err != nil {
		sheep.Baa(0, "ERR: unable to initialise: %s\n", err)
		os.Exit(1)
	}
	managers.Log_Restart(version)

	go managers.Http_api(api_port, nw_ch, rmgr_ch) // start early so we bind to port quickly, but don't allow requests until late
	go managers.Res_manager(rmgr_ch, super_cookie) // manage the reservation inventory
	go managers.Osif_mgr(osif_ch)                  // openstack interface; early so we get a list of stuff before we start network
	go managers.Network_mgr(nw_ch, fl_host)        // manage the network graph
	go managers.Agent_mgr(am_ch)
	go managers.Fq_mgr(fq_ch, fl_host)

	my_chan := make(chan *ipc.Chmsg) // channel and request block to ping net, and then to send all sys up
	req := ipc.Mk_chmsg()

	/*
		Block until the network is initialised. We need to do this so that when the checkpoint file is read reservations
		can be added without missing network pieces.  Even if there is no checkpoint file, or it's empty, blocking
		prevents reservation rejections because the network graph isn't in working order.  At the moment, with lazy
		updating, the block is until we have a physical host map back from the agent world.  This can sometimes take
		a minute or two.
	*/
	for { // hard block to wait on network readiness
		req.Response_data = 0
		req.Send_req(nw_ch, my_chan, managers.REQ_STATE, nil, nil) // 'ping' network manager; it will respond after initial build
		req = <-my_chan                                            // block until we have a response back

		if req.Response_data.(int) == 2 { // wait until we have everything that the network needs to build a reservation
			break
		}

		sheep.Baa(2, "waiting for network to initialise: need state 2, current state = %d", req.Response_data.(int))
		time.Sleep(5 * time.Second)
	}

	if *chkpt_file != "" {
		sheep.Baa(1, "network initialised, sending chkpt load request")
		req.Send_req(rmgr_ch, my_chan, managers.REQ_LOAD, chkpt_file, nil)
		req = <-my_chan // block until the file is loaded

		if req.State != nil {
			sheep.Baa(0, "ERR: unable to load checkpoint file: %s: %s\n", *chkpt_file, req.State)
			os.Exit(1)
		}
	} else {
		sheep.Baa(1, "network initialised, opening up system for all requests")
	}

	req.Send_req(rmgr_ch, nil, managers.REQ_ALLUP, nil, nil) // send all clear to the managers that need to know
	managers.Set_accept_state(true)                          // http doesn't have a control loop like others, so needs this

	wgroup.Add(1) // forces us to block forever since no goroutine gets the group to dec when finished (they don't!)
	wgroup.Wait()
	os.Exit(0)
}
Example #6
File: agent.go  Project: prydeep/tegu
func Agent_mgr(ach chan *ipc.Chmsg) {
	var (
		port      string = "29055" // port we'll listen on for connections
		adata     *agent_data
		host_list string = ""
		dscp_list string = "46 26 18" // list of dscp values that are used to promote a packet to the pri queue in intermed switches
		refresh   int64  = 60
		iqrefresh int64  = 1800 // intermediate queue refresh (this can take a long time, keep from clogging the works)
	)

	adata = &agent_data{}
	adata.agents = make(map[string]*agent)

	am_sheep = bleater.Mk_bleater(0, os.Stderr) // allocate our bleater and attach it to the master
	am_sheep.Set_prefix("agentmgr")
	tegu_sheep.Add_child(am_sheep) // we become a child so that if the master vol is adjusted we'll react too

	// suss out config settings from our section
	if cfg_data["agent"] != nil {
		if p := cfg_data["agent"]["port"]; p != nil {
			port = *p
		}
		if p := cfg_data["agent"]["verbose"]; p != nil {
			am_sheep.Set_level(uint(clike.Atoi(*p)))
		}
		if p := cfg_data["agent"]["refresh"]; p != nil {
			refresh = int64(clike.Atoi(*p))
		}
		if p := cfg_data["agent"]["iqrefresh"]; p != nil {
			iqrefresh = int64(clike.Atoi(*p))
			if iqrefresh < 90 {
				am_sheep.Baa(1, "iqrefresh in configuration file is too small, set to 90 seconds")
				iqrefresh = 90
			}
		}
	}
	if cfg_data["default"] != nil { // we pick some things from the default section too
		if p := cfg_data["default"]["pri_dscp"]; p != nil { // list of dscp (diffserv) values that match for priority promotion
			dscp_list = *p
			am_sheep.Baa(1, "dscp priority list from config file: %s", dscp_list)
		} else {
			am_sheep.Baa(1, "dscp priority list not in config file, using defaults: %s", dscp_list)
		}
	}

	dscp_list = shift_values(dscp_list) // must shift values before giving to agent

	// enforce some sanity on config file settings
	am_sheep.Baa(1, "agent_mgr thread started: listening on port %s", port)

	tklr.Add_spot(2, ach, REQ_MAC2PHOST, nil, 1)                   // tickle once, very soon after starting, to get a mac translation
	tklr.Add_spot(10, ach, REQ_INTERMEDQ, nil, 1)                  // tickle once, very soon, to start an intermediate refresh asap
	tklr.Add_spot(refresh, ach, REQ_MAC2PHOST, nil, ipc.FOREVER)   // recurring tickle to get host mapping
	tklr.Add_spot(iqrefresh, ach, REQ_INTERMEDQ, nil, ipc.FOREVER) // recurring tickle to ensure intermediate switches are properly set

	sess_chan := make(chan *connman.Sess_data, 1024) // channel for comm from agents (buffers, disconns, etc)
	smgr := connman.NewManager(port, sess_chan)

	for {
		select { // wait on input from either channel
		case req := <-ach:
			req.State = nil // nil state is OK, no error

			am_sheep.Baa(3, "processing request %d", req.Msg_type)

			switch req.Msg_type {
			case REQ_NOOP: // just ignore -- acts like a ping if there is a return channel

			case REQ_SENDALL: // send request to all agents
				if req.Req_data != nil {
					adata.send2all(smgr, req.Req_data.(string))
				}

			case REQ_SENDLONG: // send a long request to one agent
				if req.Req_data != nil {
					adata.send2one(smgr, req.Req_data.(string))
				}

			case REQ_SENDSHORT: // send a short request to one agent (round robin)
				if req.Req_data != nil {
					adata.send2one(smgr, req.Req_data.(string))
				}

			case REQ_MAC2PHOST: // send a request for agent to generate  mac to phost map
				if host_list != "" {
					adata.send_mac2phost(smgr, &host_list)
				}

			case REQ_CHOSTLIST: // a host list from fq-manager
				if req.Req_data != nil {
					host_list = *(req.Req_data.(*string))
				}

			case REQ_INTERMEDQ:
				req.Response_ch = nil
				if host_list != "" {
					adata.send_intermedq(smgr, &host_list, &dscp_list)
				}

			}

			am_sheep.Baa(3, "processing request finished %d", req.Msg_type) // we seem to wedge in network, this will be chatty, but may help
			if req.Response_ch != nil {                                     // if response needed; send the request (updated) back
				req.Response_ch <- req
			}

		case sreq := <-sess_chan: // data from a connection or TCP listener
			switch sreq.State {
			case connman.ST_ACCEPTED: // newly accepted connection; no action

			case connman.ST_NEW: // new connection
				a := adata.Mk_agent(sreq.Id)
				am_sheep.Baa(1, "new agent: %s [%s]", a.id, sreq.Data)
				if host_list != "" { // immediate request for this
					adata.send_mac2phost(smgr, &host_list)
					adata.send_intermedq(smgr, &host_list, &dscp_list)
				}

			case connman.ST_DISC:
				am_sheep.Baa(1, "agent dropped: %s", sreq.Id)
				if _, not_nil := adata.agents[sreq.Id]; not_nil {
					delete(adata.agents, sreq.Id)
				} else {
					am_sheep.Baa(1, "did not find an agent with the id: %s", sreq.Id)
				}
				adata.build_list() // rebuild the list to drop the agent

			case connman.ST_DATA:
				if _, not_nil := adata.agents[sreq.Id]; not_nil {
					cval := 100
					if len(sreq.Buf) < 100 { // don't try to go beyond if chop value too large
						cval = len(sreq.Buf)
					}
					am_sheep.Baa(2, "data: [%s]  %d bytes received:  first 100b: %s", sreq.Id, len(sreq.Buf), sreq.Buf[0:cval])
					adata.agents[sreq.Id].process_input(sreq.Buf)
				} else {
					am_sheep.Baa(1, "data from unknown agent: [%s]  %d bytes ignored:  %s", sreq.Id, len(sreq.Buf), sreq.Buf)
				}
			}
		} // end select
	}
}
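
Payloads for the send cases above ride in Req_data as plain strings (typically the json command blobs the agents understand). A hedged sketch of a fire-and-forget broadcast; the json body below is a placeholder, not a real agent command, and the channel/constant names are those used inside the package:

// minimal sketch: broadcast a placeholder command string to every connected agent
cmd := `{ "ctype": "action_list", "actions": [ { "atype": "ping" } ] }` // hypothetical payload
req := ipc.Mk_chmsg()
req.Send_req(am_ch, nil, REQ_SENDALL, cmd, nil) // nil response channel: no reply expected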
Example #7
File: fq_mgr.go  Project: robert-eby/tegu
/*
	The main go routine to act on messages sent to our channel. We expect messages from the
	reservation manager, and from a tickler that causes us to evaluate the need to resize
	ovs queues.

	DSCP values:  DSCP values range from 0-63 decimal, but when described on or by
		flow-mods they are shifted two bits to the left. The send flow mod function will
		do the needed shifting, so all values outside of that one function should assume/use
		decimal values in the range of 0-63.

*/
func Fq_mgr(my_chan chan *ipc.Chmsg, sdn_host *string) {

	var (
		uri_prefix   string = ""
		msg          *ipc.Chmsg
		data         []interface{}      // generic list of data on some requests
		fdata        *Fq_req            // flow-mod request data
		qcheck_freq  int64              = 5
		hcheck_freq  int64              = 180
		host_list    *string                            // current set of openstack real hosts
		ip2mac       map[string]*string                 // translation from ip address to mac
		switch_hosts *string                            // from config file and overrides openstack list if given (mostly testing)
		ssq_cmd      *string                            // command string used to set switch queues (from config file)
		send_all     bool               = false         // send all flow-mods; false means send just ingress/egress and not intermediate switch f-mods
		alt_table    int                = DEF_ALT_TABLE // meta data marking table
		phost_suffix *string            = nil           // physical host suffix added to each host name in the list from openstack (config)

		//max_link_used	int64 = 0			// the current maximum link utilisation
	)

	fq_sheep = bleater.Mk_bleater(0, os.Stderr) // allocate our bleater and attach it to the master
	fq_sheep.Set_prefix("fq_mgr")
	tegu_sheep.Add_child(fq_sheep) // we become a child so that if the master vol is adjusted we'll react too

	// -------------- pick up config file data if there --------------------------------
	if *sdn_host == "" { // not supplied on command line, pull from config
		if sdn_host = cfg_data["default"]["sdn_host"]; sdn_host == nil { // no default; when not in config, then it's turned off and we send to agent
			sdn_host = &empty_str
		}
	}

	if cfg_data["default"]["queue_type"] != nil {
		if *cfg_data["default"]["queue_type"] == "endpoint" {
			send_all = false
		} else {
			send_all = true
		}
	}
	if p := cfg_data["default"]["alttable"]; p != nil { // this is the base; we use alt_table to alt_table + (n-1) when we need more than 1 table
		alt_table = clike.Atoi(*p)
	}

	if cfg_data["fqmgr"] != nil { // pick up things in our specific setion
		if dp := cfg_data["fqmgr"]["ssq_cmd"]; dp != nil { // set switch queue command
			ssq_cmd = dp
		}

		/*
			if p := cfg_data["fqmgr"]["default_dscp"]; p != nil {		// this is a single value and should not be confused with the dscp list in the default section of the config
				dscp = clike.Atoi( *p )
			}
		*/

		if p := cfg_data["fqmgr"]["queue_check"]; p != nil { // queue check frequency from the control file
			qcheck_freq = clike.Atoi64(*p)
			if qcheck_freq < 5 {
				qcheck_freq = 5
			}
		}

		if p := cfg_data["fqmgr"]["host_check"]; p != nil { // frequency of checking for new _real_ hosts from openstack
			hcheck_freq = clike.Atoi64(*p)
			if hcheck_freq < 30 {
				hcheck_freq = 30
			}
		}

		if p := cfg_data["fqmgr"]["switch_hosts"]; p != nil {
			switch_hosts = p
		}

		if p := cfg_data["fqmgr"]["verbose"]; p != nil {
			fq_sheep.Set_level(uint(clike.Atoi(*p)))
		}

		if p := cfg_data["fqmgr"]["phost_suffix"]; p != nil { // suffix added to physical host strings for agent commands
			if *p != "" {
				phost_suffix = p
				fq_sheep.Baa(1, "physical host names will be suffixed with: %s", *phost_suffix)
			}
		}
	}
	// ----- end config file munging ---------------------------------------------------

	//tklr.Add_spot( qcheck_freq, my_chan, REQ_SETQUEUES, nil, ipc.FOREVER );  	// tickle us every few seconds to adjust the ovs queues if needed

	if switch_hosts == nil {
		tklr.Add_spot(2, my_chan, REQ_CHOSTLIST, nil, 1)                     // tickle once, very soon after starting, to get a host list
		tklr.Add_spot(hcheck_freq, my_chan, REQ_CHOSTLIST, nil, ipc.FOREVER) // tickles us every once in a while to update host list
		fq_sheep.Baa(2, "host list will be requested from openstack every %ds", hcheck_freq)
	} else {
		host_list = switch_hosts
		fq_sheep.Baa(0, "static host list from config used for setting OVS queues: %s", *host_list)
	}

	if sdn_host != nil && *sdn_host != "" {
		uri_prefix = fmt.Sprintf("http://%s", *sdn_host)
	}

	fq_sheep.Baa(1, "flowmod-queue manager is running, sdn host: %s", *sdn_host)
	for {
		msg = <-my_chan // wait for next message
		msg.State = nil // default to all OK

		fq_sheep.Baa(3, "processing message: %d", msg.Msg_type)
		switch msg.Msg_type {
		case REQ_GEN_FMOD: // generic fmod; just pass it along w/o any special handling
			if msg.Req_data != nil {
				fdata = msg.Req_data.(*Fq_req) // pointer at struct with all of our expected goodies
				send_gfmod_agent(fdata, ip2mac, host_list, phost_suffix)
			}

		case REQ_BWOW_RESERVE: // oneway bandwidth flow-mod generation
			msg.Response_ch = nil          // nothing goes back from this
			fdata = msg.Req_data.(*Fq_req) // pointer at struct with all of the expected goodies
			send_bwow_fmods(fdata, ip2mac, phost_suffix)

		case REQ_BW_RESERVE: // bandwidth endpoint flow-mod creation; single agent script creates all needed fmods
			fdata = msg.Req_data.(*Fq_req) // pointer at struct with all of the expected goodies
			send_bw_fmods(fdata, ip2mac, phost_suffix)
			msg.Response_ch = nil // nothing goes back from this

		case REQ_IE_RESERVE: // proactive ingress/egress reservation flowmod  (this is likely deprecated as of 3/21/2015 -- resmgr invokes the bw_fmods script via agent)
			fdata = msg.Req_data.(*Fq_req) // user view of what the flow-mod should be

			if uri_prefix != "" { // an sdn controller -- skoogi -- is enabled
				msg.State = gizmos.SK_ie_flowmod(&uri_prefix, *fdata.Match.Ip1, *fdata.Match.Ip2, fdata.Expiry, fdata.Espq.Queuenum, fdata.Espq.Switch, fdata.Espq.Port)

				if msg.State == nil { // no error, no response to requestor
					fq_sheep.Baa(2, "proactive reserve successfully sent: uri=%s h1=%s h2=%s exp=%d qnum=%d swid=%s port=%d dscp=%d",
						uri_prefix, fdata.Match.Ip1, fdata.Match.Ip2, fdata.Expiry, fdata.Espq.Queuenum, fdata.Espq.Switch, fdata.Espq.Port)
					msg.Response_ch = nil
				} else {
					// do we need to suss out the id and mark it failed, or set a timer on it,  so as not to flood reqmgr with errors?
					fq_sheep.Baa(1, "ERR: proactive reserve failed: uri=%s h1=%s h2=%s exp=%d qnum=%d swid=%s port=%d  [TGUFQM008]",
						uri_prefix, fdata.Match.Ip1, fdata.Match.Ip2, fdata.Expiry, fdata.Espq.Queuenum, fdata.Espq.Switch, fdata.Espq.Port)
				}
			} else {
				// q-lite now generates one flowmod  in each direction because of the ITONS requirements
				if send_all || fdata.Espq.Queuenum > 1 { // if sending all fmods, or this has a non-intermediate queue
					cdata := fdata.Clone()       // copy so we can alter w/o affecting sender's copy
					if cdata.Espq.Port == -128 { // we'll assume in this case that the switch given is the host name and we need to set the switch to br-int
						swid := "br-int"
						cdata.Swid = &swid
					}

					if cdata.Resub == nil {
						resub_list := ""                         // resub to alternate table to set a meta mark, then to table 0 to hit openstack junk
						if cdata.Single_switch || fdata.Dir_in { // must use the base table for inbound traffic OR same switch traffic (bug 2015/1/26)
							resub_list = fmt.Sprintf("%d 0", alt_table) // base alt_table is for 'local' traffic (traffic that doesn't go through br-rl)
						} else {
							resub_list = fmt.Sprintf("%d 0", alt_table+1) // base+1 is for OUTBOUND only traffic that must go through the rate limiting bridge
						}
						cdata.Resub = &resub_list
					}

					meta := "0x00/0x07" // match-value/mask; match only when meta neither of our two bits, nor the agent bit (0x04) are set
					cdata.Match.Meta = &meta

					if fdata.Dir_in { // inbound to this switch we need to revert dscp from our settings to the 'original' settings
						if cdata.Single_switch {
							cdata.Match.Dscp = -1 // there is no match if both on same switch
							send_gfmod_agent(cdata, ip2mac, host_list, phost_suffix)
						} else {
							cdata.Match.Dscp = cdata.Dscp // match the dscp that was added on ingress
							if !cdata.Dscp_koe {          // dropping the value on exit
								cdata.Action.Dscp = 0 // set action to turn it off, otherwise we let it ride (no overt action)
							}

							send_gfmod_agent(cdata, ip2mac, host_list, phost_suffix)
						}
					} else { // outbound from this switch set the dscp value specified on the reservation
						cdata.Match.Dscp = -1 // on outbound there is no dscp match, ensure this is off
						if cdata.Single_switch {
							send_gfmod_agent(cdata, ip2mac, host_list, phost_suffix) // in single switch mode there is no dscp value needed
						} else {
							cdata.Action.Dscp = cdata.Dscp // otherwise set the value and send
							send_gfmod_agent(cdata, ip2mac, host_list, phost_suffix)
						}
					}
				}

				msg.Response_ch = nil
			}

		case REQ_ST_RESERVE: // reservation fmods for traffic steering
			msg.Response_ch = nil // for now, nothing goes back
			if msg.Req_data != nil {
				fq_data := msg.Req_data.(*Fq_req) // request data
				if uri_prefix != "" {             // an sdn controller -- skoogi -- is enabled (not supported)
					fq_sheep.Baa(0, "ERR: steering reservations are not supported with skoogi (SDNC); no flow-mods pushed")
				} else {
					send_stfmod_agent(fq_data, ip2mac, host_list)
				}
			} else {
				fq_sheep.Baa(0, "CRI: missing data on st-reserve request to fq-mgr")
			}

		case REQ_SK_RESERVE: // send a reservation to skoogi
			data = msg.Req_data.([]interface{}) // msg data expected to be array of interface: h1, h2, expiry, queue h1/2 must be IP addresses
			if uri_prefix != "" {
				fq_sheep.Baa(2, "msg to reserve: %s %s %s %d %d", uri_prefix, data[0].(string), data[1].(string), data[2].(int64), data[3].(int))
				msg.State = gizmos.SK_reserve(&uri_prefix, data[0].(string), data[1].(string), data[2].(int64), data[3].(int))
			} else {
				fq_sheep.Baa(1, "reservation not sent, no sdn-host defined:  %s %s %s %d %d", uri_prefix, data[0].(string), data[1].(string), data[2].(int64), data[3].(int))
			}

		case REQ_SETQUEUES: // request from reservation manager which indicates something changed and queues need to be reset
			qlist := msg.Req_data.([]interface{})[0].([]string)
			if ssq_cmd != nil {
				adjust_queues(qlist, ssq_cmd, host_list) // if writing to a file and driving a local script
			} else {
				adjust_queues_agent(qlist, host_list, phost_suffix) // if sending json to an agent
			}

		case REQ_CHOSTLIST: // this is tricky as it comes from tickler as a request, and from osifmgr as a response, be careful!
			msg.Response_ch = nil // regardless of source, we should not reply to this request

			if msg.State != nil || msg.Response_data != nil { // response from ostack if with list or error
				if msg.Response_data.(*string) != nil {
					hls := strings.TrimLeft(*(msg.Response_data.(*string)), " \t") // ditch leading whitespace
					hl := &hls
					if *hl != "" {
						host_list = hl // ok to use it
						if phost_suffix != nil {
							fq_sheep.Baa(2, "host list from osif before suffix added: %s", *host_list)
							host_list = add_phost_suffix(host_list, phost_suffix) // in some cases ostack sends foo, but we really need to use foo-suffix (sigh)
						}
						send_hlist_agent(host_list) // send to agent_manager
						fq_sheep.Baa(2, "host list received from osif: %s", *host_list)
					} else {
						fq_sheep.Baa(1, "host list received from osif was discarded: ()")
					}
				} else {
					fq_sheep.Baa(0, "WRN: no  data from openstack; expected host list string  [TGUFQM009]")
				}
			} else {
				req_hosts(my_chan, fq_sheep) // send requests to osif for data
			}

		case REQ_IP2MACMAP: // a new map from osif
			if msg.Req_data != nil {
				newmap := msg.Req_data.(map[string]*string)
				if len(newmap) > 0 {
					ip2mac = newmap // safe to replace
					fq_sheep.Baa(2, "ip2mac translation received from osif: %d elements", len(ip2mac))
				} else {
					if ip2mac != nil {
						fq_sheep.Baa(2, "ip2mac translation received from osif: 0 elements -- kept old table with %d elements", len(ip2mac))
					} else {
						fq_sheep.Baa(2, "ip2mac translation received from osif: 0 elements -- no existing table to keep")
					}
				}
			} else {
				fq_sheep.Baa(0, "WRN: no  data from osif (nil map); expected ip2mac translation map  [TGUFQM010]")
			}
			msg.State = nil // state is always good

		default:
			fq_sheep.Baa(1, "unknown request: %d", msg.Msg_type)
			msg.Response_data = nil
			if msg.Response_ch != nil {
				msg.State = fmt.Errorf("unknown request (%d)", msg.Msg_type)
			}
		}

		fq_sheep.Baa(3, "processing message complete: %d", msg.Msg_type)
		if msg.Response_ch != nil { // if a response channel was provided
			fq_sheep.Baa(3, "sending response: %d", msg.Msg_type)
			msg.Response_ch <- msg // send our result back to the requestor
		}
	}
}
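
The two-bit shift called out in the header comment reflects the layout of the IP ToS byte: the 6-bit DSCP field sits in the high-order bits, so a decimal DSCP of 46 (EF) appears as 184 once shifted for a flow-mod. A small sketch of that arithmetic (an illustration only, not the project's shift_values implementation):

// minimal sketch of the dscp <-> tos-byte conversion described in the header comment
func dscp2tos(dscp int) int {
	return dscp << 2 // 46 (EF) becomes 184; the low two ECN bits stay zero
}

func tos2dscp(tos int) int {
	return tos >> 2 // undo the shift when reading a value back from a flow-mod
}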
Example #8
File: init.go  Project: prydeep/tegu
/*
	Initialisation for the package; run once automatically at startup.
*/
func init() {
	obj_sheep = bleater.Mk_bleater(0, os.Stderr) // allocate our bleater
	obj_sheep.Set_prefix("gizmos")
}
Example #9
func main() {

	home := os.Getenv("HOME")
	def_user := os.Getenv("LOGNAME")
	def_rdir := "/tmp/tegu_b" // rsync directory created on remote hosts
	def_rlist :=              // list of scripts to copy to remote hosts for execution
		"/usr/bin/create_ovs_queues " +
			"/usr/bin/map_mac2phost " +
			"/usr/bin/ovs_sp2uuid " +
			"/usr/bin/purge_ovs_queues " +
			"/usr/bin/ql_setup_irl " +
			"/usr/bin/ql_setup_ipt " +
			"/usr/bin/send_ovs_fmod " +
			"/usr/bin/tegu_add_mirror " +
			"/usr/bin/tegu_del_mirror " +
			"/usr/bin/ql_bw_fmods " +
			"/usr/bin/ql_bwow_fmods " +
			"/usr/bin/ql_set_trunks " +
			"/usr/bin/ql_filter_rtr " +
			"/usr/bin/setup_ovs_intermed "

	if home == "" {
		home = "/home/tegu" // probably bogus, but we'll have something
	}
	def_key := home + "/.ssh/id_rsa," + home + "/.ssh/id_dsa" // default ssh key to use

	needs_help := flag.Bool("?", false, "show usage") // define recognised command line options
	id := flag.Int("i", 0, "id")
	key_files := flag.String("k", def_key, "ssh-key file(s) for broker")
	log_dir := flag.String("l", "stderr", "log_dir")
	parallel := flag.Int("p", 10, "parallel ssh commands")
	no_rsync := flag.Bool("no-rsync", false, "turn off rsync")
	rdir := flag.String("rdir", def_rdir, "rsync remote directory")
	rlist := flag.String("rlist", def_rlist, "rsync file list")
	tegu_host := flag.String("h", "localhost:29055", "tegu_host:port")
	user := flag.String("u", def_user, "ssh user-name")
	verbose := flag.Bool("v", false, "verbose")
	vlevel := flag.Int("V", 1, "verbose-level")
	flag.Parse() // actually parse the commandline

	if *needs_help {
		usage(version)
		os.Exit(0)
	}

	if *id <= 0 {
		fmt.Fprintf(os.Stderr, "ERR: must enter -i id (number) on command line\n")
		os.Exit(1)
	}

	sheep = bleater.Mk_bleater(0, os.Stderr)
	sheep.Set_prefix(fmt.Sprintf("agent-%d", *id)) // append the id so that if multiple agents are running they'll use different log files

	if *needs_help {
		usage(version)
		os.Exit(0)
	}

	if *verbose {
		sheep.Set_level(1)
	} else {
		if *vlevel > 0 {
			sheep.Set_level(uint(*vlevel))
		}
	}

	if *log_dir != "stderr" { // allow it to stay on stderr
		lfn := sheep.Mk_logfile_nm(log_dir, 86400)
		sheep.Baa(1, "switching to log file: %s", *lfn)
		sheep.Append_target(*lfn, false)      // switch bleaters to the log file rather than stderr
		go sheep.Sheep_herder(log_dir, 86400) // start the function that will roll the log now and again
	}

	sheep.Baa(1, "tegu_agent %s started", version)
	sheep.Baa(1, "will contact tegu on port: %s", *tegu_host)

	jc := jsontools.Mk_jsoncache()                  // create json cache to buffer tegu datagram input
	sess_mgr := make(chan *connman.Sess_data, 1024) // session management to create tegu connections with and drive the session listener(s)
	smgr := connman.NewManager("", sess_mgr)        // get a manager, but no listen port opened

	connect2tegu(smgr, tegu_host, sess_mgr) // establish initial connection

	ntoks, key_toks := token.Tokenise_populated(*key_files, " ,") // allow space or , seps and drop nil tokens
	if ntoks <= 0 {
		sheep.Baa(0, "CRI: no ssh key files given (-k)")
		os.Exit(1)
	}
	keys := make([]string, ntoks)
	for i := range key_toks {
		keys[i] = key_toks[i]
	}
	broker := ssh_broker.Mk_broker(*user, keys)
	if broker == nil {
		sheep.Baa(0, "CRI: unable to create an ssh broker")
		os.Exit(1)
	}
	if !*no_rsync {
		sheep.Baa(1, "will sync these files to remote hosts: %s", *rlist)
		broker.Add_rsync(rlist, rdir)
	}
	sheep.Baa(1, "successfully created ssh_broker for user: %s, command path: %s", *user, *rdir)
	broker.Start_initiators(*parallel)

	for {
		select { // wait on input from any channel -- just one now, but who knows
		case sreq := <-sess_mgr: // data from the network
			switch sreq.State {
			case connman.ST_ACCEPTED: // shouldn't happen
				sheep.Baa(1, "this shouldn't happen; accepted session????")

			case connman.ST_NEW: // new connection; nothing to process here

			case connman.ST_DISC:
				sheep.Baa(1, "session to tegu was lost")
				connect2tegu(smgr, tegu_host, sess_mgr) // blocks until connected and reports on the conn_ch channel when done
				broker.Reset()                          // reset the broker each time we pick up a new tegu connection

			case connman.ST_DATA:
				sheep.Baa(3, "data: [%s]  %d bytes received", sreq.Id, len(sreq.Buf))
				jc.Add_bytes(sreq.Buf)
				jblob := jc.Get_blob() // get next blob if ready
				for jblob != nil {
					resp := handle_blob(jblob, broker, rdir)
					if resp != nil {
						for i := range resp {
							smgr.Write(sreq.Id, resp[i])
						}
					}

					jblob = jc.Get_blob() // get next blob if more than one in the cache
				}
			}
		} // end select
	}
}