// genTask builds a TaskInfo sized to the scheduler's per-task CPU and memory
// requirements, converting the supplied tags into Mesos labels.
func (sched *ExampleScheduler) genTask(tags map[string]string) *mesos.TaskInfo {
	taskId := &mesos.TaskID{
		Value: proto.String(strconv.Itoa(sched.tasksLaunched)),
	}
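	// Convert the caller-supplied tags into Mesos labels so they travel with the task.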
	labels := &mesos.Labels{
		Labels: []*mesos.Label{},
	}
	for key, value := range tags {
		log.Infoln("Tag being processed: " + key + " : " + value)
		labels.Labels = append(labels.Labels, shared.CreateLabel(key, value))
		log.Infoln("Current tags: %v", labels)
	}
	task := &mesos.TaskInfo{
		Name:     proto.String("go-task-" + taskId.GetValue()),
		TaskId:   taskId,
		Executor: sched.executor,
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", sched.cpuPerTask),
			util.NewScalarResource("mem", sched.memPerTask),
		},
		Labels: labels,
	}
	return task
}
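
// ResourceOffers is called when the Mesos master sends resource offers. For each
// offer it dequeues tasks while enough CPU and memory remain, checks whether the
// offering agent is suitable for each task, and launches the tasks that match.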
func (sched *ExampleScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	logOffers(offers)
	log.Infof("received some offers, but do I care?")

	for _, offer := range offers {
		remainingCpus := getOfferCpu(offer)
		remainingMems := getOfferMem(offer)

		var tasks []*mesos.TaskInfo
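		// Fill this offer with queued tasks for as long as it can fit another one.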
		for sched.cpuPerTask <= remainingCpus &&
			sched.memPerTask <= remainingMems &&
			len(sched.TaskQueue) > 0 {
			log.Infof("Launched tasks: " + string(sched.tasksLaunched))
			log.Infof("Tasks remaining ot be launched: " + string(len(sched.TaskQueue)))

			sched.tasksLaunched++

			task := sched.popTask()
			// Validate the task's labels; tasks missing required tags are dropped.
			taskType, err := shared.GetValueFromLabels(task.Labels, shared.Tags.TASK_TYPE)
			if err != nil {
				log.Infof("ERROR: Malformed task info, discarding task %v", task)
				continue
			}
			targetHost, err := shared.GetValueFromLabels(task.Labels, shared.Tags.TARGET_HOST)
			if err != nil && taskType != shared.TaskTypes.RUN_CONTAINER {
				log.Infof("ERROR: Malformed task info, discarding task %v", task)
				continue
			}
			containerName, err := shared.GetValueFromLabels(task.Labels, shared.Tags.CONTAINER_NAME)
			if err != nil {
				log.Infof("ERROR: Malformed task info, discarding task %v", task)
				continue
			}

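			// Decide whether the offering agent is suitable for this task, based on its type.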
			foundAMatch := false
			switch taskType {
			case shared.TaskTypes.RUN_CONTAINER:
				// A new container may run on any agent, provided a container with
				// the same name is not already running somewhere in the cluster.
				if _, ok := sched.ContainerSlaveMap[containerName]; ok {
					log.Infof("ERROR: %s is already running", containerName)
					continue
				}
				foundAMatch = true
			case shared.TaskTypes.GET_LOGS, shared.TaskTypes.CHECKPOINT_CONTAINER:
				// These tasks must run on the agent that currently hosts the container.
				if targetHost == offer.GetHostname() {
					foundAMatch = sched.ContainerSlaveMap[containerName] == targetHost
				}
			case shared.TaskTypes.RESTORE_CONTAINER:
				// A container may only be restored onto the requested host, and only
				// if it is not already running.
				if _, ok := sched.ContainerSlaveMap[containerName]; ok {
					log.Infof("ERROR: %s is already running", containerName)
					continue
				}
				if targetHost == offer.GetHostname() {
					foundAMatch = true
				}
			default:
				foundAMatch = true
			}
			if foundAMatch {
				task.SlaveId = offer.SlaveId
				task.Labels.Labels = append(task.Labels.Labels, shared.CreateLabel(shared.Tags.ACCEPTED_HOST, offer.GetHostname()))
				log.Infof("Prepared task: %s with offer %s for launch", task.GetName(), offer.Id.GetValue())

				tasks = append(tasks, task)
				remainingCpus -= sched.cpuPerTask
				remainingMems -= sched.memPerTask
			} else {
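				// Re-queue the unmatched task, but defer the push until this callback returns
				// so the same task is not popped again while this batch of offers is processed.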
				defer sched.pushTask(task)
			}
		}
		log.Infoln("Launching ", len(tasks), "tasks for offer", offer.Id.GetValue(), "\nSlaveID: ", offer.GetSlaveId(), "SlaveHostname: ", offer.GetHostname())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
	}
}