Пример #1
0
// main provisions a complete Debian Code Search deployment on Rackspace:
// it boots the main server from a snapshot (unless -dcs0_server_id is
// given), attaches two SSD block-storage volumes, builds (or reuses) the
// search index, fans the index shards out to six index servers, configures
// dcs-web and nginx on the main server, and finally points DNS at it.
func main() {
	flag.Parse()

	var err error
	rs, err = rackspace.NewClient()
	if err != nil {
		// Fatalf, not Fatal: the message contains a %v format verb.
		log.Fatalf("Could not create new rackspace client: %v\n", err)
	}

	serverId := *dcs0ServerId
	if serverId == "" {
		serverId = rs.ServerFromSnapshot("dcs-20g-systemd+dcs+psql",
			rackspace.CreateServerRequest{
				Name: "NEW-dcs-0",
				// This is a 4 GB standard instance
				FlavorRef: "5",
			})
	}

	log.Printf("-dcs0_server_id=%s\n", serverId)

	server, err := rs.GetServer(serverId)
	if err != nil {
		log.Fatal(err)
	}
	server = server.BecomeActiveOrDie(2 * time.Hour)

	// Attach an SSD block storage volume (unless one was specified)
	volumeId := *dcs0VolumeId
	if volumeId == "" {
		volumeId, err = rs.CreateBlockStorage(
			rackspace.CreateBlockStorageRequest{
				DisplayName: "NEW-dcs-src-0",
				Size:        220,
				VolumeType:  "SSD",
			})
		if err != nil {
			log.Fatal(err)
		}
	}

	// Attach an SSD block storage volume (unless one was specified)
	volumeMirrorId := *dcs0VolumeMirrorId
	if volumeMirrorId == "" {
		volumeMirrorId, err = rs.CreateBlockStorage(
			rackspace.CreateBlockStorageRequest{
				DisplayName: "NEW-dcs-mirror-0",
				// 100 GB is the minimum size. Last time we used 43 GB.
				Size:       100,
				VolumeType: "SSD",
			})
		if err != nil {
			log.Fatal(err)
		}
	}

	log.Printf("-dcs0_volume_id_dcs=%s\n", volumeId)
	log.Printf("-dcs0_volume_id_mirror=%s\n", volumeMirrorId)

	// We chose xvd[gh] so that it does not clash with xvda, xvdb and
	// whichever new devices a Rackspace base image might use in the
	// future :). We use a predictable path because it is easier than
	// figuring out the path in the subsequent automation.
	attachBlockStorageVolume(serverId, volumeId, "/dev/xvdg")
	attachBlockStorageVolume(serverId, volumeMirrorId, "/dev/xvdh")

	client, err := sshutil.Connect(server.AccessIPv6)
	if err != nil {
		log.Fatal(err)
	}

	// The /dcs/NEW/index.*.idx files are the artifact of the first stage, so
	// if they are present, skip the first stage entirely.
	if !client.Successful(`[ -s /dcs/NEW/index.0.idx ]`) {
		log.Printf("/dcs/NEW/index.0.idx not present, creating new index.\n")

		// Partition the remaining capacity on the server (≈ 160G) as on-disk
		// temporary space, since the temporary files exceed the 18G available in
		// /tmp on our base image.
		client = mountBlockStorage(client, "/dev/xvda2", "/tmp-disk")

		// Attach the SSD Block Storage volumes.
		client = mountBlockStorage(client, "/dev/xvdg1", "/dcs")
		client = mountBlockStorage(client, "/dev/xvdh1", "/dcs/source-mirror")

		client.RunOrDie("chown -R dcs.dcs /dcs/source-mirror")
		client.RunOrDie("chown -R dcs.dcs /dcs/")
		client.RunOrDie(`chmod 1777 /tmp-disk`)

		// TODO: timestamps after each step in update-index.sh would be nice
		client.WriteToFileOrDie("/dcs/update-index.sh", []byte(`#!/bin/sh
# Updates the source mirror, generates a new index, verifies it is serving and
# then swaps the old index with the new one.
#
# In case anything goes wrong, you can manually swap back the old index, see
# swap-index.sh

set -e

/bin/rm -rf /dcs/NEW /dcs/OLD /dcs/unpacked-new
/bin/mkdir /dcs/NEW /dcs/OLD

[ -d ~/.gnupg ] || mkdir ~/.gnupg
[ -e ~/.gnupg/trustedkeys.gpg ] || cp /usr/share/keyrings/debian-archive-keyring.gpg ~/.gnupg/trustedkeys.gpg

GOMAXPROCS=2 /usr/bin/dcs-debmirror -tcp_conns=20 >/tmp/fdm.log 2>&1
/usr/bin/debmirror --diff=none --method=http --rsync-extra=none -a none --source -s main -h http.debian.net -r /debian /dcs/source-mirror >/dev/null
/usr/bin/debmirror --diff=none --method=http --rsync-extra=none --exclude-deb-section=.* --include golang-mode --nocleanup -a none --arch amd64 -s main -h http.debian.net -r /debian /dcs/source-mirror >/dev/null

POPCONDUMP=$(mktemp)
if ! wget -q http://udd.debian.org/udd-popcon.sql.xz -O $POPCONDUMP
then
	wget -q http://public-udd-mirror.xvm.mit.edu/snapshots/udd-popcon.sql.xz -O $POPCONDUMP
fi
echo 'DROP TABLE popcon; DROP TABLE popcon_src;' | psql udd
xz -d -c $POPCONDUMP | psql udd
rm $POPCONDUMP

/usr/bin/compute-ranking \
	-mirror_path=/dcs/source-mirror

/usr/bin/dcs-unpack \
	-mirror_path=/dcs/source-mirror \
	-new_unpacked_path=/dcs/unpacked-new \
	-old_unpacked_path=/dcs/unpacked >/dev/null

/usr/bin/dcs-index \
	-index_shard_path=/dcs/NEW/ \
	-unpacked_path=/dcs/unpacked-new/ \
	-shards 6 >/dev/null

[ -d /dcs/unpacked ] && mv /dcs/unpacked /dcs/unpacked-old || true
mv /dcs/unpacked-new /dcs/unpacked
`))
		client.RunOrDie(`chmod +x /dcs/update-index.sh`)
		client.RunOrDie(`TMPDIR=/tmp-disk nohup su dcs -c "/bin/sh -c 'sh -x /dcs/update-index.sh >/tmp/update.log 2>&1 &'"`)

		// TODO: i also think we need some sort of lock here. perhaps let systemd run the updater?
	}

	log.Printf("Now waiting until /dcs/NEW/index.*.idx appear and are > 0 bytes\n")
	start := time.Now()
	// connErrors counts SSH connection failures while polling; we only give
	// up after 30 of them (the server may still be booting or indexing).
	connErrors := 0
	for time.Since(start) < 24*time.Hour {
		pollclient, err := sshutil.Connect(server.AccessIPv6)
		if err != nil {
			log.Printf("Non-fatal polling connection error: %v\n", err)
			connErrors++
			if connErrors > 30 {
				// Fatalf, not Fatal: the message contains a %s format verb.
				log.Fatalf("More than 30 connection errors connecting to %s, giving up.\n", server.AccessIPv6)
			}
			// Sleep before retrying: a bare “continue” would skip the
			// sleep at the bottom of the loop and hammer the server with
			// back-to-back connection attempts.
			time.Sleep(15 * time.Minute)
			continue
		}

		// TODO: flag for the number of shards
		// -mmin +15 requires the shards to be untouched for 15 minutes, so
		// that we do not copy files the indexer is still writing to.
		shardsFound := `[ $(find /dcs/NEW/ -iname "index.*.idx" -size +0 -mmin +15 | wc -l) -eq 6 ]`
		if pollclient.Successful(shardsFound) {
			log.Printf("All shards present.\n")
			break
		}

		time.Sleep(15 * time.Minute)
	}

	var indexServerIds []string
	indexServerIds = strings.Split(*dcsIndexServerIds, ",")
	if *dcsIndexServerIds == "" {
		// TODO: flag for the number of shards/servers
		indexServerIds = make([]string, 6)

		for i := 0; i < len(indexServerIds); i++ {
			indexServerIds[i] = rs.ServerFromSnapshot("dcs-20g-systemd+dcs",
				rackspace.CreateServerRequest{
					Name: fmt.Sprintf("NEW-dcs-index-%d", i),
					// This is a 2 GB standard instance
					FlavorRef: "4",
				})
		}
	}

	log.Printf("-index_server_ids=%s\n", strings.Join(indexServerIds, ","))

	// Wait for all index servers to become active, in parallel.
	done := make(chan bool)
	indexServers := make([]rackspace.Server, len(indexServerIds))
	for i := range indexServers {
		go func(i int) {
			server, err := rs.GetServer(indexServerIds[i])
			if err != nil {
				log.Fatal(err)
			}
			indexServers[i] = server.BecomeActiveOrDie(2 * time.Hour)
			done <- true
		}(i)
	}

	log.Printf("Waiting for all %d index servers to be available…\n",
		len(indexServerIds))
	for range indexServers {
		<-done
	}

	log.Printf("Index servers available. Copying index…")

	// The main server pushes the shards via scp, so it needs the private key.
	pubkey, err := ioutil.ReadFile("/home/michael/.ssh/dcs-auto-rs")
	if err != nil {
		log.Fatal(err)
	}
	client.WriteToFileOrDie("~/.ssh/dcs-auto-rs", pubkey)
	client.RunOrDie("chmod 600 ~/.ssh/dcs-auto-rs")

	// Copy one shard to each index server and start its backend, in parallel.
	for i, server := range indexServers {
		go func(i int, server rackspace.Server) {
			// Create /dcs/
			indexclient, err := sshutil.Connect(server.AccessIPv6)
			if err != nil {
				log.Fatal("Failed to dial: " + err.Error())
			}
			indexclient.RunOrDie("mkdir -p /dcs/")
			if indexclient.Successful(fmt.Sprintf("[ -e /dcs/index.%d.idx ]", i)) {
				log.Printf("Index already present, skipping.\n")
				done <- true
				return
			}
			// “|| true” instead of “rm -f” because globbing fails when there are
			// no matching files.
			indexclient.RunOrDie("rm /dcs/index.*.idx || true")

			client.RunOrDie(
				fmt.Sprintf("scp -o StrictHostKeyChecking=no -i ~/.ssh/dcs-auto-rs /dcs/NEW/index.%d.idx root@%s:/dcs/",
					i,
					server.PrivateIPv4()))
			indexclient.RunOrDie(fmt.Sprintf("systemctl restart dcs-index-backend@%d.service", i))
			indexclient.RunOrDie(fmt.Sprintf("systemctl enable dcs-index-backend@%d.service", i))
			done <- true
		}(i, server)
	}
	log.Printf("Waiting for the index to be copied to all %d index servers…\n",
		len(indexServerIds))
	for range indexServers {
		<-done
	}
	log.Printf("index copied!\n")

	// Each index backend listens on the private network at 29080+shard.
	backends := []string{}
	for i, server := range indexServers {
		backends = append(backends, fmt.Sprintf("%s:%d", server.PrivateIPv4(), 29080+i))
	}

	// TODO(longterm): configure firewall?

	client.RunOrDie("mkdir -p /etc/systemd/system/dcs-web.service.d/")

	client.WriteToFileOrDie(
		"/etc/systemd/system/dcs-web.service.d/backends.conf",
		[]byte(`[Service]
Environment=GOMAXPROCS=2
ExecStart=
ExecStart=/usr/bin/dcs-web \
    -template_pattern=/usr/share/dcs/templates/* \
	-listen_address=`+server.PrivateIPv4()+`:28080 \
	-use_sources_debian_net=true \
    -index_backends=`+strings.Join(backends, ",")))

	client.RunOrDie("systemctl daemon-reload")
	client.RunOrDie("systemctl enable dcs-web.service")
	client.RunOrDie("systemctl enable dcs-source-backend.service")
	client.RunOrDie("systemctl restart dcs-source-backend.service")
	client.RunOrDie("systemctl restart dcs-web.service")

	// Install and configure nginx.
	client.RunOrDie("DEBIAN_FRONTEND=noninteractive DEBCONF_NONINTERACTIVE_SEEN=true LC_ALL=C LANGUAGE=C LANG=C apt-get update")
	client.RunOrDie("DEBIAN_FRONTEND=noninteractive DEBCONF_NONINTERACTIVE_SEEN=true LC_ALL=C LANGUAGE=C LANG=C apt-get --force-yes -y install nginx")
	client.RunOrDie("rm /etc/nginx/sites-enabled/*")
	client.RunOrDie("mkdir -p /var/cache/nginx/cache")
	client.RunOrDie("mkdir -p /var/cache/nginx/tmp")
	client.RunOrDie("chown -R www-data.www-data /var/cache/nginx/")
	nginxHost, err := ioutil.ReadFile("/home/michael/gocode/src/github.com/Debian/dcs/nginx.example")
	if err != nil {
		log.Fatal(err)
	}
	// dcs-web is listening on the Rackspace ServiceNet (private) IP address.
	nginxReplaced := strings.Replace(string(nginxHost), "localhost:28080", server.PrivateIPv4()+":28080", -1)
	client.WriteToFileOrDie("/etc/nginx/sites-available/codesearch", []byte(nginxReplaced))
	client.RunOrDie("ln -s /etc/nginx/sites-available/codesearch /etc/nginx/sites-enabled/codesearch")
	client.RunOrDie("systemctl restart nginx.service")

	// Update DNS
	domainId, err := rs.GetDomainId("rackspace.zekjur.net")
	if err != nil {
		log.Fatal(err)
	}

	records, err := rs.GetDomainRecords(domainId)
	if err != nil {
		log.Fatal(err)
	}

	// Point the public records at the new server (AAAA → IPv6, A → IPv4) and
	// the internal monitoring records at its private IPv4 address.
	var updates []rackspace.Record
	for _, record := range records {
		if record.Name == "codesearch.rackspace.zekjur.net" {
			log.Printf("record %v\n", record)
			newIp := server.AccessIPv4
			if record.Type == "AAAA" {
				newIp = server.AccessIPv6
			}
			updates = append(updates,
				rackspace.Record{
					Id:   record.Id,
					Name: record.Name,
					Data: newIp,
				})
		} else if record.Name == "int-dcs-web.rackspace.zekjur.net" {
			// This record points to the private IPv4 address, used by our
			// monitoring.
			log.Printf("record %v\n", record)
			newIp := server.PrivateIPv4()
			updates = append(updates,
				rackspace.Record{
					Id:   record.Id,
					Name: record.Name,
					Data: newIp,
				})
		} else if record.Name == "int-dcs-source-backend.rackspace.zekjur.net" {
			// This record points to the private IPv4 address, used by our
			// monitoring.
			log.Printf("record %v\n", record)
			newIp := server.PrivateIPv4()
			updates = append(updates,
				rackspace.Record{
					Id:   record.Id,
					Name: record.Name,
					Data: newIp,
				})
		}
	}

	if err := rs.UpdateRecords(domainId, updates); err != nil {
		log.Fatal(err)
	}

	// TODO: reverse dns for the server

	log.Printf(`
codesearch was deployed to:
http://codesearch.rackspace.zekjur.net/
http://[%s]/
http://%s/
`, server.AccessIPv6, server.AccessIPv4)
}
Пример #2
0
// main garbage-collects obsolete Debian Code Search deployments on
// Rackspace: it finds the newest "NEW-dcs-0" server, deletes every older
// "NEW-dcs-*" server, and removes any "NEW-dcs-*" block-storage volume
// that is no longer attached. With -dryrun, it only logs what it would do.
func main() {
	flag.Parse()
	var err error
	rs, err = rackspace.NewClient()
	if err != nil {
		// Fatalf, not Fatal: the message contains a %v format verb.
		log.Fatalf("Could not create new rackspace client: %v\n", err)
	}

	servers, err := rs.GetServers()
	if err != nil {
		log.Fatal(err)
	}

	// We look for servers called NEW-dcs-0 to figure out the newest.
	var (
		newestId       string
		newestCreation time.Time
	)

	for _, server := range servers {
		if server.Name == "NEW-dcs-0" &&
			server.Created().After(newestCreation) {
			newestId = server.Id
			newestCreation = server.Created()
		}
	}

	log.Printf("Newest NEW-dcs-0 server is %s (created at %v)\n",
		newestId, newestCreation)

	// Delete every deployment server older than the newest one. This also
	// covers the index servers (NEW-dcs-index-*) of older deployments.
	for _, server := range servers {
		if strings.HasPrefix(server.Name, "NEW-dcs-") &&
			server.Created().Before(newestCreation) {

			log.Printf("Deleting server %s (created %v, IPv4 %s, ID %s)\n",
				server.Name, server.Created(), server.AccessIPv4, server.Id)

			if !*dryrun {
				if err := rs.DeleteServer(server.Id); err != nil {
					log.Fatal(err)
				}
			}
		}
	}

	// TODO: wait until all servers have status == deleted

	// cleanup unused volumes
	volumes, err := rs.GetVolumes()
	if err != nil {
		log.Fatal(err)
	}

	for _, volume := range volumes {
		// AVAILABLE means the volume is not attached to any server, i.e. its
		// server was deleted above (or never existed anymore).
		if strings.HasPrefix(volume.DisplayName, "NEW-dcs-") &&
			volume.Status() == "AVAILABLE" {
			log.Printf("Deleting unused codesearch volume %s\n", volume.Id)
			if !*dryrun {
				if err := rs.DeleteVolume(volume.Id); err != nil {
					log.Fatal(err)
				}
			}
		}
	}
}