Example #1
func TestServe_NoTrailSlash(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path != "/bucket/dir-one/two/index.html" {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		// stat request
		if r.Method != "HEAD" {
			t.Errorf("r.Method = %q; want HEAD", r.Method)
		}
	}))
	defer ts.Close()
	storage.Base = ts.URL
	config.Buckets = map[string]string{"default": "bucket"}

	req, _ := testInstance.NewRequest("GET", "/dir-one/two", nil)
	// make sure we're not getting memcached results
	if err := memcache.Flush(appengine.NewContext(req)); err != nil {
		t.Fatal(err)
	}
	res := httptest.NewRecorder()
	http.DefaultServeMux.ServeHTTP(res, req)
	if res.Code != http.StatusMovedPermanently {
		t.Errorf("res.Code = %d; want %d", res.Code, http.StatusMovedPermanently)
	}
	loc := "/dir-one/two/"
	if v := res.Header().Get("location"); v != loc {
		t.Errorf("location = %q; want %q", v, loc)
	}
}
// resaveAllIdiomHistory re-saves every IdiomHistory entity.
// Added 2015-11-06 to force the field EditSummary (even if empty) on every persisted IdiomHistory entity.
func resaveAllIdiomHistory(c context.Context) error {
	defer memcache.Flush(c)
	saved := 0
	keys, err := datastore.NewQuery("IdiomHistory").KeysOnly().GetAll(c, nil)
	if err != nil {
		return err
	}
	nbEntities := len(keys)

	defer func() {
		log.Infof(c, "Resaved %d IdiomHistory entities out of %d.", saved, nbEntities)
	}()

	for len(keys) > 0 {
		bunch := 100
		if len(keys) < bunch {
			bunch = len(keys)
		}
		histories := make([]*IdiomHistory, bunch)
		err := datastore.GetMulti(c, keys[:bunch], histories)
		if err != nil {
			return err
		}
		_, err = datastore.PutMulti(c, keys[:bunch], histories)
		if err != nil {
			return err
		}
		saved += bunch

		// Remove processed keys
		keys = keys[bunch:]
	}
	return nil
}
Example #3
func TestOpenFileNoTrailSlash(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path != "/bucket/no/slash/index.html" {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		// stat request
		if r.Method != "HEAD" {
			t.Errorf("r.Method = %q; want HEAD", r.Method)
		}
	}))
	defer ts.Close()

	r, _ := testInstance.NewRequest("GET", "/", nil)
	ctx := appengine.NewContext(r)
	// make sure we're not getting memcached results
	if err := memcache.Flush(ctx); err != nil {
		t.Fatal(err)
	}

	stor := &Storage{Base: ts.URL, Index: "index.html"}
	o, err := stor.OpenFile(ctx, "bucket", "/no/slash")
	if err != nil {
		t.Fatalf("stor.OpenFile: %v", err)
	}
	defer o.Body.Close()
	loc := "/no/slash/"
	if v := o.Redirect(); v != loc {
		t.Errorf("o.Redirect() = %q; want %q", v, loc)
	}
	if v := o.RedirectCode(); v != http.StatusMovedPermanently {
		t.Errorf("o.RedirectCode() = %d; want %d", v, http.StatusMovedPermanently)
	}
}
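// saveAppConfigProperty flushes the whole memcache before persisting the property to the datastore.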
func (a *MemcacheDatastoreAccessor) saveAppConfigProperty(c context.Context, prop AppConfigProperty) error {
	err := memcache.Flush(c)
	if err != nil {
		return err
	}
	return a.GaeDatastoreAccessor.saveAppConfigProperty(c, prop)
	// TODO force toggles refresh for all instances, after memcache flush
}
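// saveAppConfig flushes the whole memcache before persisting the application config to the datastore.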
func (a *MemcacheDatastoreAccessor) saveAppConfig(c context.Context, appConfig ApplicationConfig) error {
	err := memcache.Flush(c)
	if err != nil {
		return err
	}
	return a.GaeDatastoreAccessor.saveAppConfig(c, appConfig)
	// TODO force toggles refresh for all instances, after memcache flush
}
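// deleteAllIdioms removes every idiom from the datastore, then flushes the whole memcache.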
func (a *MemcacheDatastoreAccessor) deleteAllIdioms(c context.Context) error {
	err := a.GaeDatastoreAccessor.deleteAllIdioms(c)
	if err != nil {
		return err
	}
	// Cache : the nuclear option!
	return memcache.Flush(c)
}
Example #7
// DeleteAll deletes all files and directories across all roots, querying by kind alone;
// at most 500 entities of each kind are removed per call.
func (fs *dsFileSys) DeleteAll() (string, error) {

	msg := ""
	{
		q := datastore.NewQuery(tfil).KeysOnly()
		var files []DsFile
		keys, err := q.GetAll(fs.Ctx(), &files)
		if err != nil {
			msg += "could not get file keys\n"
			return msg, err
		}
		if len(keys) >= 500 {
			msg += "limited to 500 files. REPEAT operation.\n"
			keys = keys[:500]
		}

		err = datastore.DeleteMulti(fs.Ctx(), keys)
		if err != nil {
			msg += "error deleting files\n"
			return msg, err
		}

		msg += spf("%v files deleted\n", len(keys))

	}

	{
		q := datastore.NewQuery(tdir).KeysOnly()
		var dirs []DsDir
		keys, err := q.GetAll(fs.Ctx(), &dirs)
		if err != nil {
			msg += "could not get dir keys\n"
			return msg, err
		}
		if len(keys) >= 500 {
			msg += "limited to 500 directories. REPEAT operation.\n"
			keys = keys[:500]
		}

		err = datastore.DeleteMulti(fs.Ctx(), keys)
		if err != nil {
			msg += "error deleting directories\n"
			return msg, err
		}

		msg += spf("%v directories deleted\n", len(keys))
	}

	err := memcache.Flush(fs.Ctx())
	if err != nil {
		msg += "error flushing memcache\n"
		return msg, err
	}
	msg += "memcache flushed \n"

	return msg, nil
}
Example #8
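// flushMemcache clears all memcache entries and writes "ok" on success.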
func flushMemcache(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)
	c2 := aeOrig.NewContext(r)
	errMc := memcache.Flush(c)
	if errMc != nil {
		c2.Errorf("Error flushing memache: %v", errMc)
		return
	}
	w.Write([]byte("ok"))
}
Example #9
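// DeleteSubtree shows a confirmation form and, on POST, removes the given path prefix
// and its subtree from the mounted dsfs filesystem, then flushes memcache.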
func DeleteSubtree(w http.ResponseWriter, r *http.Request, m map[string]interface{}) {

	lg, lge := loghttp.Logger(w, r)

	err := r.ParseForm()
	lge(err)

	wpf(w, tplx.ExecTplHelper(tplx.Head, map[string]interface{}{"HtmlTitle": "Delete Subtree for curr FS"}))
	defer wpf(w, tplx.Foot)

	if r.Method == "POST" {
		wpf(w, "<pre>\n")
		defer wpf(w, "\n</pre>")

		mountPoint := dsfs.MountPointLast()
		if len(r.FormValue("mountname")) > 0 {
			mountPoint = r.FormValue("mountname")
		}
		lg("mount point is %v", mountPoint)

		pathPrefix := "impossible-value"
		if len(r.FormValue("pathprefix")) > 0 {
			pathPrefix = r.FormValue("pathprefix")
		}
		lg("pathprefix is %v", pathPrefix)

		fs := getFS(appengine.NewContext(r), mountPoint)
		lg("created fs %v-%v ", fs.Name(), fs.String())

		lg("removing %q - and its subtree  ...", pathPrefix)
		err := fs.RemoveAll(pathPrefix)
		lge(err)

		errMc := memcache.Flush(appengine.NewContext(r))
		lge(errMc)

		if err == nil && errMc == nil {
			lg("success")
		}

	} else {
		tData := map[string]string{"Url": UriDeleteSubtree}
		err := tplBase.ExecuteTemplate(w, "tplName01", tData)
		lge(err)

	}

}
Example #10
func TestOpenAndCache(t *testing.T) {
	const body = `{"foo":"bar"}`
	meta := map[string]string{"content-type": "application/json"}
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		for k, v := range meta {
			w.Header().Set(k, v)
		}
		w.Write([]byte(body))
	}))
	defer ts.Close()

	r, _ := testInstance.NewRequest("GET", "/", nil)
	ctx := appengine.NewContext(r)
	// make sure we're not getting memcached results
	if err := memcache.Flush(ctx); err != nil {
		t.Fatal(err)
	}

	stor := &Storage{Base: ts.URL}
	o, err := stor.Open(ctx, "bucket", "/file.json")
	if err != nil {
		t.Fatalf("stor.Open: %v", err)
	}
	defer o.Body.Close()
	b, err := ioutil.ReadAll(o.Body)
	if err != nil {
		t.Fatalf("ReadAll(o.Body): %v", err)
	}
	if string(b) != body {
		t.Errorf("o.Body = %q; want %q", b, body)
	}
	if !reflect.DeepEqual(o.Meta, meta) {
		t.Errorf("o.Meta = %+v; want %+v", o.Meta, meta)
	}

	key := stor.CacheKey("bucket", "/file.json")
	var ob objectBuf
	if _, err := memcache.Gob.Get(ctx, key, &ob); err != nil {
		t.Fatalf("memcache.Gob.Get(%q): %v", key, err)
	}
	if string(ob.Body) != body {
		t.Errorf("ob.Body = %q; want %q", ob.Body, body)
	}
	if !reflect.DeepEqual(ob.Meta, meta) {
		t.Errorf("ob.Meta = %+v; want %+v", ob.Meta, meta)
	}
}
Example #11
func TestOpenFileIndex(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// dev_appserver app identity stub
		auth := "Bearer InvalidToken:https://www.googleapis.com/auth/devstorage.read_only"
		if v := r.Header.Get("authorization"); !strings.HasPrefix(v, auth) {
			t.Errorf("auth = %q; want prefix %q", v, auth)
		}
		if r.URL.Path != "/bucket/dir/index" {
			t.Errorf("r.URL.Path = %q; want /bucket/dir/index", r.URL.Path)
		}
		// weasel client => GCS always uses gzip where available
		if v := r.Header.Get("accept-encoding"); v != "gzip" {
			t.Errorf("accept-encoding = %q; want 'gzip'", v)
		}
		w.Header().Set("content-type", "text/plain")
		w.Write([]byte("test file"))
	}))
	defer ts.Close()

	req, _ := testInstance.NewRequest("GET", "/", nil)
	ctx := appengine.NewContext(req)
	// make sure we're not getting memcached results
	if err := memcache.Flush(ctx); err != nil {
		t.Fatal(err)
	}

	stor := &Storage{Base: ts.URL, Index: "index"}
	obj, err := stor.OpenFile(ctx, "bucket", "/dir/")
	if err != nil {
		t.Fatalf("stor.OpenFile: %v", err)
	}
	defer obj.Body.Close()
	b, _ := ioutil.ReadAll(obj.Body)
	if string(b) != "test file" {
		t.Errorf("obj.Body = %q; want 'test file'", b)
	}
}
Example #12
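// cleanUpMemcache clears memcache for the given testerator setup.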
func cleanUpMemcache(s *testerator.Setup) error {
	return memcache.Flush(s.Context)
}
Example #13
// ResetMemcache resets all memcache entries
func ResetMemcache(ctx context.Context) error {
	return memcache.Flush(ctx)
}
Example #14
func (mc *gaeMemcache) flush(c context.Context) error {
	return memcache.Flush(c)
}
func (a *MemcacheDatastoreAccessor) deleteCache(c context.Context) error {
	return memcache.Flush(c)
}
Example #16
func TestServe_DefaultGCS(t *testing.T) {
	const (
		bucket       = "default-bucket"
		reqFile      = "/dir/"
		realFile     = bucket + "/dir/index.html"
		contents     = "contents"
		contentType  = "text/plain"
		cacheControl = "public,max-age=0"
		// dev_appserver app identity stub
		authorization = "Bearer InvalidToken:https://www.googleapis.com/auth/devstorage.read_only"
	)

	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path[1:] != realFile {
			t.Errorf("r.URL.Path = %q; want /%s", r.URL.Path, realFile)
		}
		if v := r.Header.Get("authorization"); !strings.HasPrefix(v, authorization) {
			t.Errorf("auth = %q; want prefix %q", v, authorization)
		}
		if v, exist := r.Header["X-Foo"]; exist {
			t.Errorf("found x-foo: %q", v)
		}
		// weasel client => GCS always uses gzip where available
		if v := r.Header.Get("accept-encoding"); v != "gzip" {
			t.Errorf("accept-encoding = %q; want 'gzip'", v)
		}
		w.Header().Set("cache-control", cacheControl)
		w.Header().Set("content-type", contentType)
		w.Header().Set("x-test", "should not propagate")
		w.Write([]byte(contents))
	}))
	defer ts.Close()
	storage.Base = ts.URL
	config.Buckets = map[string]string{"default": bucket}

	req, _ := testInstance.NewRequest("GET", reqFile, nil)
	req.Header.Set("accept-encoding", "client/accept")
	req.Header.Set("x-foo", "bar")
	// make sure we're not getting memcached results
	if err := memcache.Flush(appengine.NewContext(req)); err != nil {
		t.Fatal(err)
	}

	res := httptest.NewRecorder()
	http.DefaultServeMux.ServeHTTP(res, req)
	if res.Code != http.StatusOK {
		t.Errorf("res.Code = %d; want %d", res.Code, http.StatusOK)
	}
	if v := res.Header().Get("cache-control"); v != cacheControl {
		t.Errorf("cache-control = %q; want %q", v, cacheControl)
	}
	if v := res.Header().Get("content-type"); v != contentType {
		t.Errorf("content-type = %q; want %q", v, contentType)
	}
	if v := res.Header().Get("x-test"); v != "" {
		t.Errorf("found x-test header: %q", v)
	}
	if s := res.Body.String(); s != contents {
		t.Errorf("res.Body = %q; want %q", s, contents)
	}
}
Example #17
func (m mcImpl) Flush() error {
	return memcache.Flush(m.aeCtx)
}
Example #18
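// receiveUpload accepts a multipart file upload and stores it in the datastore-backed
// filesystem; zip archives are unpacked into a directory tree. Memcache is flushed
// afterwards so stale cached files are not served.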
func receiveUpload(w http.ResponseWriter, r *http.Request, m map[string]interface{}) {

	lg, _ := loghttp.Logger(w, r)
	c := appengine.NewContext(r)

	// parsing multipart before anything else
	err := r.ParseMultipartForm(1024 * 1024 * 2)
	if err != nil {
		lg("Multipart parsing failed: %v", err)
		return
	}

	wpf(w, tplx.ExecTplHelper(tplx.Head, map[string]interface{}{"HtmlTitle": "Receive an Upload"}))
	defer wpf(w, tplx.Foot)
	wpf(w, "<pre>")
	defer wpf(w, "</pre>")

	fields := []string{"getparam1", "mountname", "description"}
	for _, v := range fields {
		lg("%12v => %q", v, r.FormValue(v))
	}

	mountPoint := dsfs.MountPointLast()
	if len(r.FormValue("mountname")) > 0 {
		mountPoint = r.FormValue("mountname")
	}
	lg("mount point is %v", mountPoint)

	fs1 := dsfs.New(
		dsfs.MountName(mountPoint),
		dsfs.AeContext(c),
	)

	// Defined as a closure, since the unexported dsfs.dsFileSys type cannot be used as a parameter.
	funcSave := func(argName string, data []byte) (error, *bytes.Buffer) {

		b1 := new(bytes.Buffer)

		fs1 := dsfs.New(
			dsfs.MountName(mountPoint),
			dsfs.AeContext(c),
		)

		dir, bname := fs1.SplitX(argName)

		err := fs1.MkdirAll(dir, 0777)
		wpf(b1, "mkdir %v - %v\n", dir, err)
		if err != nil {
			return err, b1
		}

		err = fs1.WriteFile(path.Join(dir, bname), data, 0777)
		wpf(b1, "saved file content to %v - %v\n", argName, err)

		return err, b1
	}

	ff := "filefield"

	file, handler, err := r.FormFile(ff)
	if err != nil {
		lg("error calling FormFile from %q  => %v", ff, err)
		return
	}

	if handler == nil {
		lg("no multipart file %q", ff)
	} else {
		lg("extracted file %v", handler.Filename)

		data, err := ioutil.ReadAll(file)
		if err != nil {
			lg("ReadAll on uploaded file failed: %v", err)
			return
		}
		defer file.Close()
		lg("extracted file content;  %v bytes", len(data))

		newFilename := docRootDataStore + handler.Filename
		ext := path.Ext(newFilename)

		if ext == ".zip" {

			lg("found zip - treat as dir-tree %q", newFilename)

			zr, err := zip.NewReader(file, int64(len(data)))
			if err != nil {
				lg("open as zip failed: %v", err)
				return
			}

			for _, f := range zr.File {
				newFilename = docRootDataStore + f.Name

				dir, bname := fs1.SplitX(newFilename)

				if f.FileInfo().IsDir() {

					lg("\t dir %s", newFilename)

					err := fs1.MkdirAll(path.Join(dir, bname), 0777)
					if err != nil {
						lg("MkdirAll %v failed: %v", newFilename, err)
						return
					}

				} else {

					lg("\t file %s", newFilename)

					rc, err := f.Open()
					if err != nil {
						return
					}
					defer func(rc io.ReadCloser) {
						if err := rc.Close(); err != nil {
							panic(err)
						}
					}(rc)

					bts := new(bytes.Buffer)
					size, err := io.Copy(bts, rc)
					if err != nil {
						lg("Could not copy from zipped file %v: %v", newFilename, err)
						return
					}

					err = common.WriteFile(fsi.FileSystem(fs1), path.Join(dir, bname), bts.Bytes())

					// err = fs1.WriteFile(path.Join(dir, bname), bts.Bytes(), 0777)
					if err != nil {
						lg("WriteFile of zipped file %v failed: %v", newFilename, err)
						return
					}
					lg("\t  saved %v - %v Bytes", newFilename, size)

				}

			}

		} else {

			err, b2 := funcSave(newFilename, data)
			lg("%s", b2)
			if err != nil {
				return
			}

		}

		errMc := memcache.Flush(appengine.NewContext(r))
		if errMc != nil {
			lg("Error flushing memache: %v", errMc)
			return
		}

		lg("--------------------\n")

	}

}
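// adminRepairHistoryVersions renumbers the IdiomHistory entities of a single idiom in
// chronological order and fixes the idiom's Version field to match the latest history version.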
func adminRepairHistoryVersions(w http.ResponseWriter, r *http.Request) error {
	c := appengine.NewContext(r)
	defer memcache.Flush(c)

	idiomIDStr := r.FormValue("idiomId")
	if idiomIDStr == "" {
		return PiError{"Mandatory param: idiomId", http.StatusBadRequest}
	}
	idiomID := String2Int(idiomIDStr)

	// Warning: fetching the whole history of 1 idiom
	// may have quite a big memory footprint
	log.Infof(c, "Repairing versions for idiom: %v", idiomID)

	q := datastore.NewQuery("IdiomHistory").
		Filter("Id =", idiomID).
		Order("VersionDate")
	histories := make([]*IdiomHistory, 0)
	historyKeys, err := q.GetAll(c, &histories)
	if err != nil {
		return err
	}
	for i := range histories[1:] {
		if histories[i].VersionDate.After(histories[i+1].VersionDate) {
			return PiError{ErrorText: "History items not well sorted", Code: 500}
		}
	}

	for i := range histories {
		histories[i].Version = 1 + i
	}
	lastVersion := len(histories)
	log.Infof(c, "\tSaving %v history entities.", len(histories))
	for len(historyKeys) > 0 {
		bunch := 10
		if len(historyKeys) < 10 {
			bunch = len(historyKeys)
		}
		_, err = datastore.PutMulti(c, historyKeys[:bunch], histories[:bunch])
		if err != nil {
			return err
		}
		// Remove processed items
		historyKeys = historyKeys[bunch:]
		histories = histories[bunch:]
	}

	var idiom Idiom
	idiomKey := newIdiomKey(c, idiomID)
	err = datastore.Get(c, idiomKey, &idiom)
	if err != nil {
		return err
	}
	if idiom.Version == lastVersion {
		log.Infof(c, "\tIdiom version %v already clean", idiom.Version)
	} else {
		log.Infof(c, "\tFixing idiom version %v -> %v", idiom.Version, lastVersion)
		idiom.Version = lastVersion
		_, err = datastore.Put(c, idiomKey, &idiom)
		if err != nil {
			return err
		}
	}

	w.Header().Set("Content-Type", "application/json")
	fmt.Fprintln(w, Response{"success": true, "message": "History repaired for idiom " + idiomIDStr})
	return nil
}
Example #20
//I need to implement a token on this form
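// csvimport reads an uploaded CSV of resources and inserts (or updates, matched by
// OrganizationName) the corresponding Resource entities, then flushes memcache.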
func csvimport(w http.ResponseWriter, r *http.Request) *appError {
	c := appengine.NewContext(r)
	log.Infof(c, "method: ", r.Method)

	if r.Method != "POST" {
		return &appError{
			errors.New("Unsupported method call to import"),
			"Imports most be POSTed",
			http.StatusMethodNotAllowed,
		}
	}

	//this block that checks the user's credentials should eventually be broken out into a filter
	u := user.Current(c)
	if u == nil {
		url, err := user.LoginURL(c, r.URL.String())
		if err != nil {
			return &appError{err, "Could not determine LoginURL", http.StatusInternalServerError}
		}
		w.Header().Set("Location", url)
		w.WriteHeader(http.StatusFound)
		return nil
	}

	//some crappy security so that only a certain person can upload things
	//we should probably have a users entity in datastore that we manage manually for this kinda thing
	if u.Email != "*****@*****.**" {
		return &appError{
			errors.New("Illegal import attempted by " + u.Email),
			"Your user is not allowed to import",
			http.StatusForbidden,
		}
	}

	//r.ParseMultipartForm(1 << 10)

	file, handler, err := r.FormFile("uploadfile")
	if err != nil {
		return &appError{err, "Error uploading file", http.StatusInternalServerError}
	}
	defer file.Close()

	log.Infof(c, "New import file: %s ", handler.Filename)

	cr := csv.NewReader(file)
	var res []*Resource
	var keys []*datastore.Key

	//at the moment we always insert a new item, this should be an insert or update based on OrganizationName
	//if we get a large enough data set we'll need to implement two loops so that we only batch a certain number of records at a time
	for {
		rec, err := cr.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return &appError{err, "Error reading file", http.StatusInternalServerError}
		}

		//if the first row has column headers then skip to the next one
		if strings.ToLower(strings.Trim(rec[1], " ")) == "category" {
			continue
		}

		//Search for this Resource by OrganizationName
		q := datastore.NewQuery("Resource").Filter("organizationname =", rec[2]).KeysOnly().Limit(2)
		tmpKey := datastore.NewIncompleteKey(c, "Resource", nil)
		if tmpKeys, err := q.GetAll(c, nil); len(tmpKeys) == 1 && err == nil {
			tmpKey = tmpKeys[0]
		}

		//we may want IDs in there eventually
		//_, err = strconv.ParseInt(rec[0], 2, 64)
		tmp := &Resource{
			Category:         rec[1], //getSliceFromString(rec[1]),
			OrganizationName: rec[2],
			Address:          rec[3],
			ZipCode:          rec[4],
			Days:             GetDays(rec[5:8]),
			TimeOpenClose:    GetTimes(rec[5:8], c),
			PeopleServed:     getSliceFromString(rec[8]),
			Description:      rec[9],
			PhoneNumber:      rec[10],
			LastUpdatedBy:    u.Email,
			LastUpdatedTime:  time.Now().UTC(),
			IsActive:         true,
			Location:         appengine.GeoPoint{},
		}

		//log.Infof(c, "len slice check: %x, len rec LatLng check: %x, check for comma: %x", len(rec) > 11, len(rec[11]) > 0, strings.Index(rec[11], ",") != -1)

		if len(rec) > 11 && len(rec[11]) > 0 && strings.Index(rec[11], ",") != -1 {
			tmp.Location.Lng, _ = strconv.ParseFloat(strings.Split(rec[11], ",")[0], 64)
			tmp.Location.Lat, _ = strconv.ParseFloat(strings.Split(rec[11], ",")[1], 64)
			//log.Println(tmp.Location)
		}

		res = append(res, tmp)

		keys = append(keys, tmpKey)
	}

	_, err = datastore.PutMulti(c, keys, res)
	if err != nil {
		log.Debugf(c, "%v", err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return &appError{err, "Error updating database", http.StatusInternalServerError}
	}

	// clear the cache
	memcache.Flush(c)

	http.Redirect(w, r, "/index.html", http.StatusFound)
	return nil
}