Code example #1
File: search.go Project: BurntSushi/goim
// MPAA adds the MPAA rating to the search. Only results with the given MPAA
// rating are returned. If multiple MPAA ratings are specified in the search,
// then they are combined disjunctively.
// The MPAA rating must correspond to one of the ratings in imdb.EnumMPAA (case
// insensitive). Otherwise, it will be silently ignored.
func (s *Searcher) MPAA(name string) *Searcher {
	name = strings.ToUpper(name)
	if fun.In(name, imdb.EnumMPAA) {
		s.mpaas = append(s.mpaas, name)
	}
	return s
}
Code example #2
File: search.go Project: BurntSushi/goim
// Genre adds the named genre to the search. Only results belonging to the
// given genre are returned. If multiple genres are specified in the search,
// then they are combined disjunctively.
// The genre name must correspond to one of the names in imdb.EnumGenres (case
// insensitive). Otherwise, it will be silently ignored.
func (s *Searcher) Genre(name string) *Searcher {
	name = strings.ToLower(name)
	if fun.In(name, imdb.EnumGenres) {
		s.genres = append(s.genres, name)
	}
	return s
}
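
Examples #1 and #2 follow the same builder pattern: normalize case, validate against a whitelist, silently drop unknown values, and return the receiver so calls chain. Below is a minimal, self-contained sketch of that pattern; the searcher type and the rating list are illustrative stand-ins, not goim's real types.

package main

import (
	"fmt"
	"strings"
)

// enumMPAA stands in for imdb.EnumMPAA; the real list lives in goim.
var enumMPAA = []string{"G", "PG", "PG-13", "R", "NC-17"}

type searcher struct{ mpaas []string }

// mpaa mirrors Searcher.MPAA: uppercase the input, keep it only if it
// is a known rating, and return the receiver so calls can chain.
func (s *searcher) mpaa(name string) *searcher {
	name = strings.ToUpper(name)
	for _, r := range enumMPAA {
		if r == name {
			s.mpaas = append(s.mpaas, name)
			break
		}
	}
	return s
}

func main() {
	s := new(searcher).mpaa("pg-13").mpaa("R").mpaa("bogus")
	fmt.Println(s.mpaas) // [PG-13 R]: "bogus" was silently ignored
}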
Code example #3
File: cmd_load.go Project: BurntSushi/goim
// Returns a list of tables whose indices are affected by updating the
// given lists.
// Each table name will only appear once.
//
// If the name/atom table already has rows, it is not included in the
// returned list. This avoids rebuilding its indices on every update, since
// a typical update makes only nominal additions to the name/atom table.
func tablesFromLists(db *imdb.DB, lists []string) (tables []string, err error) {
	var pre []string
	for _, name := range lists {
		tablesForList, ok := listTables[name]
		if !ok {
			return nil, ef("BUG: Could not find tables for list %s", name)
		}
		pre = append(pre, tablesForList...)
	}
	pre = fun.Keys(fun.Set(pre)).([]string)

	updatingEmpty := func(table string) bool {
		return rowCount(db, table) == 0 && fun.In(table, pre)
	}
	for _, table := range pre {
		switch table {
		case "atom", "name":
			// This is a little complex. Basically, we want to avoid rebuilding
			// indices for incremental updates. So we only let it happen when
			// we're updating the actor or movie lists from scratch.
			// (In general, this should apply to any table that is updated
			// concurrently with name/atom. We exclude tvshow and episode since
			// they are only updated when movie is updated.)
			if updatingEmpty("actor") || updatingEmpty("movie") {
				tables = append(tables, table)
			}
		default:
			tables = append(tables, table)
		}
	}
	return
}
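
One line above deserves a note: pre = fun.Keys(fun.Set(pre)).([]string) deduplicates the accumulated table names via the reflection-based fun package. A plain-Go sketch of the same deduplication follows; unlike the map-based original, this version preserves first-seen order.

package main

import "fmt"

// dedup returns xs with duplicates removed, keeping first occurrences.
func dedup(xs []string) []string {
	seen := make(map[string]bool, len(xs))
	out := make([]string, 0, len(xs))
	for _, x := range xs {
		if !seen[x] {
			seen[x] = true
			out = append(out, x)
		}
	}
	return out
}

func main() {
	pre := []string{"atom", "name", "movie", "atom", "name"}
	fmt.Println(dedup(pre)) // [atom name movie]
}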
Code example #4
File: migrations.go Project: BurntSushi/goim
// doIndices runs the SQL produced by getSql for every index on the given
// tables, or for every index when no tables are given. Fulltext indices
// are skipped when fuzzy searching is unavailable.
func doIndices(
	db *DB,
	getSql func(index, *DB) string,
	tables ...string,
) (err error) {
	defer csql.Safe(&err)

	trgmEnabled := db.IsFuzzyEnabled()
	var q string
	var ok bool
	for _, idx := range indices {
		if idx.isFulltext() && !trgmEnabled {
			// Only log the skip message when running on PostgreSQL.
			if db.Driver == "postgres" {
				log.Printf("Skipping fulltext index '%s' since "+
					"the pg_trgm extension is not enabled.", idx.sqlName())
			}
			continue
		}
		if len(tables) == 0 || fun.In(idx.table, tables) {
			q += getSql(idx, db) + "; "
			ok = true
		}
	}
	if ok {
		csql.Exec(db, q)
	}
	return
}
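
The filtering rule here is that an empty tables list means "every index"; otherwise only indices on the named tables are included, and all matching statements are joined into one batch executed with a single call. A runnable sketch of that batching with a stubbed-out SQL generator; the index type below is a stand-in for goim's own.

package main

import (
	"fmt"
	"strings"
)

// index is a stand-in for goim's index type.
type index struct{ table, name string }

// buildBatch joins one statement per matching index, mirroring how
// doIndices accumulates q. An empty tables list matches everything.
func buildBatch(indices []index, getSql func(index) string, tables ...string) string {
	want := make(map[string]bool)
	for _, t := range tables {
		want[t] = true
	}
	var stmts []string
	for _, idx := range indices {
		if len(tables) == 0 || want[idx.table] {
			stmts = append(stmts, getSql(idx))
		}
	}
	return strings.Join(stmts, "; ")
}

func main() {
	idxs := []index{{"movie", "idx_movie_title"}, {"name", "idx_name"}}
	q := buildBatch(idxs, func(i index) string {
		return fmt.Sprintf("CREATE INDEX %s ON %s (...)", i.name, i.table)
	}, "movie")
	fmt.Println(q) // CREATE INDEX idx_movie_title ON movie (...)
}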
Code example #5
File: command.go Project: BurntSushi/goim
// showFlags prints each of the command's flags along with its default
// value and usage text, skipping a few internal flags.
func (c *command) showFlags() {
	hide := []string{"cpu-prof", "quiet", "cpu"}
	c.flags.VisitAll(func(fl *flag.Flag) {
		if fun.In(fl.Name, hide) {
			return
		}
		var def string
		if len(fl.DefValue) > 0 {
			def = fmt.Sprintf(" (default: %s)", fl.DefValue)
		} else {
			def = " (default: \"\")"
		}
		usage := strings.Replace(fl.Usage, "\n", "\n    ", -1)
		pf("-%s%s\n", fl.Name, def)
		pf("    %s\n", usage)
	})
}
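
The same walk-and-skip pattern can be reproduced with the standard flag package alone. A self-contained sketch, with made-up flag names:

package main

import (
	"flag"
	"fmt"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.String("db", "goim.sqlite", "database location")
	fs.Bool("quiet", false, "suppress progress output")

	// Walk every registered flag, skipping the hidden subset.
	hide := map[string]bool{"quiet": true}
	fs.VisitAll(func(fl *flag.Flag) {
		if hide[fl.Name] {
			return
		}
		fmt.Printf("-%s (default: %q)\n    %s\n", fl.Name, fl.DefValue, fl.Usage)
	})
}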
Code example #6
File: cmd_load.go Project: BurntSushi/goim
// cmd_load implements the 'load' command: it fetches the requested IMDb
// lists and loads them into the database, dropping indices before the bulk
// load and recreating them afterward.
func cmd_load(c *command) bool {
	driver, dsn := c.dbinfo()
	db := openDb(driver, dsn)
	defer closeDb(db)

	// With SQLite, we can get some performance benefit by disabling
	// synchronous writes.
	// It is still safe from application crashes (e.g., bugs in Goim), but
	// not from power failures or operating system crashes. That trade-off
	// is acceptable for a bulk load.
	if db.Driver == "sqlite3" {
		_, err := db.Exec("PRAGMA synchronous = OFF")
		if err != nil {
			pef("Could not disable SQLite synchronous mode: %s", err)
			return false
		}
	}

	// Figure out which lists we're loading and make sure each list name is
	// valid before proceeding.
	var userLoadLists []string
	if flagLoadLists == "all" {
		userLoadLists = loadLists
	} else if flagLoadLists == "attr" {
		for _, name := range loadLists {
			if name == "movies" || name == "actors" {
				continue
			}
			userLoadLists = append(userLoadLists, name)
		}
	} else {
		for _, name := range strings.Split(flagLoadLists, ",") {
			name = strings.ToLower(strings.TrimSpace(name))
			if !fun.In(name, loadLists) {
				pef("%s is not a valid list name. See 'goim help load'.", name)
				return false
			}
			userLoadLists = append(userLoadLists, name)
		}
	}

	// Build the "fetcher" that retrieves lists, whether from the file
	// system, HTTP, or FTP.
	getFrom := c.flags.Arg(0)
	if len(getFrom) == 0 {
		getFrom = "berlin"
	}

	// Just print the URLs to download.
	if flagLoadUrls {
		fetch := newFetcher(getFrom)
		if fetch == nil {
			return false
		}
		for _, list := range userLoadLists {
			pf("%s\n", fetch.location(list))
			if list == "actors" {
				pf("%s\n", fetch.location("actresses"))
			}
		}
		return true
	}

	// If we're downloading, then just do that and quit.
	if len(flagLoadDownload) > 0 {
		// We're just saving to disk, so no need to decompress. Get a plain
		// fetcher.
		fetch := newFetcher(getFrom)
		if fetch == nil {
			return false
		}

		download := func(name string) struct{} {
			if err := downloadList(fetch, name); err != nil {
				pef("%s", err)
			}
			if name == "actors" {
				if err := downloadList(fetch, "actresses"); err != nil {
					pef("%s", err)
				}
			}
			return struct{}{}
		}
		conns := maxFtpConns
		if flagCpu < conns {
			conns = flagCpu
		}
		fun.ParMapN(download, userLoadLists, conns)
		return true
	}

	// We'll be reading, so get a gzip fetcher.
	fetch := newGzipFetcher(getFrom)
	if fetch == nil {
		return false
	}

	// Get the tables with indices corresponding to the lists we're updating.
	tables, err := tablesFromLists(db, userLoadLists)
	if err != nil {
		pef("%s", err)
		return false
	}
	logf("Dropping indices for: %s", strings.Join(tables, ", "))
	if err := db.DropIndices(tables...); err != nil {
		pef("Could not drop indices: %s", err)
		return false
	}

	// Before launching into loading---which can be done in parallel---we need
	// to load movies and actors first since they insert data that most of the
	// other lists depend on. Also, they cannot be loaded in parallel since
	// they are the only loaders that *add* atoms to the database.
	if in := loaderIndex("movies", userLoadLists); in > -1 {
		if err := loadMovies(driver, dsn, fetch); err != nil {
			pef("%s", err)
			return false
		}
		userLoadLists = append(userLoadLists[:in], userLoadLists[in+1:]...)
	}
	if in := loaderIndex("actors", userLoadLists); in > -1 {
		if err := loadActors(driver, dsn, fetch); err != nil {
			pef("%s", err)
			return false
		}
		userLoadLists = append(userLoadLists[:in], userLoadLists[in+1:]...)
	}

	// This must be done after movies/actors are loaded so that we get all
	// of their atoms.
	if len(userLoadLists) > 0 {
		logf("Reading atom identifiers from database...")
		atoms, err := newAtomizer(db, nil) // read-only
		if err != nil {
			pef("%s", err)
			return false
		}
		simpleLoad := func(name string) bool {
			loader := simpleLoaders[name]
			if loader == nil {
				// This is a bug since we should have verified all list names.
				logf("BUG: %s does not have a simpler loader.", name)
				return true
			}

			db := openDb(driver, dsn)
			defer closeDb(db)

			list, err := fetch.list(name)
			if err != nil {
				pef("%s", err)
				return false
			}
			defer list.Close()

			if err := loader(db, atoms, list); err != nil {
				pef("Could not store %s list: %s", name, err)
				return false
			}
			return true
		}

		// SQLite doesn't handle concurrent writes very well, so force it
		// to be single-threaded. We also have to limit connections when
		// fetching over FTP.
		maxConcurrent := flagCpu
		if db.Driver == "sqlite3" {
			maxConcurrent = 1
		} else {
			switch fetch.(gzipFetcher).fetcher.(type) {
			case ftpFetcher:
				maxConcurrent = maxFtpConns
			}
		}
		fun.ParMapN(simpleLoad, userLoadLists, maxConcurrent)
	}

	logf("Creating indices for: %s", strings.Join(tables, ", "))
	if err := db.CreateIndices(tables...); err != nil {
		pef("Could not create indices: %s", err)
		return false
	}
	return true
}
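
Both the download path and the simple-load path fan work out with fun.ParMapN from BurntSushi/ty/fun, which applies a function over a slice using a bounded number of goroutines. A plain-Go sketch of that bounded-parallelism pattern, without the reflection the real package uses:

package main

import (
	"fmt"
	"sync"
)

// parMapN applies f to every element of xs using at most n goroutines.
func parMapN(f func(string), xs []string, n int) {
	work := make(chan string)
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for x := range work {
				f(x)
			}
		}()
	}
	for _, x := range xs {
		work <- x
	}
	close(work)
	wg.Wait()
}

func main() {
	lists := []string{"genres", "ratings", "plot"}
	// Output order may vary, since the workers run concurrently.
	parMapN(func(name string) { fmt.Println("loading", name) }, lists, 2)
}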