Example #1
func (t *Table) customizeQueryDst(conf *bq.JobConfigurationQuery, projectID string) {
	if !t.implicitTable() {
		conf.DestinationTable = t.tableRefProto()
	}
	conf.CreateDisposition = string(t.CreateDisposition)
	conf.WriteDisposition = string(t.WriteDisposition)
}
Example #2
func (q *Query) customizeQuerySrc(conf *bq.JobConfigurationQuery, projectID string) {
	conf.Query = q.Q
	if q.DefaultProjectID != "" || q.DefaultDatasetID != "" {
		conf.DefaultDataset = &bq.DatasetReference{
			DatasetId: q.DefaultDatasetID,
			ProjectId: q.DefaultProjectID,
		}
	}
}
Example #3
func (opt jobPriority) customizeQuery(conf *bq.JobConfigurationQuery, projectID string) {
	conf.Priority = string(opt)
}
Example #4
func (opt useQueryCache) customizeQuery(conf *bq.JobConfigurationQuery, projectID string) {
	conf.UseQueryCache = true
}
Example #5
func (opt allowLargeResults) customizeQuery(conf *bq.JobConfigurationQuery) {
	conf.AllowLargeResults = true
}
Example #6
func (opt disableFlattenedResults) customizeQuery(conf *bq.JobConfigurationQuery) {
	f := false
	conf.FlattenResults = &f
	// DisableFlattenedResults implies AllowLargeResults
	allowLargeResults{}.customizeQuery(conf)
}
Example #7
func (opt disableQueryCache) customizeQuery(conf *bq.JobConfigurationQuery) {
	f := false
	conf.UseQueryCache = &f
}
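The pointer-to-bool seen in Examples #6 and #7 is how an explicit false reaches the API: the generated bigquery/v2 structs mark these fields omitempty, so only a non-nil *bool (or an entry in ForceSendFields, as in Example #13) survives JSON encoding. A minimal sketch of the same idea, assuming FlattenResults is such a generated *bool field:

conf := &bq.JobConfigurationQuery{Query: "SELECT 1"}
f := false
conf.FlattenResults = &f // explicit false is encoded; a nil pointer means "use the server default"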
Example #8
func (opt maxBytesBilled) customizeQuery(conf *bq.JobConfigurationQuery) {
	if opt >= 1 {
		conf.MaximumBytesBilled = int64(opt)
	}
}
Example #9
func (opt TableCreateDisposition) customizeQuery(conf *bq.JobConfigurationQuery) {
	conf.CreateDisposition = string(opt)
}
Example #10
func (opt disableQueryCache) customizeQuery(conf *bq.JobConfigurationQuery, projectID string) {
	f := false
	conf.UseQueryCache = &f
}
Example #11
func (opt allowLargeResults) customizeQuery(conf *bq.JobConfigurationQuery, projectID string) {
	conf.AllowLargeResults = true
}
Example #12
func (q *QueryConfig) populateJobQueryConfig(conf *bq.JobConfigurationQuery) error {
	conf.Query = q.Q

	if len(q.TableDefinitions) > 0 {
		conf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration)
	}
	for name, data := range q.TableDefinitions {
		conf.TableDefinitions[name] = data.externalDataConfig()
	}

	if q.DefaultProjectID != "" || q.DefaultDatasetID != "" {
		conf.DefaultDataset = &bq.DatasetReference{
			DatasetId: q.DefaultDatasetID,
			ProjectId: q.DefaultProjectID,
		}
	}

	if tier := int64(q.MaxBillingTier); tier > 0 {
		conf.MaximumBillingTier = &tier
	}
	conf.CreateDisposition = string(q.CreateDisposition)
	conf.WriteDisposition = string(q.WriteDisposition)
	conf.AllowLargeResults = q.AllowLargeResults
	conf.Priority = string(q.Priority)

	f := false
	if q.DisableQueryCache {
		conf.UseQueryCache = &f
	}
	if q.DisableFlattenedResults {
		conf.FlattenResults = &f
		// DisableFlattenedResults implies AllowLargeResults.
		conf.AllowLargeResults = true
	}
	if q.MaxBytesBilled >= 1 {
		conf.MaximumBytesBilled = q.MaxBytesBilled
	}
	if q.UseStandardSQL || len(q.Parameters) > 0 {
		conf.UseLegacySql = false
		conf.ForceSendFields = append(conf.ForceSendFields, "UseLegacySql")
	}

	if q.Dst != nil && !q.Dst.implicitTable() {
		conf.DestinationTable = q.Dst.tableRefProto()
	}
	for _, p := range q.Parameters {
		qp, err := p.toRaw()
		if err != nil {
			return err
		}
		conf.QueryParameters = append(conf.QueryParameters, qp)
	}
	return nil
}
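For context, a minimal sketch of how a configuration populated this way might be submitted, assuming the generated google.golang.org/api/bigquery/v2 client is imported as bq (the helper name and wiring are illustrative, not taken from the examples above):

func insertQueryJob(svc *bq.Service, projectID string, conf *bq.JobConfigurationQuery) (*bq.Job, error) {
	// Wrap the query configuration in a full job configuration and submit it.
	job := &bq.Job{
		Configuration: &bq.JobConfiguration{Query: conf},
	}
	return svc.Jobs.Insert(projectID, job).Do()
}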
Example #13
func (opt queryUseStandardSQL) customizeQuery(conf *bq.JobConfigurationQuery) {
	conf.UseLegacySql = false
	conf.ForceSendFields = append(conf.ForceSendFields, "UseLegacySql")
}
Example #14
func (t *Table) customizeQueryDst(conf *bq.JobConfigurationQuery, projectID string) {
	if !t.implicitTable() {
		conf.DestinationTable = t.tableRefProto()
	}
}
Example #15
func (opt maxBillingTier) customizeQuery(conf *bq.JobConfigurationQuery) {
	tier := int64(opt)
	conf.MaximumBillingTier = &tier
}
Example #16
func (opt TableWriteDisposition) customizeQuery(conf *bq.JobConfigurationQuery, projectID string) {
	conf.WriteDisposition = string(opt)
}
Example #17
// largeDataPagedQuery builds a query job and inserts it into the job queue, allowing the AllowLargeResults flag to be set for the job
func (c *Client) largeDataPagedQuery(service *bigquery.Service, pageSize int, dataset, project, queryStr string, dataChan chan Data) ([][]interface{}, []string, error) {
	c.printDebug("largeDataPagedQuery starting")
	ts := time.Now()
	// start query
	tableRef := bigquery.TableReference{DatasetId: dataset, ProjectId: project, TableId: c.tempTableName}
	jobConfigQuery := bigquery.JobConfigurationQuery{}

	datasetRef := &bigquery.DatasetReference{
		DatasetId: dataset,
		ProjectId: project,
	}

	jobConfigQuery.AllowLargeResults = true
	jobConfigQuery.Query = queryStr
	jobConfigQuery.DestinationTable = &tableRef
	jobConfigQuery.DefaultDataset = datasetRef
	if !c.flattenResults {
		c.printDebug("setting FlattenResults to false")
		// FlattenResults is a *bool, so an explicit false must be sent via a pointer
		f := false
		jobConfigQuery.FlattenResults = &f
	}
	jobConfigQuery.WriteDisposition = "WRITE_TRUNCATE"
	jobConfigQuery.CreateDisposition = "CREATE_IF_NEEDED"

	jobConfig := bigquery.JobConfiguration{}

	jobConfig.Query = &jobConfigQuery

	job := bigquery.Job{}
	job.Configuration = &jobConfig

	jobInsert := service.Jobs.Insert(project, &job)
	runningJob, jerr := jobInsert.Do()

	if jerr != nil {
		c.printDebug("Error inserting job!", jerr)
		if dataChan != nil {
			dataChan <- Data{Err: jerr}
		}
		return nil, nil, jerr
	}

	qr, err := service.Jobs.GetQueryResults(project, runningJob.JobReference.JobId).Do()

	if err != nil {
		c.printDebug("Error loading query: ", err)
		if dataChan != nil {
			dataChan <- Data{Err: err}
		}
		return nil, nil, err
	}

	var headers []string
	rows := [][]interface{}{}

	// if the query has already completed, process its rows; otherwise begin paging for results
	if qr.JobComplete {
		c.printDebug("job complete, got rows", len(qr.Rows))
		headers, rows = c.headersAndRows(qr.Schema, qr.Rows)
		if dataChan != nil {
			dataChan <- Data{Headers: headers, Rows: rows}
		}
	}

	if !qr.JobComplete {
		resultChan := make(chan [][]interface{})
		headersChan := make(chan []string)

		go c.pageOverJob(len(rows), runningJob.JobReference, qr.PageToken, resultChan, headersChan)

	L:
		for {
			select {
			case h, ok := <-headersChan:
				if ok {
					c.printDebug("got headers")
					headers = h
				}
			case newRows, ok := <-resultChan:
				if !ok {
					break L
				}
				if dataChan != nil {
					c.printDebug("got rows", len(newRows))
					dataChan <- Data{Headers: headers, Rows: newRows}
				} else {
					rows = append(rows, newRows...)
				}
			}
		}
	}

	if dataChan != nil {
		close(dataChan)
	}
	c.printDebug("largeDataPagedQuery completed in ", time.Now().Sub(ts).Seconds(), "s")
	return rows, headers, nil
}
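Example #17 hands incomplete jobs off to c.pageOverJob, which is not shown here. A minimal sketch of the underlying paging loop, assuming only the generated bigquery/v2 service and a job that has already completed (the function name is illustrative):

func pageResults(service *bigquery.Service, project, jobID string) ([]*bigquery.TableRow, error) {
	var rows []*bigquery.TableRow
	pageToken := ""
	for {
		call := service.Jobs.GetQueryResults(project, jobID)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		qr, err := call.Do()
		if err != nil {
			return nil, err
		}
		rows = append(rows, qr.Rows...)
		if qr.PageToken == "" {
			return rows, nil
		}
		pageToken = qr.PageToken
	}
}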