// Example 1
// insertTableToBigQuery inserts a single table to BigQuery using BigQuery's InsertAll request.
//
// This function is assigned to Streamer.insertTable member.
// It is overridable so we can test Streamer without actually inserting anything to BigQuery.
func (b *Streamer) insertTableToBigQuery(projectID, datasetID, tableID string, t table) (
	r *bigquery.TableDataInsertAllResponse, err error) {
	// Translate every buffered row into the wire representation InsertAll expects.
	requestRows := make([]*bigquery.TableDataInsertAllRequestRows, 0, len(t))
	for _, row := range t {
		requestRows = append(requestRows, &bigquery.TableDataInsertAllRequestRows{
			InsertId: row.rowID,
			Json:     row.jsonValue,
		})
	}

	// Assemble the InsertAll request payload.
	request := bigquery.TableDataInsertAllRequest{
		Kind: "bigquery#tableDataInsertAllRequest",
		Rows: requestRows,
	}

	// TODO might be better to cache table services somehow, instead of re-creating them on every flush.
	tableService := bigquery.NewTabledataService(b.service)

	return tableService.InsertAll(projectID, datasetID, tableID, &request).Do()
}
// Example 2
// newTableStreamer builds a tableStreamer bound to its parent Streamer, the
// target table name, and a suffix generator, wiring up its channels, flush
// policy, and retry backoff before returning it.
func newTableStreamer(streamer *Streamer, table string, suffix func() string) *tableStreamer {
	ts := new(tableStreamer)
	ts.streamer = streamer
	ts.service = bigquery.NewTabledataService(streamer.service)
	ts.table = table
	ts.suffix = suffix

	// Buffered intake channel plus a pure-signal stop channel.
	ts.incoming = make(chan interface{}, bufferSize)
	ts.stop = make(chan struct{})

	ts.lastID = rand.Int63()

	ts.flushInterval = 10 * time.Second
	ts.flushMax = bufferSize

	// Exponential backoff with no overall deadline (MaxElapsedTime == 0 means
	// it never gives up); advance it once so the next interval is primed.
	ts.crankiness = backoff.NewExponentialBackOff()
	ts.crankiness.MaxElapsedTime = 0
	ts.crankiness.InitialInterval = 2 * time.Second
	ts.crankiness.NextBackOff()
	return ts
}