// TestCopyOne verifies that only one COPY can run at once. func TestCopyOne(t *testing.T) { defer leaktest.AfterTest(t)() t.Skip("https://github.com/lib/pq/issues/494") params, _ := createTestServerParams() s, db, _ := serverutils.StartServer(t, params) defer s.Stopper().Stop() if _, err := db.Exec(` CREATE DATABASE d; SET DATABASE = d; CREATE TABLE t ( i INT PRIMARY KEY ); `); err != nil { t.Fatal(err) } txn, err := db.Begin() if err != nil { t.Fatal(err) } if _, err := txn.Prepare(pq.CopyIn("t", "i")); err != nil { t.Fatal(err) } if _, err := txn.Prepare(pq.CopyIn("t", "i")); err == nil { t.Fatal("expected error") } }
func (qs *QuadStore) copyFrom(tx *sql.Tx, in []graph.Delta) error { stmt, err := tx.Prepare(pq.CopyIn("quads", "subject", "predicate", "object", "label", "id", "ts", "subject_hash", "predicate_hash", "object_hash", "label_hash")) if err != nil { return err } for _, d := range in { _, err := stmt.Exec( d.Quad.Subject, d.Quad.Predicate, d.Quad.Object, d.Quad.Label, d.ID.Int(), d.Timestamp, hashOf(d.Quad.Subject), hashOf(d.Quad.Predicate), hashOf(d.Quad.Object), hashOf(d.Quad.Label), ) if err != nil { glog.Errorf("couldn't prepare COPY statement: %v", err) return err } } _, err = stmt.Exec() if err != nil { return err } return stmt.Close() }
func bulkDirect(db *sql.DB) error { tx, err := db.Begin() if err != nil { return err } stmt, err := tx.Prepare(pq.CopyIn("foo", "id", "val")) if err != nil { tx.Rollback() return err } // Build up the COPY FROM by repeated Exec calls for i := 0; i < *Count; i++ { val := *Start + i strVal := fmt.Sprintf("%07d", val) if _, err := stmt.Exec(val, strVal); err != nil { fmt.Printf("error while building up COPY: %s\n", err) tx.Rollback() return err } } // Flush the COPY FROM to postgres if _, err := stmt.Exec(); err != nil { fmt.Printf("error while copying data: %s\n", err) return err } if err := stmt.Close(); err != nil { fmt.Printf("error during stmt.Close(): %s\n", err) return err } if err := tx.Commit(); err != nil { fmt.Printf("could not commit transaction: %s\n", err) return err } return nil }
func (db *Database) UpdateDivision(divisionId int, units []Unit) { tx, err := db.db.Begin() if err != nil { panic(err) } _, err = tx.Query("DELETE FROM unit WHERE unit.division = $1", divisionId) if err != nil { panic(err) } stmt, err := tx.Prepare(pq.CopyIn("unit", "division", "unit_type", "amount")) for _, unit := range units { _, err = stmt.Exec(divisionId, unit.TypeNum, unit.Amount) if err != nil { panic(err) } } _, err = stmt.Exec() if err != nil { panic(err) } err = stmt.Close() if err != nil { panic(err) } err = tx.Commit() if err != nil { panic(err) } }
func (ops *DBOps) CopyDataIntoTable(t []Stock) { txn, err := ops.Db.Begin() if err != nil { log.Fatal(err) } defer txn.Rollback() stmt, err := txn.Prepare(pq.CopyIn("stock", "dealtime", "price", "volume", "amount", "climax")) if err != nil { log.Fatal(err) } for _, item := range t { _, err = stmt.Exec(item.Dealtime, item.Price, item.Volume, item.Amount, item.Climax) if err != nil { log.Fatal(err) } } _, err = stmt.Exec() if err != nil { log.Fatal(err) } err = stmt.Close() if err != nil { log.Fatal(err) } err = txn.Commit() if err != nil { log.Fatal(err) } }
func insertShooters(db *sql.DB, m *Match) { txn, err := db.Begin() if err != nil { log.Fatal(err) } stmt, err := txn.Prepare(pq.CopyIn("users", "name", "countryCode", "mos", "flags")) if err != nil { log.Fatal(err) } for _, results := range m.Results { for _, result := range results { _, err = stmt.Exec(result.Name, result.CountryCode, result.MosNumber, result.Flags) if err != nil { log.Fatal(err) } } } _, err = stmt.Exec() if err != nil { log.Fatal(err) } err = stmt.Close() if err != nil { log.Fatal(err) } err = txn.Commit() if err != nil { log.Fatal(err) } }
func createSubscriptions(ids []int) error { info := getDBSettings() db, errCon := sql.Open("postgres", fmt.Sprintf("host=%v user=%v password=%v dbname=%v sslmode=require", info.Host, info.Username, info.Password, info.Database)) defer db.Close() if errCon != nil { log.Fatal(errCon) } txn, errT := db.Begin() if errT != nil { log.Println(errT) return errT } stmt, errPrep := txn.Prepare(pq.CopyIn("subscription", "topicid", "userid")) if errPrep != nil { log.Fatal(errPrep) } log.Println("Start For...") var wg sync.WaitGroup for _, id := range ids { userID := id wg.Add(1) go func(id int) { defer wg.Done() mr.Seed(time.Now().UnixNano()) numSubs := mr.Intn(5) + 1 for i := 0; i < numSubs; i++ { topicID := i + 1 _, errA := stmt.Exec(topicID, userID) if errA != nil { log.Fatal(errA) } } }(userID) } wg.Wait() log.Println("End For") log.Println("Start Exec") _, errEX := stmt.Exec() if errEX != nil { log.Fatal(errEX) } log.Println("End Exec") errClose := stmt.Close() if errClose != nil { log.Fatal(errClose) } log.Println("Start Commit") errCommit := txn.Commit() if errCommit != nil { log.Fatal(errCommit) } log.Println("End Commit") return nil }
func (dfr *DataFileReader) executeCopyLine(line string, c *spiffy.DbConnection, tx *sql.Tx) (*sql.Stmt, error) { pieces := dfr.extractCopyLine(line) if len(pieces) < 3 { return nil, exception.New("Invalid `COPY ...` line, cannot continue.") } tableName := pieces[1] columnCSV := pieces[2] columns := strings.Split(columnCSV, ", ") return tx.Prepare(pq.CopyIn(tableName, columns...)) }
func createLang(ids []int) error { info := getDBSettings() db, errCon := sql.Open("postgres", fmt.Sprintf("host=%v user=%v password=%v dbname=%v sslmode=require", info.Host, info.Username, info.Password, info.Database)) defer db.Close() if errCon != nil { log.Fatal(errCon) } txn, errT := db.Begin() if errT != nil { log.Println(errT) return errT } stmt, errPrep := txn.Prepare(pq.CopyIn("userinfo", "userid", "langcode")) if errPrep != nil { log.Fatal(errPrep) } log.Println("Start For...") var wg sync.WaitGroup for _, id := range ids { userID := id wg.Add(1) go func(id int) { defer wg.Done() _, errA := stmt.Exec(userID, "en") if errA != nil { log.Fatal(errA) } }(userID) } wg.Wait() log.Println("End For") log.Println("Start Exec") _, errEX := stmt.Exec() if errEX != nil { log.Fatal(errEX) } log.Println("End Exec") errClose := stmt.Close() if errClose != nil { log.Fatal(errClose) } log.Println("Start Commit") errCommit := txn.Commit() if errCommit != nil { log.Fatal(errCommit) } log.Println("End Commit") return nil }
func commitData(db *sql.DB, data map[string]string, columns ...string) { var dataitems = []string{} columns = append(columns, "symbol") for k, v := range columns { if k == 0 && len(data[v]) == 0 { break } dataitems = append(dataitems, data[v]) } if len(dataitems) > 0 { fmt.Printf("%v{%d}\n", dataitems, len(dataitems)) txn, err := db.Begin() if err != nil { log.Fatal(err) } stmt, err := txn.Prepare(pq.CopyIn("balancesheet", columns...)) if err != nil { log.Fatal(err) } new := make([]interface{}, len(dataitems)) for i, v := range dataitems { new[i] = v } _, err = stmt.Exec(new...) if err != nil { log.Fatal(err) } _, err = stmt.Exec() if err != nil { log.Fatal(err) } err = stmt.Close() if err != nil { log.Fatal(err) } err = txn.Commit() if err != nil { log.Fatal(err) } } }
// insert operates on a list of dumpFormat and inserts them to the provided Postgres // database. func (data dataset) insert(db *sql.DB) error { transaction, err := db.Begin() if err != nil { return fmt.Errorf("Error starting PG txn => %s", err.Error()) } // PG's COPY FROM used for fast mass insertions. Syntax is table followed by columns. // http://godoc.org/github.com/lib/pq#hdr-Bulk_imports stmt, err := transaction.Prepare(pq.CopyIn( "density_data", // table "dump_time", // columns..... "group_id", "group_name", "parent_id", "parent_name", "client_count", )) if err != nil { return fmt.Errorf("Error prepping PG txn => %s", err.Error()) } defer stmt.Close() // Add all data from the set for _, d := range data { _, err = stmt.Exec( d.DumpTime, d.GroupID, d.GroupName, d.ParentID, d.ParentName, d.ClientCount, ) if err != nil { return fmt.Errorf("Failed to add to bulk insert => %s", err.Error()) } } // execute the transaction if _, err = stmt.Exec(); err != nil { return fmt.Errorf("Failed to execute bulk insert => %s", err.Error()) } // commit the transaction if there's been no errors if err = transaction.Commit(); err != nil { log.Printf("ERROR: Failed to commit txn => %s", err.Error()) if err = transaction.Rollback(); err != nil { log.Printf("ERROR: Failed to rollback txn => %s", err.Error()) } } return nil }
func TestCopyError(t *testing.T) { defer leaktest.AfterTest(t)() params, _ := createTestServerParams() s, db, _ := serverutils.StartServer(t, params) defer s.Stopper().Stop() if _, err := db.Exec(` CREATE DATABASE d; SET DATABASE = d; CREATE TABLE t ( i INT PRIMARY KEY ); `); err != nil { t.Fatal(err) } txn, err := db.Begin() if err != nil { t.Fatal(err) } stmt, err := txn.Prepare(pq.CopyIn("t", "i")) if err != nil { t.Fatal(err) } // Insert conflicting primary keys. for i := 0; i < 2; i++ { _, err = stmt.Exec(1) if err != nil { t.Fatal(err) } } err = stmt.Close() if err == nil { t.Fatal("expected error") } // Make sure we can query after an error. var i int if err := db.QueryRow("SELECT 1").Scan(&i); err != nil { t.Fatal(err) } else if i != 1 { t.Fatalf("expected 1, got %d", i) } if err := txn.Rollback(); err != nil { t.Fatal(err) } }
func InsertMultiDataValues(db *sql.DB, table_name string, table_columns []string, data [][]interface{}) error { // Transaction Begins and must end with a commit or rollback transaction, err := db.Begin() if err != nil { transaction.Rollback() return err } // Preparing statement with the table name and columns passed statement, err := transaction.Prepare(pq.CopyIn(table_name, table_columns...)) if err != nil { statement.Close() transaction.Rollback() return err } // Looping though all the data rows passed for _, data_row := range data { // Inserting Single Data row into the statement _, err = statement.Exec(data_row...) if err != nil { statement.Close() transaction.Rollback() return err } } /* _, err = stmt.Exec() if err != nil { return err } */ // Closing the connection of the statement err = statement.Close() if err != nil { statement.Close() transaction.Rollback() return err } // Commiting and closing the transaction saving changes we have made in the database err = transaction.Commit() if err != nil { transaction.Rollback() return err } return nil }
// TestCopyTransaction verifies that COPY data can be used after it is done // within a transaction. func TestCopyTransaction(t *testing.T) { defer leaktest.AfterTest(t)() params, _ := createTestServerParams() s, db, _ := serverutils.StartServer(t, params) defer s.Stopper().Stop() if _, err := db.Exec(` CREATE DATABASE d; SET DATABASE = d; CREATE TABLE t ( i INT PRIMARY KEY ); `); err != nil { t.Fatal(err) } txn, err := db.Begin() if err != nil { t.Fatal(err) } stmt, err := txn.Prepare(pq.CopyIn("t", "i")) if err != nil { t.Fatal(err) } const val = 2 _, err = stmt.Exec(val) if err != nil { t.Fatal(err) } if err = stmt.Close(); err != nil { t.Fatal(err) } var i int if err := txn.QueryRow("SELECT i FROM d.t").Scan(&i); err != nil { t.Fatal(err) } else if i != val { t.Fatalf("expected 1, got %d", i) } if err := txn.Commit(); err != nil { t.Fatal(err) } }
func NewInserter( tx *sql.Tx, driver string, table string, columns ...string, ) (ins *Inserter, err error) { defer Safe(&err) ins = &Inserter{tx, table, columns, driver, nil} if driver == "postgres" { ins.stmt = Prepare(tx, pq.CopyIn(table, columns...)) } else { ins.stmt = ins.preparedInsert() } return }
func loadEventsPage(page Page, db *sql.DB) error { startPage := time.Now() tx, err := db.Begin() if err != nil { return err } statement, err := tx.Prepare(pq.CopyIn("charges", "id", "amount", "created", "sequence")) if err != nil { return err } for _, event := range page.Data { switch event.Type { case "charge.created": _, err = statement.Exec( // TODO: deserialize to proper charge object event.Data.Obj["id"].(string), uint64(event.Data.Obj["amount"].(float64)), time.Unix(int64(event.Data.Obj["created"].(float64)), 0), // TODO: should actually be in its own table event.Sequence, ) } } _, err = statement.Exec() if err != nil { return err } err = statement.Close() if err != nil { return err } err = tx.Commit() if err != nil { return err } log.Printf("Loaded page of %v event(s) in %v.", len(page.Data), time.Now().Sub(startPage)) return nil }
func saveDailyQuotes(in <-chan (*Response), db *sql.DB) { for res := range in { if res != nil && len(res.Query.Results.DailyTicks) > 0 { fmt.Printf("Saving quotes for %s...\n", res.Query.Results.DailyTicks[0].Symbol) tx, err := db.Begin() if err != nil { fmt.Println("error creating db tx") return } stmt, err := tx.Prepare(pq.CopyIn("daily_quotes_hst", "symbol", "trade_dt", "open", "high", "low", "close", "volume", "adj_close")) if err != nil { fmt.Println(err.Error()) return } for i := 0; i < len(res.Query.Results.DailyTicks); i++ { q := res.Query.Results.DailyTicks[i] _, err = stmt.Exec(q.Symbol, q.Date, q.Open, q.High, q.Low, q.Close, q.Volume, q.AdjClose) if err != nil { fmt.Printf("error saving quotes to db tx: %s\n", err.Error()) break } } _, err = stmt.Exec() if err != nil { fmt.Printf("error saving quotes to db tx: %s\n", err.Error()) } err = stmt.Close() if err != nil { fmt.Printf("error closing stmt: %s\n", err.Error()) } err = tx.Commit() if err != nil { fmt.Printf("error commiting tx\n") return } fmt.Printf("Total daily ticks for %s : %d\n", res.Query.Results.DailyTicks[0].Symbol, len(res.Query.Results.DailyTicks)) } } }
// copyFrom is the COPY FROM bulk loader for quads. NOTE: it is intentionally
// disabled — the leading panic makes everything below unreachable until this
// path is fixed. The code is kept so the intended flow remains visible:
// prepare a pq.CopyIn statement, buffer one row per delta (quad directions
// marshaled plus id/timestamp/hashes), then finalize with Close. The usual
// zero-argument flush Exec is commented out below.
func (qs *QuadStore) copyFrom(tx *sql.Tx, in []graph.Delta, opts graph.IgnoreOpts) error {
	panic("broken")
	stmt, err := tx.Prepare(pq.CopyIn("quads", "subject", "predicate", "object", "label", "id", "ts", "subject_hash", "predicate_hash", "object_hash", "label_hash"))
	if err != nil {
		glog.Errorf("couldn't prepare COPY statement: %v", err)
		return err
	}
	for _, d := range in {
		// Marshal the four quad directions into their stored representation.
		s, p, o, l, err := marshalQuadDirections(d.Quad)
		if err != nil {
			glog.Errorf("couldn't marshal quads: %v", err)
			return err
		}
		_, err = stmt.Exec(
			s,
			p,
			o,
			l,
			d.ID.Int(),
			d.Timestamp,
			hashOf(d.Quad.Subject),
			hashOf(d.Quad.Predicate),
			hashOf(d.Quad.Object),
			hashOf(d.Quad.Label),
		)
		if err != nil {
			// Normalize driver errors (e.g. unique violations) before returning.
			err = convInsertError(err)
			glog.Errorf("couldn't execute COPY statement: %v", err)
			return err
		}
	}
	//if _, err = stmt.Exec(); err != nil {
	//	glog.Errorf("couldn't execute COPY statement 2: %v", err)
	//	return err
	//}
	if err = stmt.Close(); err != nil {
		glog.Errorf("couldn't close COPY statement: %v", err)
		err = convInsertError(err)
		return err
	}
	return nil
}
func (db *Database) CreateDivision(coords Coords, units []Unit, name string, factionId int) int { tx, err := db.db.Begin() if err != nil { panic(err) } row := tx.QueryRow("INSERT INTO division (faction, division_name, route) "+ "VALUES($1, $2, ARRAY[($3, $4)]) RETURNING id", factionId, name, coords.X, coords.Y) var divisionId int err = row.Scan(&divisionId) if err != nil { panic(err) } stmt, err := tx.Prepare(pq.CopyIn("unit", "division", "unit_type", "amount")) for _, unit := range units { _, err = stmt.Exec(divisionId, unit.TypeNum, unit.Amount) if err != nil { panic(err) } } _, err = stmt.Exec() if err != nil { panic(err) } err = stmt.Close() if err != nil { panic(err) } err = tx.Commit() if err != nil { panic(err) } return divisionId }
func (ops *DBOps) AddOneSymbol(symbol string, name string, valid bool) error { txn, err := ops.Db.Begin() if err != nil { log.Println(err.Error()) return err } defer txn.Rollback() stmt, err := txn.Prepare(pq.CopyIn("symbol", "value", "name", "valid", "updatetime")) if err != nil { log.Println(err.Error()) return err } _, err = stmt.Exec(symbol, name, valid, time.Now()) if err != nil { log.Println(err.Error()) return err } _, err = stmt.Exec() if err != nil { log.Println(err.Error()) return err } err = stmt.Close() if err != nil { log.Println(err.Error()) return err } err = txn.Commit() if err != nil { log.Println(err.Error()) return err } return nil }
func gopher(i int, lines []string, waitGroup *sync.WaitGroup) { log.Println("Starting:", i) db := getConnection() defer db.Close() txn, err := db.Begin() if err != nil { log.Fatal(err) } stmt, err := txn.Prepare(pq.CopyIn("logs", "username", "database", "duration", "action", "table_name", "sql", "created_at")) if err != nil { log.Fatal(err) } for _, line := range lines { parseLine(strings.ToLower(line), stmt) } _, err = stmt.Exec() if err != nil { log.Fatal(err) } err = stmt.Close() if err != nil { log.Fatal(err) } err = txn.Commit() if err != nil { log.Fatal(err) } db.Close() log.Println("Finishing:", i) waitGroup.Done() }
// main replays a Dota 2 (Source 2) demo file given as the first CLI argument,
// streaming entity property changes and game events into the "gamevis"
// Postgres database inside one big transaction. All work happens in parser
// callbacks registered below; parser.Start() at the bottom drives them.
func main() {
	path := os.Args[1]
	parser, err := manta.NewParserFromFile(path)
	if err != nil {
		panic(err)
	}
	db, err := sql.Open("postgres", "postgres://*****:*****@localhost/gamevis?sslmode=disable")
	if err != nil {
		panic(err)
	}
	// Single transaction for the whole replay; committed in AfterStopCallback.
	txn, err := db.Begin()
	if err != nil {
		log.Fatal(err)
	}
	// Shared state mutated by the callbacks below.
	var sessionId int          // sessions row id, set by the file-header callback
	var propStream *sql.Stmt   // COPY stream for entity_props, opened in the header callback
	var events []*EventRow     // accumulated events, written out after the replay finishes
	var tickrate int
	// Properties that change constantly and are not worth recording.
	skipProps := map[string]bool{
		"m_iCursor.0000":    true,
		"m_iCursor.0001":    true,
		"m_anglediff":       true,
		"m_NetworkActivity": true,
		"CBodyComponentBaseAnimatingOverlay.m_nNewSequenceParity": true,
		"CBodyComponentBaseAnimatingOverlay.m_nResetEventsParity": true,
		"m_NetworkSequenceIndex":                                  true,
		"CBodyComponentBaseAnimatingOverlay.m_flPlaybackRate":     true,
		"CDOTAGamerules.m_iFoWFrameNumber":                        true,
	}
	entities := make(map[int32](*manta.Properties))
	heroes := make(map[int32](*manta.PacketEntity)) // player id -> hero
	updates := dotautil.NewBufferedUpdates()
	lastFlush := uint32(0)
	ENTITY_UPDATE_BUFFER_TICKS := uint32(15) // accumulate buffer updates for `n` ticks before flushing

	// On the demo header: create the sessions row and open the COPY stream
	// that all entity property updates are written to.
	parser.Callbacks.OnCDemoFileHeader(func(header *dota.CDemoFileHeader) error {
		log.Println(header)
		trimmed := strings.Trim(*header.DemoFileStamp, "\x00")
		header.DemoFileStamp = &trimmed
		jsonHeader, err := json.Marshal(header)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Print("Creating session...")
		err = txn.QueryRow("INSERT INTO sessions (title, level, game, data, tickrate) VALUES ($1, $2, $3, $4, 30) RETURNING id", header.GetServerName(), header.GetMapName(), "dota_reborn", jsonHeader).Scan(&sessionId)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("ok", sessionId)
		fmt.Print("Opening entity props stream...")
		propStream, err = txn.Prepare(pq.CopyIn("entity_props", "session_id", "index", "tick", "prop", "value") + " WITH NULL 'null'")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("ok")
		return nil
	})

	// Chat events: record the event plus, where resolvable, the involved
	// players' entities and their heroes' positions.
	parser.Callbacks.OnCDOTAUserMsg_ChatEvent(func(ce *dota.CDOTAUserMsg_ChatEvent) error {
		row := &EventRow{
			Tick: parser.Tick,
			Name: strings.ToLower(ce.GetType().String()),
			Data: ce,
		}
		locations := make(map[string]dotautil.Vector3)
		// NOTE: shadows the outer `entities` map; this one keys role names
		// ("player 1", "hero 1", ...) to entity indices for this event only.
		entities := make(map[string]int32)
		processPlayerIdForEvent := func(keySuffix string, playerIdOpt *int32) {
			if playerIdOpt == nil || *playerIdOpt == -1 {
				return
			}
			playerId := *playerIdOpt
			playerEnt, found := dotautil.LookupEntityByPropValue(parser, "m_iPlayerID", playerId)
			if found {
				entities["player "+keySuffix] = playerEnt.Index
			} else {
				log.Println("unable to find player ID", playerId)
			}
			heroEnt, found := heroes[playerId]
			if found {
				entities["hero "+keySuffix] = heroEnt.Index
				loc, err := dotautil.GetEntityLocation(heroEnt)
				if err == nil {
					locations["hero "+keySuffix] = *loc
				} else {
					log.Println("getEntityLocation:", err)
				}
			} else {
				log.Println("chat event player", playerId, "has no hero")
			}
		}
		// Chat events carry up to six optional player id slots.
		processPlayerIdForEvent("1", ce.Playerid_1)
		processPlayerIdForEvent("2", ce.Playerid_2)
		processPlayerIdForEvent("3", ce.Playerid_3)
		processPlayerIdForEvent("4", ce.Playerid_4)
		processPlayerIdForEvent("5", ce.Playerid_5)
		processPlayerIdForEvent("6", ce.Playerid_6)
		if len(locations) > 0 {
			row.Locations = locations
		}
		if len(entities) > 0 {
			row.Entities = entities
		}
		events = append(events, row)
		return nil
	})

	// Combat log entries: record the event with its map location and, when
	// the entry names a player, that player's entity and hero position.
	parser.Callbacks.OnCMsgDOTACombatLogEntry((func(cle *dota.CMsgDOTACombatLogEntry) error {
		row := &EventRow{
			Tick: parser.Tick,
			Name: strings.ToLower(cle.GetType().String()),
			Data: cle,
		}
		locations := make(map[string]dotautil.Vector3)
		entities := make(map[string]int32)
		if cle.LocationX != nil && cle.LocationY != nil {
			locations["event"] = dotautil.Vector3{cle.GetLocationX(), cle.GetLocationY(), 0}
		}
		if cle.EventLocation != nil {
			playerId := int32(cle.GetEventLocation())
			playerEnt, found := dotautil.LookupEntityByPropValue(parser, "m_iPlayerID", playerId)
			if found {
				entities["player"] = playerEnt.Index
			} else {
				log.Println("event referring to non-existent player ID")
			}
			heroEnt, found := heroes[playerId]
			if found {
				loc, err := dotautil.GetEntityLocation(heroEnt)
				if err == nil {
					locations["hero"] = *loc
				} else {
					log.Println("getEntityLocation: ", err)
				}
			} else {
				log.Println("combat log player", playerId, "has no hero")
			}
		}
		if len(locations) > 0 {
			row.Locations = locations
		}
		if len(entities) > 0 {
			row.Entities = entities
		}
		events = append(events, row)
		return nil
	}))

	// File info arrives near the end of the demo; derive the real tickrate.
	parser.Callbacks.OnCDemoFileInfo(func(fi *dota.CDemoFileInfo) error {
		tickrate = round(float64(fi.GetPlaybackTicks()) / float64(fi.GetPlaybackTime()))
		return nil
	})

	// Source 2 entity-handle layout: low NUM_ENT_ENTRY_BITS bits index the
	// entity slot; an all-ones entry means "no entity".
	const MAX_CLIENTS = 64
	const NUM_ENT_ENTRY_BITS = 14
	const NUM_ENT_ENTRIES = 1 << NUM_ENT_ENTRY_BITS
	const ENT_ENTRY_MASK = NUM_ENT_ENTRIES - 1

	// Track which hero entity each player controls via CDOTA_PlayerResource.
	parser.OnPacketEntity(func(pe *manta.PacketEntity, event manta.EntityEventType) error {
		if pe.ClassName != "CDOTA_PlayerResource" {
			return nil
		}
		for i := int32(0); i < MAX_CLIENTS; i++ {
			heroProp := fmt.Sprintf("m_vecPlayerTeamData.%04d.m_hSelectedHero", i)
			heroHandle, found := pe.FetchUint32(heroProp)
			if !found {
				continue
			}
			heroEntry := heroHandle & ENT_ENTRY_MASK
			if heroEntry == ENT_ENTRY_MASK {
				continue // handle is the "null entity" sentinel
			}
			heroEnt, found := parser.PacketEntities[int32(heroEntry)]
			if !found {
				log.Fatal("could not find entity pointed by handle")
			}
			heroes[i] = heroEnt
		}
		return nil
	})

	// Main entity-change callback: buffer property diffs, and periodically
	// flush them to the COPY stream, synthesizing hero_move events for hero
	// position changes.
	parser.OnPacketEntity(func(pe *manta.PacketEntity, event manta.EntityEventType) error {
		if event == manta.EntityEventType_Create {
			properties := manta.NewProperties()
			entities[pe.Index] = properties
		} else if event != manta.EntityEventType_Update {
			return nil
		}
		// flush buffered updates if enough ticks have passed
		// (the second clause handles the tick counter going backwards)
		if (parser.Tick-lastFlush) > ENTITY_UPDATE_BUFFER_TICKS || lastFlush > parser.Tick {
			// loop through all of the updates and map 'position' to movement events
			for index, props := range updates.Entities {
				// has this entity's position changed?
				update, found := props["position"]
				if !found {
					continue
				}
				// is this entity a hero?
				controllingPlayer := int32(-1)
				for playerId, ent := range heroes {
					if ent.Index == index {
						controllingPlayer = playerId
						break
					}
				}
				if controllingPlayer < 0 {
					continue
				}
				playerEnt, found := dotautil.LookupEntityByPropValue(parser, "m_iPlayerID", controllingPlayer)
				if !found {
					panic("unable to find player ID")
				}
				// due to Go's very strong typing, this is the nicest way to
				// unbox the new position value
				pos, ok := (update.Value.(*PropValueColumn)).Value.(*dotautil.Vector3)
				if !ok {
					panic("position was not a Vector3")
				}
				row := &EventRow{
					Tick: update.Tick,
					Name: "hero_move",
					Locations: map[string]dotautil.Vector3{
						"hero": *pos,
					},
					Entities: map[string]int32{
						"hero":   index,
						"player": playerEnt.Index,
					},
					Data: map[string]interface{}{
						"playerid": controllingPlayer,
					},
				}
				events = append(events, row)
			}
			updates.Flush(sessionId, propStream)
			lastFlush = parser.Tick
		}
		for prop, value := range pe.Properties.KV {
			// skip uninteresting props which change often
			if _, skip := skipProps[prop]; skip {
				continue
			}
			// Only record actual changes against our last-seen snapshot.
			oldValue, found := entities[pe.Index].Fetch(prop)
			if found && reflect.DeepEqual(value, oldValue) {
				continue
			}
			dbProp, dbValue, err := processPropChange(pe, prop, value)
			if err != nil {
				log.Fatal(err)
			}
			updates.Buffer(pe.Index, dbProp, parser.Tick, dbValue)
			// merge
			entities[pe.Index].KV[prop] = value
		}
		return nil
	})

	// After the demo finishes: flush remaining prop updates, stream out the
	// accumulated events, fix up the tickrate, and commit everything.
	parser.AfterStopCallback = func() {
		fmt.Print("Final flush...")
		updates.Flush(sessionId, propStream)
		fmt.Println("ok")
		fmt.Print("Waiting writer routines to complete...")
		updates.WG.Wait()
		fmt.Println("ok")
		fmt.Print("Finalising entity prop stream...")
		_, err = propStream.Exec()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("ok")
		fmt.Print("Closing entity prop stream...")
		err = propStream.Close()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("ok")
		fmt.Print("Opening events stream...")
		eventStream, err := txn.Prepare(pq.CopyIn("events", "session_id", "tick", "name", "data", "locations", "entities") + " WITH NULL 'null'")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("ok")
		for _, event := range events {
			dataJson, err := json.Marshal(event.Data)
			if err != nil {
				log.Fatal(err)
			}
			locationsJson, err := json.Marshal(event.Locations)
			if err != nil {
				log.Fatal(err)
			}
			entitiesJson, err := json.Marshal(event.Entities)
			if err != nil {
				log.Fatal(err)
			}
			_, err = eventStream.Exec(sessionId, event.Tick, event.Name, string(dataJson), string(locationsJson), string(entitiesJson))
			if err != nil {
				log.Fatal(err)
			}
		}
		fmt.Print("Finalising event stream...")
		_, err = eventStream.Exec()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("ok")
		fmt.Print("Closing event stream...")
		err = eventStream.Close()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("ok")
		fmt.Print("Updating tickrate...")
		_, err = txn.Exec("UPDATE sessions SET tickrate=$1 WHERE id=$2", tickrate, sessionId)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("ok", tickrate)
		fmt.Print("Committing transaction...")
		err = txn.Commit()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("ok")
	}
	parser.Start()
}
// Insert drains the iterator and bulk-inserts every data point into the
// "data_raw" table via COPY FROM in a single transaction. The deferred
// closure finalizes everything in a precise order — flush the COPY buffer,
// close the statement, then roll back if any step (including the loop)
// failed, otherwise commit — folding cleanup errors into the named return
// only when no earlier error exists.
func (db DB) Insert(iter decoders.Iterator) (err error) {
	log.Println("Beginning transaction...")
	// Begin transaction. Required for bulk insert
	txn, err := db.conn.Begin()
	if err != nil {
		return
	}
	// Prepare bulk insert statement
	stmt, err := txn.Prepare(pq.CopyIn("data_raw", "serial", "type", "data", "time", "device"))
	// Cleanup either when done or in the case of an error
	defer func() {
		log.Println("Closing off transaction...")
		// stmt is nil when Prepare failed; skip statement cleanup then.
		if stmt != nil {
			// Flush buffer
			if _, eerr := stmt.Exec(); eerr != nil {
				if err == nil {
					err = eerr
				}
			}
			// Close prepared statement
			if cerr := stmt.Close(); cerr != nil {
				if err == nil {
					err = cerr
				}
			}
		}
		// Rollback transaction on error
		if err != nil {
			txn.Rollback()
			log.Println("Transaction rolled back")
			return
		}
		// Commit transaction
		err = txn.Commit()
		log.Println("Transaction closed")
	}()
	// Check for error from preparing statement
	// (checked after the defer so the transaction is still rolled back)
	if err != nil {
		return
	}
	// Drain the iterator; a nil row or an error ends the loop, and any error
	// is handled by the deferred cleanup above.
	for {
		var row *decoders.DataPoint
		row, err = iter()
		if row == nil || err != nil {
			break
		}
		if constants.Verbose {
			log.Println("Data:", row.Data)
			log.Println("Time:", row.Time)
		}
		// Insert data. This is buffered.
		if _, err = stmt.Exec(row.Serial, string(row.Type), row.Data, row.Time, row.Device); err != nil {
			break
		}
	}
	return
}
// SaveItem (re)indexes the given object in the Postgres search tables: it
// looks up the object's search collection, deletes any existing entry for the
// item, then bulk-inserts the flattened key/value pairs with COPY FROM — all
// inside one transaction, rolled back on any error. Flattened values may be
// single strings or string slices; anything else is a programming error.
func (p *PostgresIndex) SaveItem(obj Indexable) error {
	flat := obj.Flatten()
	itemName := obj.DocID()
	collectionName := obj.Index()
	tx, err := datastore.Dbh.Begin()
	if err != nil {
		return err
	}
	// Resolve the search collection id (organization 1 is hard-coded here).
	var scID int32
	err = tx.QueryRow("SELECT id FROM goiardi.search_collections WHERE organization_id = $1 AND name = $2", 1, collectionName).Scan(&scID)
	if err != nil {
		tx.Rollback()
		return err
	}
	// Remove any previous index entries for this item before re-inserting.
	_, err = tx.Exec("SELECT goiardi.delete_search_item($1, $2, $3)", collectionName, itemName, 1)
	if err != nil {
		tx.Rollback()
		return err
	}
	// Best-effort: pq.CopyIn below uses an unqualified table name, so make
	// sure the goiardi schema is on the search path. Error intentionally
	// ignored.
	_, _ = tx.Exec("SET search_path TO goiardi")
	stmt, err := tx.Prepare(pq.CopyIn("search_items", "organization_id", "search_collection_id", "item_name", "value", "path"))
	if err != nil {
		tx.Rollback()
		return err
	}
	defer stmt.Close()
	for k, v := range flat {
		k = util.PgSearchKey(k)
		// will the values need escaped like in file search?
		switch v := v.(type) {
		case string:
			v = util.IndexEscapeStr(v)
			_, err = stmt.Exec(1, scID, itemName, v, k)
			if err != nil {
				tx.Rollback()
				return err
			}
		case []string:
			// One row per element for multi-valued keys.
			for _, w := range v {
				w = util.IndexEscapeStr(w)
				_, err = stmt.Exec(1, scID, itemName, w, k)
				if err != nil {
					tx.Rollback()
					return err
				}
			}
		default:
			err = fmt.Errorf("pg search should have never been able to reach this state. Key %s had a value %v of type %T", k, v, v)
			tx.Rollback()
			return err
		}
	}
	// Flush the buffered COPY rows.
	_, err = stmt.Exec()
	if err != nil {
		tx.Rollback()
		return err
	}
	err = tx.Commit()
	if err != nil {
		return err
	}
	return nil
}
// processEntries stores one batch of CT log entries: each entry is processed
// individually (collecting hostname and observation rows), then the collected
// rows are bulk-inserted with two sequential COPY FROM statements, and the
// log's current height is advanced — all in one transaction. The deferred
// Rollback is a no-op after a successful Commit. Note each COPY statement is
// explicitly Closed before the next statement runs on the same transaction;
// the matching defers then become harmless no-ops.
func (s *Server) processEntries(logID int64, entries []*ctclient.Entry, start int64, numEntries int) error {
	tx, err := s.dbpool.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback()
	// Transaction-scoped versions of the server's prepared statements.
	txpr, err := s.prepareds.Tx(tx)
	if err != nil {
		return err
	}
	defer txpr.Close()
	var hnQueue []hostnameQueueItem
	observed := make([]observationQueueItem, 0, len(entries))
	for i, e := range entries {
		// Per-entry failures are logged but do not abort the batch.
		err := s.processEntry(logID, tx, txpr, &hnQueue, &observed, e, start+int64(i))
		log.Errore(err, "process entry")
	}
	// First COPY: certificate -> hostname rows.
	chstmt, err := tx.Prepare(pq.CopyIn("certificate_hostname", "certificate_id", "hostname"))
	if err != nil {
		return err
	}
	defer chstmt.Close()
	log.Debugf("inserting %d hostnames", len(hnQueue))
	for i := range hnQueue {
		_, err = chstmt.Exec(hnQueue[i].CertificateID, hnQueue[i].Hostname)
		if err != nil {
			return err
		}
	}
	// Finalize this COPY before starting the next one on the same tx.
	chstmt.Close()
	// Second COPY: certificate observation rows for this log.
	obstmt, err := tx.Prepare(pq.CopyIn("certificate_observation", "certificate_id", "log_id", "log_index"))
	if err != nil {
		return err
	}
	defer obstmt.Close()
	log.Debugf("inserting %d observations", len(observed))
	for i := range observed {
		_, err = obstmt.Exec(observed[i].CertificateID, logID, observed[i].LogIndex)
		if err != nil {
			return err
		}
	}
	obstmt.Close()
	// Record how far into the log we have now processed.
	_, err = tx.Exec("UPDATE certificate_log SET current_height=$1 WHERE id=$2", start+int64(numEntries), logID)
	if err != nil {
		return err
	}
	err = tx.Commit()
	if err != nil {
		return err
	}
	//*start = *start + int64(numEntries)
	return nil
}
func (d *DB) createCrossRefsTable() error { if _, err := d.Exec(createCrossReferencesTable); err != nil { return fmt.Errorf("error creating cross-references table: %v", err) } txn, err := d.Begin() if err != nil { return fmt.Errorf("error creating transaction: %v", err) } copyXRef, err := txn.Prepare(pq.CopyIn("crossreferences", "ticket", "kind", "file_ticket", "anchor_ticket", "proto")) if err != nil { return fmt.Errorf("error preparing CrossReferences copy statement: %v", err) } rs, err := d.Query(`SELECT decor.target_ticket, decor.kind, decor.file_ticket, decor.anchor_ticket, anchor.start_offset, anchor.end_offset, anchor.snippet_start, anchor.snippet_end FROM Decorations decor JOIN Nodes anchor ON anchor.ticket = decor.anchor_ticket ORDER BY file_ticket;`) if err != nil { return fmt.Errorf("error creating decorations query: %v", err) } queryFile, err := d.Prepare("SELECT text, text_encoding FROM Nodes WHERE ticket = $1;") if err != nil { return fmt.Errorf("error preparing file query: %v", err) } var ( file srvpb.File raw srvpb.RawAnchor norm *xrefs.Normalizer lastFile string ) for rs.Next() { var ticket, kind string var snippetStart, snippetEnd sql.NullInt64 if err := rs.Scan(&ticket, &kind, &file.Ticket, &raw.Ticket, &raw.StartOffset, &raw.EndOffset, &snippetStart, &snippetEnd); err != nil { return fmt.Errorf("Decorations scan error: %v", err) } if snippetStart.Valid { raw.SnippetStart = int32(snippetStart.Int64) } else { raw.SnippetStart = 0 } if snippetEnd.Valid { raw.SnippetEnd = int32(snippetEnd.Int64) } else { raw.SnippetEnd = 0 } if lastFile != file.Ticket { var textEncoding sql.NullString if err := queryFile.QueryRow(file.Ticket).Scan(&file.Text, &textEncoding); err != nil { return fmt.Errorf("error looking up file: %v", err) } file.Encoding = textEncoding.String norm = xrefs.NewNormalizer(file.Text) lastFile = file.Ticket } a, err := assemble.ExpandAnchor(&raw, &file, norm, kind) if err != nil { return fmt.Errorf("error expanding anchor: %v", err) } 
rec, err := proto.Marshal(a2a(a, true)) if err != nil { return fmt.Errorf("error marshaling anchor: %v", err) } if _, err := copyXRef.Exec(ticket, kind, file.Ticket, raw.Ticket, rec); err != nil { return fmt.Errorf("copy error: %v", err) } } if _, err := copyXRef.Exec(); err != nil { return fmt.Errorf("error flushing CrossReferences: %v", err) } if err := txn.Commit(); err != nil { return fmt.Errorf("transaction commit error: %v", err) } return nil }
func (d *DB) copyEntries(entries <-chan *spb.Entry) error { // Start a transaction for a COPY statement per table nodesTx, err := d.Begin() if err != nil { return err } edgesTx, err := d.Begin() if err != nil { return err } // Create each table in their corresponding transactions to speed up COPY if _, err := nodesTx.Exec(createNodesTable); err != nil { return fmt.Errorf("error truncating Nodes table: %v", err) } else if _, err := edgesTx.Exec(createEdgeTable); err != nil { return fmt.Errorf("error truncating Edges table: %v", err) } copyNode, err := nodesTx.Prepare(pq.CopyIn( "nodes", "ticket", "node_kind", "subkind", "text", "text_encoding", "start_offset", "end_offset", "snippet_start", "snippet_end", "other_facts_num", "other_facts", )) if err != nil { return fmt.Errorf("error preparing Nodes copy: %v", err) } copyEdge, err := edgesTx.Prepare(pq.CopyIn( "edges", "source", "kind", "target", "ordinal", )) if err != nil { return fmt.Errorf("error preparing Edges copy: %v", err) } var node srvpb.Node var nodeKind string var subkind, textEncoding *string var text *[]byte var startOffset, endOffset, snippetStart, snippetEnd *int64 for e := range entries { if graphstore.IsNodeFact(e) { ticket := kytheuri.ToString(e.Source) if node.Ticket != "" && node.Ticket != ticket { nodeTicket := node.Ticket node.Ticket = "" var rec []byte if len(node.Fact) > 0 { rec, err = proto.Marshal(&node) if err != nil { return fmt.Errorf("error marshaling facts: %v", err) } } if text != nil && textEncoding == nil { textEncoding = proto.String(facts.DefaultTextEncoding) } if _, err := copyNode.Exec( nodeTicket, nodeKind, subkind, text, textEncoding, startOffset, endOffset, snippetStart, snippetEnd, len(node.Fact), rec, ); err != nil { return fmt.Errorf("error copying node: %v", err) } node.Fact, text = node.Fact[0:0], nil nodeKind = "" subkind, textEncoding = nil, nil startOffset, endOffset, snippetStart, snippetEnd = nil, nil, nil, nil } if node.Ticket == "" { node.Ticket = ticket } switch 
e.FactName { case facts.NodeKind: nodeKind = string(e.FactValue) case facts.Subkind: subkind = proto.String(string(e.FactValue)) case facts.Text: text = &e.FactValue case facts.TextEncoding: textEncoding = proto.String(string(e.FactValue)) case facts.AnchorStart: n, err := strconv.ParseInt(string(e.FactValue), 10, 64) if err == nil { startOffset = proto.Int64(n) } case facts.AnchorEnd: n, err := strconv.ParseInt(string(e.FactValue), 10, 64) if err == nil { endOffset = proto.Int64(n) } case facts.SnippetStart: n, err := strconv.ParseInt(string(e.FactValue), 10, 64) if err == nil { snippetStart = proto.Int64(n) } case facts.SnippetEnd: n, err := strconv.ParseInt(string(e.FactValue), 10, 64) if err == nil { snippetEnd = proto.Int64(n) } default: node.Fact = append(node.Fact, &cpb.Fact{ Name: e.FactName, Value: e.FactValue, }) } } else if edges.IsForward(e.EdgeKind) { kind, ordinal, _ := edges.ParseOrdinal(e.EdgeKind) ticket := kytheuri.ToString(e.Source) if _, err := copyEdge.Exec(ticket, kind, kytheuri.ToString(e.Target), ordinal); err != nil { return fmt.Errorf("error copying edge: %v", err) } } } if _, err := copyNode.Exec(); err != nil { return fmt.Errorf("error flushing nodes: %v", err) } else if _, err := copyEdge.Exec(); err != nil { return fmt.Errorf("error flushing edges: %v", err) } if err := nodesTx.Commit(); err != nil { return fmt.Errorf("error committing Nodes transaction: %v", err) } else if err := edgesTx.Commit(); err != nil { return fmt.Errorf("error committing Edges transaction: %v", err) } return nil }
func createDevices(ids []int) error { info := getDBSettings() db, errCon := sql.Open("postgres", fmt.Sprintf("host=%v user=%v password=%v dbname=%v sslmode=require", info.Host, info.Username, info.Password, info.Database)) defer db.Close() if errCon != nil { log.Fatal(errCon) } txn, errT := db.Begin() if errT != nil { log.Println(errT) return errT } stmt, errPrep := txn.Prepare(pq.CopyIn("userdevices", "userid", "token", "endpointarn")) if errPrep != nil { log.Fatal(errPrep) } log.Println("Start For...") var wg sync.WaitGroup for _, id := range ids { userID := id wg.Add(1) go func(id int) { defer wg.Done() mr.Seed(time.Now().UnixNano()) numDevs := mr.Intn(3) + 1 for i := 0; i < numDevs; i++ { b := make([]byte, 32) c := make([]byte, 16) cr.Read(b) cr.Read(c) token := fmt.Sprintf("%X", b[0:32]) arn := fmt.Sprintf("arn:%X", c[0:8]) _, errA := stmt.Exec(userID, token, arn) if errA != nil { log.Fatal(errA) } } }(userID) } wg.Wait() log.Println("End For") log.Println("Start Exec") _, errEX := stmt.Exec() if errEX != nil { log.Fatal(errEX) } log.Println("End Exec") errClose := stmt.Close() if errClose != nil { log.Fatal(errClose) } log.Println("Start Commit") errCommit := txn.Commit() if errCommit != nil { log.Fatal(errCommit) } log.Println("End Commit") return nil }
func copyDataToDB(data []byte) error { info := getDBSettings() db, errCon := sql.Open("postgres", fmt.Sprintf("host=%v user=%v password=%v dbname=%v sslmode=require", info.Host, info.Username, info.Password, info.Database)) defer db.Close() if errCon != nil { log.Fatal(errCon) } txn, errT := db.Begin() if errT != nil { log.Println(errT) return errT } stmt, errPrep := txn.Prepare(pq.CopyIn("jobuser", "userid", "jobid", "messageid")) if errPrep != nil { log.Fatal(errPrep) } r := bytes.NewReader(data) reader := csv.NewReader(r) reader.Comma = ',' lineCount := 0 log.Println("Start For...") var wg sync.WaitGroup for { record, err := reader.Read() if err == io.EOF { break } else if err != nil { fmt.Println("Error:", err) return err } email := record[0] _ = email jobid := "ghi789" userID, _ := strconv.Atoi(record[1]) _ = userID wg.Add(1) go func(id int, e string) { defer wg.Done() _, errA := stmt.Exec(id, e, "100") if errA != nil { log.Fatal(errA) } }(lineCount+1, jobid) lineCount++ if lineCount == 1000000 { break } } wg.Wait() log.Println("End For") log.Println("Start Exec") _, errEX := stmt.Exec() if errEX != nil { log.Fatal(errEX) } log.Println("End Exec") errClose := stmt.Close() if errClose != nil { log.Fatal(errClose) } log.Println("Start Commit") errCommit := txn.Commit() if errCommit != nil { log.Fatal(errCommit) } log.Println("End Commit") return nil }
func TestCopyNullInfNaN(t *testing.T) { defer leaktest.AfterTest(t)() params, _ := createTestServerParams() s, db, _ := serverutils.StartServer(t, params) defer s.Stopper().Stop() if _, err := db.Exec(` CREATE DATABASE d; SET DATABASE = d; CREATE TABLE t ( i INT NULL, f FLOAT NULL, s STRING NULL, b BYTES NULL, d DATE NULL, t TIMESTAMP NULL, n INTERVAL NULL, o BOOL NULL, e DECIMAL NULL, tz TIMESTAMP WITH TIME ZONE NULL ); `); err != nil { t.Fatal(err) } txn, err := db.Begin() if err != nil { t.Fatal(err) } stmt, err := txn.Prepare(pq.CopyIn("t")) if err != nil { t.Fatal(err) } input := [][]interface{}{ {nil, nil, nil, nil, nil, nil, nil, nil, nil, nil}, {nil, math.Inf(1), nil, nil, nil, nil, nil, nil, nil, nil}, {nil, math.Inf(-1), nil, nil, nil, nil, nil, nil, nil, nil}, {nil, math.NaN(), nil, nil, nil, nil, nil, nil, nil, nil}, } for _, in := range input { _, err = stmt.Exec(in...) if err != nil { t.Fatal(err) } } _, err = stmt.Exec() if err != nil { t.Fatal(err) } err = stmt.Close() if err != nil { t.Fatal(err) } err = txn.Commit() if err != nil { t.Fatal(err) } rows, err := db.Query("SELECT * FROM t") if err != nil { t.Fatal(err) } for row, in := range input { if !rows.Next() { t.Fatal("expected more results") } data := make([]interface{}, len(in)) for i := range data { data[i] = new(interface{}) } if err := rows.Scan(data...); err != nil { t.Fatal(err) } for i, d := range data { v := d.(*interface{}) d = *v if a, b := fmt.Sprintf("%#v", d), fmt.Sprintf("%#v", in[i]); a != b { t.Fatalf("row %v, col %v: got %#v (%T), expected %#v (%T)", row, i, d, d, in[i], in[i]) } } } }