Example 1
// Update block map state, removing any deleted or invalid files.
func (m *BlockMap) Update(files []protocol.FileInfo) error {
	batch := new(leveldb.Batch)
	buf := make([]byte, 4)
	var key []byte
	for _, file := range files {
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}

		if file.IsDirectory() {
			continue
		}

		if file.IsDeleted() || file.IsInvalid() {
			for _, block := range file.Blocks {
				key = m.blockKeyInto(key, block.Hash, file.Name)
				batch.Delete(key)
			}
			continue
		}

		for i, block := range file.Blocks {
			binary.BigEndian.PutUint32(buf, uint32(i))
			key = m.blockKeyInto(key, block.Hash, file.Name)
			batch.Put(key, buf)
		}
	}
	return m.db.Write(batch, nil)
}
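A recurring pattern in Update and in the examples below is bounding the batch's memory footprint: flush and Reset once Len() passes a threshold, then write the final partial batch after the loop. A minimal self-contained sketch of that pattern against goleveldb's in-memory storage (maxBatchSize is a hypothetical threshold, not a library constant):

package main

import (
	"encoding/binary"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/storage"
)

const maxBatchSize = 1024 // hypothetical flush threshold

func main() {
	// Throwaway in-memory database, so the sketch runs without a data dir.
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	batch := new(leveldb.Batch)
	key := make([]byte, 4)
	for i := 0; i < 10000; i++ {
		binary.BigEndian.PutUint32(key, uint32(i))
		batch.Put(key, []byte("value")) // Put copies key and value into the batch
		// Flush and reuse the batch once it grows past the threshold,
		// keeping its memory footprint bounded.
		if batch.Len() > maxBatchSize {
			if err := db.Write(batch, nil); err != nil {
				log.Fatal(err)
			}
			batch.Reset()
		}
	}
	// Write whatever remains in the final partial batch.
	if err := db.Write(batch, nil); err != nil {
		log.Fatal(err)
	}
}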
Example 2
// childFileIds returns a map containing IDs of all Files that have parent
// refs to the given file.  The returned map keys are IDs, and the map values
// indicate if the child is a directory.
func (d *DriveDB) childFileIds(fileId string) (map[string]bool, error) {
	ids := make(map[string]bool)
	d.iters.Add(1)
	batch := new(leveldb.Batch)
	iter := d.db.NewIterator(util.BytesPrefix(childKeyPrefix(fileId)), nil)
	for iter.Next() {
		pidcid := deKey(string(iter.Key()))
		cid := pidcid[len(fileId)+1:]
		if gdriveFile, err := d.FileById(cid); err != nil {
			log.Printf("unknown child fileId %v: %v", cid, err)
			batch.Delete(iter.Key())
		} else {
			ids[cid] = gdriveFile.MimeType == driveFolderMimeType
		}
	}
	iter.Release()
	d.iters.Done()
	if batch.Len() > 0 {
		err := d.db.Write(batch, nil)
		if err != nil {
			log.Printf("error writing to db: %v", err)
		}
	}
	return ids, iter.Error()
}
Example 3
// ChildFileIds returns the IDs of all Files that have parent refs to the given file.
func (d *DriveDB) ChildFileIds(fileId string) ([]string, error) {
	var ids []string
	d.iters.Add(1)
	batch := new(leveldb.Batch)
	iter := d.db.NewIterator(util.BytesPrefix(childKey(fileId)), nil)
	for iter.Next() {
		pidcid := deKey(string(iter.Key()))
		cid := pidcid[len(fileId)+1:]
		found, err := d.db.Has(fileKey(cid), nil)
		if err == nil && found {
			ids = append(ids, cid)
		} else {
			batch.Delete(iter.Key())
		}
	}
	iter.Release()
	d.iters.Done()
	if batch.Len() > 0 {
		err := d.db.Write(batch, nil)
		if err != nil {
			log.Printf("error writing to db: %v", err)
		}
	}
	return ids, iter.Error()
}
Example 4
// putBatch writes the batch while holding a slot in the concurrent-write
// limiter, then accounts for the written entries and bytes.
func (l *internalLevelDBStore) putBatch(b *leveldb.Batch, numBytes int) {
	l.concurrentWriteLimit <- struct{}{} // acquire a write slot
	err := l.db.Write(b, nil)
	d.Chk.NoError(err)
	l.putCount += int64(b.Len())
	l.putBytes += int64(numBytes)
	<-l.concurrentWriteLimit // release the slot
}
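concurrentWriteLimit above is a buffered channel used as a counting semaphore: a send acquires a write slot, a receive releases it, so at most cap(channel) writes run at once. A small sketch of the same pattern in isolation (all names hypothetical):

// writeLimiter is a counting semaphore built from a buffered channel;
// its capacity is the maximum number of concurrent writers.
type writeLimiter chan struct{}

func (l writeLimiter) withSlot(f func()) {
	l <- struct{}{}        // acquire: blocks while cap(l) writers are active
	defer func() { <-l }() // release the slot, even if f panics
	f()
}

// Usage: limit := make(writeLimiter, 4) allows at most 4 concurrent
// calls to limit.withSlot(func() { ... }).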
Example 5
// Delete all states for a metric name.
// This operation is currently only used for cleaning.
func (db *DB) Delete(name string) error {
	// The metric name is the key prefix for all of its states.
	iter := db.db.NewIterator(util.BytesPrefix([]byte(name)), nil)
	defer iter.Release()
	batch := new(leveldb.Batch)
	for iter.Next() {
		batch.Delete(iter.Key())
	}
	if err := iter.Error(); err != nil {
		return err
	}
	if batch.Len() > 0 {
		return db.db.Write(batch, nil)
	}
	return nil
}
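util.BytesPrefix builds the half-open key range covering exactly the keys that start with the given prefix: Start is the prefix itself, and Limit is the prefix with its last non-0xff byte incremented. A tiny illustration, assuming fmt and goleveldb's util package are imported:

func prefixBounds() {
	// For the prefix "cpu." ('.' is 0x2e) the computed Limit is "cpu/"
	// ('/' is 0x2f), so iteration visits "cpu.load" and "cpu.idle" but
	// never reaches keys at or beyond "cpu/".
	r := util.BytesPrefix([]byte("cpu."))
	fmt.Printf("%q .. %q\n", r.Start, r.Limit) // "cpu." .. "cpu/"
}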
Example 6
// Discard block map state, removing the given files
func (m *BlockMap) Discard(files []protocol.FileInfo) error {
	batch := new(leveldb.Batch)
	var key []byte
	for _, file := range files {
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}

		for _, block := range file.Blocks {
			key = m.blockKeyInto(key, block.Hash, file.Name)
			batch.Delete(key)
		}
	}
	return m.db.Write(batch, nil)
}
Example 7
// Drop block map, removing all entries related to this block map from the db.
func (m *BlockMap) Drop() error {
	batch := new(leveldb.Batch)
	// Iterate every key under this block map's prefix (the key type byte
	// plus the folder portion), i.e. all block entries for this folder.
	iter := m.db.NewIterator(util.BytesPrefix(m.blockKeyInto(nil, nil, "")[:keyPrefixLen+keyFolderLen]), nil)
	defer iter.Release()
	for iter.Next() {
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}

		batch.Delete(iter.Key())
	}
	if iter.Error() != nil {
		return iter.Error()
	}
	return m.db.Write(batch, nil)
}
Example 8
// Reset removes all entries in this namespace.
func (n *NamespacedKV) Reset() {
	it := n.db.NewIterator(util.BytesPrefix(n.prefix), nil)
	defer it.Release()
	batch := new(leveldb.Batch)
	for it.Next() {
		batch.Delete(it.Key())
		if batch.Len() > batchFlushSize {
			if err := n.db.Write(batch, nil); err != nil {
				panic(err)
			}
			batch.Reset()
		}
	}
	if batch.Len() > 0 {
		if err := n.db.Write(batch, nil); err != nil {
			panic(err)
		}
	}
}
Example 9
// Delete metrics in a timestamp range. The range is half-open: start is
// included, end is excluded.
func (db *DB) Delete(name string, start, end uint32) error {
	// Key encoding.
	startMetric := &models.Metric{Name: name, Stamp: start}
	endMetric := &models.Metric{Name: name, Stamp: end}
	startKey := encodeKey(startMetric)
	endKey := encodeKey(endMetric)
	// Iterate the db over [startKey, endKey).
	iter := db.db.NewIterator(&util.Range{
		Start: startKey,
		Limit: endKey,
	}, nil)
	defer iter.Release()
	batch := new(leveldb.Batch)
	for iter.Next() {
		batch.Delete(iter.Key())
	}
	if err := iter.Error(); err != nil {
		return err
	}
	if batch.Len() > 0 {
		return db.db.Write(batch, nil)
	}
	return nil
}
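Explicit util.Range values have the same half-open semantics, which is why the comment above reads start-inclusive, end-exclusive: iteration visits every key k with Start <= k < Limit. A minimal sketch, assuming db is an open *leveldb.DB and fmt is imported:

func rangeBounds(db *leveldb.DB) error {
	// With Start "b" and Limit "d", keys like "b" and "c" are visited;
	// "d" itself is excluded.
	iter := db.NewIterator(&util.Range{
		Start: []byte("b"), // inclusive
		Limit: []byte("d"), // exclusive
	}, nil)
	defer iter.Release()
	for iter.Next() {
		fmt.Printf("%s\n", iter.Key())
	}
	return iter.Error()
}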
Example 10
func ldbUpdate(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) int64 {
	runtime.GC()

	batch := new(leveldb.Batch)
	if debugDB {
		l.Debugf("new batch %p", batch)
	}
	snap, err := db.GetSnapshot()
	if err != nil {
		panic(err)
	}
	if debugDB {
		l.Debugf("created snapshot %p", snap)
	}
	defer func() {
		if debugDB {
			l.Debugf("close snapshot %p", snap)
		}
		snap.Release()
	}()

	var maxLocalVer int64
	var fk []byte
	for _, f := range fs {
		name := []byte(f.Name)
		fk = deviceKeyInto(fk[:cap(fk)], folder, device, name)
		if debugDB {
			l.Debugf("snap.Get %p %x", snap, fk)
		}
		bs, err := snap.Get(fk, nil)
		if err == leveldb.ErrNotFound {
			if lv := ldbInsert(batch, folder, device, f); lv > maxLocalVer {
				maxLocalVer = lv
			}
			if f.IsInvalid() {
				ldbRemoveFromGlobal(snap, batch, folder, device, name)
			} else {
				ldbUpdateGlobal(snap, batch, folder, device, f)
			}
			continue
		}

		var ef FileInfoTruncated
		err = ef.UnmarshalXDR(bs)
		if err != nil {
			panic(err)
		}
		// Flags might change without the version being bumped when we set the
		// invalid flag on an existing file.
		if !ef.Version.Equal(f.Version) || ef.Flags != f.Flags {
			if lv := ldbInsert(batch, folder, device, f); lv > maxLocalVer {
				maxLocalVer = lv
			}
			if f.IsInvalid() {
				ldbRemoveFromGlobal(snap, batch, folder, device, name)
			} else {
				ldbUpdateGlobal(snap, batch, folder, device, f)
			}
		}

		// Write out and reuse the batch every few records, to avoid the batch
		// growing too large and thus allocating unnecessarily much memory.
		if batch.Len() > batchFlushSize {
			if debugDB {
				l.Debugf("db.Write %p", batch)
			}

			err = db.Write(batch, nil)
			if err != nil {
				panic(err)
			}

			batch.Reset()
		}
	}

	if debugDB {
		l.Debugf("db.Write %p", batch)
	}
	err = db.Write(batch, nil)
	if err != nil {
		panic(err)
	}

	return maxLocalVer
}
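ldbUpdate reads prior file records through a snapshot while accumulating new state in a batch, so its lookups stay consistent even as earlier batches are flushed to the database. A minimal sketch of goleveldb snapshot usage, assuming db is an open *leveldb.DB:

func snapshotRead(db *leveldb.DB, key []byte) ([]byte, error) {
	// Pin a consistent view of the database; writes committed to db
	// after this point are invisible through snap.
	snap, err := db.GetSnapshot()
	if err != nil {
		return nil, err
	}
	// Release the snapshot so compaction can reclaim old versions.
	defer snap.Release()
	return snap.Get(key, nil)
}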
Example 11
func ldbGenericReplace(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo, deleteFn deletionHandler) int64 {
	runtime.GC()

	sort.Sort(fileList(fs)) // sort list on name, same as in the database

	start := deviceKey(folder, device, nil)                            // before all folder/device files
	limit := deviceKey(folder, device, []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files

	batch := new(leveldb.Batch)
	if debugDB {
		l.Debugf("new batch %p", batch)
	}
	snap, err := db.GetSnapshot()
	if err != nil {
		panic(err)
	}
	if debugDB {
		l.Debugf("created snapshot %p", snap)
	}
	defer func() {
		if debugDB {
			l.Debugf("close snapshot %p", snap)
		}
		snap.Release()
	}()

	dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
	defer dbi.Release()

	moreDb := dbi.Next()
	fsi := 0
	var maxLocalVer int64

	for {
		var newName, oldName []byte
		moreFs := fsi < len(fs)

		if !moreDb && !moreFs {
			break
		}

		if moreFs {
			newName = []byte(fs[fsi].Name)
		}

		if moreDb {
			oldName = deviceKeyName(dbi.Key())
		}

		cmp := bytes.Compare(newName, oldName)

		if debugDB {
			l.Debugf("generic replace; folder=%q device=%v moreFs=%v moreDb=%v cmp=%d newName=%q oldName=%q", folder, protocol.DeviceIDFromBytes(device), moreFs, moreDb, cmp, newName, oldName)
		}

		switch {
		case moreFs && (!moreDb || cmp == -1):
			if debugDB {
				l.Debugln("generic replace; missing - insert")
			}
			// Database is missing this file. Insert it.
			if lv := ldbInsert(batch, folder, device, fs[fsi]); lv > maxLocalVer {
				maxLocalVer = lv
			}
			if fs[fsi].IsInvalid() {
				ldbRemoveFromGlobal(snap, batch, folder, device, newName)
			} else {
				ldbUpdateGlobal(snap, batch, folder, device, fs[fsi])
			}
			fsi++

		case moreFs && moreDb && cmp == 0:
			// File exists on both sides - compare versions. We might get an
			// update with the same version and different flags if a device has
			// marked a file as invalid, so handle that too.
			if debugDB {
				l.Debugln("generic replace; exists - compare")
			}
			var ef FileInfoTruncated
			if err := ef.UnmarshalXDR(dbi.Value()); err != nil {
				panic(err)
			}
			if !fs[fsi].Version.Equal(ef.Version) || fs[fsi].Flags != ef.Flags {
				if debugDB {
					l.Debugln("generic replace; differs - insert")
				}
				if lv := ldbInsert(batch, folder, device, fs[fsi]); lv > maxLocalVer {
					maxLocalVer = lv
				}
				if fs[fsi].IsInvalid() {
					ldbRemoveFromGlobal(snap, batch, folder, device, newName)
				} else {
					ldbUpdateGlobal(snap, batch, folder, device, fs[fsi])
				}
			} else if debugDB {
				l.Debugln("generic replace; equal - ignore")
			}

			fsi++
			moreDb = dbi.Next()

		case moreDb && (!moreFs || cmp == 1):
			if debugDB {
				l.Debugln("generic replace; exists - remove")
			}
			if lv := deleteFn(snap, batch, folder, device, oldName, dbi); lv > maxLocalVer {
				maxLocalVer = lv
			}
			moreDb = dbi.Next()
		}

		// Write out and reuse the batch every few records, to avoid the batch
		// growing too large and thus allocating unnecessarily much memory.
		if batch.Len() > batchFlushSize {
			if debugDB {
				l.Debugf("db.Write %p", batch)
			}

			err = db.Write(batch, nil)
			if err != nil {
				panic(err)
			}

			batch.Reset()
		}
	}

	if debugDB {
		l.Debugf("db.Write %p", batch)
	}
	err = db.Write(batch, nil)
	if err != nil {
		panic(err)
	}

	return maxLocalVer
}
Example 12
func main() {
	flag.Parse()

	if enableBufferPool {
		bpool = util.NewBufferPool(opt.DefaultBlockSize + 128)
	}

	log.Printf("Test DB stored at %q", dbPath)
	if httpProf != "" {
		log.Printf("HTTP pprof listening at %q", httpProf)
		runtime.SetBlockProfileRate(1)
		go func() {
			if err := http.ListenAndServe(httpProf, nil); err != nil {
				log.Fatalf("HTTPPROF: %v", err)
			}
		}()
	}

	runtime.GOMAXPROCS(runtime.NumCPU())

	os.RemoveAll(dbPath)
	stor, err := storage.OpenFile(dbPath, false)
	if err != nil {
		log.Fatal(err)
	}
	tstor := &testingStorage{stor}
	defer tstor.Close()

	fatalf := func(err error, format string, v ...interface{}) {
		atomic.StoreUint32(&fail, 1)
		atomic.StoreUint32(&done, 1)
		log.Printf("FATAL: "+format, v...)
		if err != nil && errors.IsCorrupted(err) {
			cerr := err.(*errors.ErrCorrupted)
			if !cerr.Fd.Zero() && cerr.Fd.Type == storage.TypeTable {
				log.Print("FATAL: corruption detected, scanning...")
				if !tstor.scanTable(storage.FileDesc{Type: storage.TypeTable, Num: cerr.Fd.Num}, false) {
					log.Printf("FATAL: unable to find corrupted key/value pair in table %v", cerr.Fd)
				}
			}
		}
		runtime.Goexit()
	}

	if openFilesCacheCapacity == 0 {
		openFilesCacheCapacity = -1
	}
	o := &opt.Options{
		OpenFilesCacheCapacity: openFilesCacheCapacity,
		DisableBufferPool:      !enableBufferPool,
		DisableBlockCache:      !enableBlockCache,
		ErrorIfExist:           true,
		Compression:            opt.NoCompression,
	}
	if enableCompression {
		o.Compression = opt.DefaultCompression
	}

	db, err := leveldb.Open(tstor, o)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var (
		mu               = &sync.Mutex{}
		gGetStat         = &latencyStats{}
		gIterStat        = &latencyStats{}
		gWriteStat       = &latencyStats{}
		gTransactionStat = &latencyStats{}
		startTime        = time.Now()

		writeReq    = make(chan *leveldb.Batch)
		writeAck    = make(chan error)
		writeAckAck = make(chan struct{})
	)

	go func() {
		for b := range writeReq {

			var err error
			if mrand.Float64() < transactionProb {
				log.Print("> Write using transaction")
				gTransactionStat.start()
				var tr *leveldb.Transaction
				if tr, err = db.OpenTransaction(); err == nil {
					if err = tr.Write(b, nil); err == nil {
						if err = tr.Commit(); err == nil {
							gTransactionStat.record(b.Len())
						}
					} else {
						tr.Discard()
					}
				}
			} else {
				gWriteStat.start()
				if err = db.Write(b, nil); err == nil {
					gWriteStat.record(b.Len())
				}
			}
			writeAck <- err
			<-writeAckAck
		}
	}()

	go func() {
		for {
			time.Sleep(3 * time.Second)

			log.Print("------------------------")

			log.Printf("> Elapsed=%v", time.Since(startTime))
			mu.Lock()
			log.Printf("> GetLatencyMin=%v GetLatencyMax=%v GetLatencyAvg=%v GetRatePerSec=%d",
				gGetStat.min, gGetStat.max, gGetStat.avg(), gGetStat.ratePerSec())
			log.Printf("> IterLatencyMin=%v IterLatencyMax=%v IterLatencyAvg=%v IterRatePerSec=%d",
				gIterStat.min, gIterStat.max, gIterStat.avg(), gIterStat.ratePerSec())
			log.Printf("> WriteLatencyMin=%v WriteLatencyMax=%v WriteLatencyAvg=%v WriteRatePerSec=%d",
				gWriteStat.min, gWriteStat.max, gWriteStat.avg(), gWriteStat.ratePerSec())
			log.Printf("> TransactionLatencyMin=%v TransactionLatencyMax=%v TransactionLatencyAvg=%v TransactionRatePerSec=%d",
				gTransactionStat.min, gTransactionStat.max, gTransactionStat.avg(), gTransactionStat.ratePerSec())
			mu.Unlock()

			cachedblock, _ := db.GetProperty("leveldb.cachedblock")
			openedtables, _ := db.GetProperty("leveldb.openedtables")
			alivesnaps, _ := db.GetProperty("leveldb.alivesnaps")
			aliveiters, _ := db.GetProperty("leveldb.aliveiters")
			blockpool, _ := db.GetProperty("leveldb.blockpool")
			log.Printf("> BlockCache=%s OpenedTables=%s AliveSnaps=%s AliveIter=%s BlockPool=%q",
				cachedblock, openedtables, alivesnaps, aliveiters, blockpool)

			log.Print("------------------------")
		}
	}()

	for ns, numKey := range numKeys {
		func(ns, numKey int) {
			log.Printf("[%02d] STARTING: numKey=%d", ns, numKey)

			keys := make([][]byte, numKey)
			for i := range keys {
				keys[i] = randomData(nil, byte(ns), 1, uint32(i), keyLen)
			}

			wg.Add(1)
			go func() {
				var wi uint32
				defer func() {
					log.Printf("[%02d] WRITER DONE #%d", ns, wi)
					wg.Done()
				}()

				var (
					b       = new(leveldb.Batch)
					k2, v2  []byte
					nReader int32
				)
				for atomic.LoadUint32(&done) == 0 {
					log.Printf("[%02d] WRITER #%d", ns, wi)

					b.Reset()
					for _, k1 := range keys {
						k2 = randomData(k2, byte(ns), 2, wi, keyLen)
						v2 = randomData(v2, byte(ns), 3, wi, valueLen)
						b.Put(k2, v2)
						b.Put(k1, k2)
					}
					writeReq <- b
					if err := <-writeAck; err != nil {
						writeAckAck <- struct{}{}
						fatalf(err, "[%02d] WRITER #%d db.Write: %v", ns, wi, err)
					}

					snap, err := db.GetSnapshot()
					if err != nil {
						writeAckAck <- struct{}{}
						fatalf(err, "[%02d] WRITER #%d db.GetSnapshot: %v", ns, wi, err)
					}

					writeAckAck <- struct{}{}

					wg.Add(1)
					atomic.AddInt32(&nReader, 1)
					go func(snapwi uint32, snap *leveldb.Snapshot) {
						var (
							ri       int
							iterStat = &latencyStats{}
							getStat  = &latencyStats{}
						)
						defer func() {
							mu.Lock()
							gGetStat.add(getStat)
							gIterStat.add(iterStat)
							mu.Unlock()

							atomic.AddInt32(&nReader, -1)
							log.Printf("[%02d] READER #%d.%d DONE Snap=%v Alive=%d IterLatency=%v GetLatency=%v", ns, snapwi, ri, snap, atomic.LoadInt32(&nReader), iterStat.avg(), getStat.avg())
							snap.Release()
							wg.Done()
						}()

						stopi := snapwi + 3
						for (ri < 3 || atomic.LoadUint32(&wi) < stopi) && atomic.LoadUint32(&done) == 0 {
							var n int
							iter := snap.NewIterator(dataPrefixSlice(byte(ns), 1), nil)
							iterStat.start()
							for iter.Next() {
								k1 := iter.Key()
								k2 := iter.Value()
								iterStat.record(1)

								if dataNS(k2) != byte(ns) {
									fatalf(nil, "[%02d] READER #%d.%d K%d invalid in-key NS: want=%d got=%d", ns, snapwi, ri, n, ns, dataNS(k2))
								}

								kwritei := dataI(k2)
								if kwritei != snapwi {
									fatalf(nil, "[%02d] READER #%d.%d K%d invalid in-key iter num: %d", ns, snapwi, ri, n, kwritei)
								}

								getStat.start()
								v2, err := snap.Get(k2, nil)
								if err != nil {
									fatalf(err, "[%02d] READER #%d.%d K%d snap.Get: %v\nk1: %x\n -> k2: %x", ns, snapwi, ri, n, err, k1, k2)
								}
								getStat.record(1)

								if checksum0, checksum1 := dataChecksum(v2); checksum0 != checksum1 {
									err := &errors.ErrCorrupted{Fd: storage.FileDesc{0xff, 0}, Err: fmt.Errorf("v2: %x: checksum mismatch: %v vs %v", v2, checksum0, checksum1)}
									fatalf(err, "[%02d] READER #%d.%d K%d snap.Get: %v\nk1: %x\n -> k2: %x", ns, snapwi, ri, n, err, k1, k2)
								}

								n++
								iterStat.start()
							}
							iter.Release()
							if err := iter.Error(); err != nil {
								fatalf(err, "[%02d] READER #%d.%d K%d iter.Error: %v", ns, snapwi, ri, numKey, err)
							}
							if n != numKey {
								fatalf(nil, "[%02d] READER #%d.%d missing keys: want=%d got=%d", ns, snapwi, ri, numKey, n)
							}

							ri++
						}
					}(wi, snap)

					atomic.AddUint32(&wi, 1)
				}
			}()

			delB := new(leveldb.Batch)
			wg.Add(1)
			go func() {
				var (
					i        int
					iterStat = &latencyStats{}
				)
				defer func() {
					log.Printf("[%02d] SCANNER DONE #%d", ns, i)
					wg.Done()
				}()

				time.Sleep(2 * time.Second)

				for atomic.LoadUint32(&done) == 0 {
					var n int
					delB.Reset()
					iter := db.NewIterator(dataNsSlice(byte(ns)), nil)
					iterStat.start()
					for iter.Next() && atomic.LoadUint32(&done) == 0 {
						k := iter.Key()
						v := iter.Value()
						iterStat.record(1)

						for ci, x := range [...][]byte{k, v} {
							checksum0, checksum1 := dataChecksum(x)
							if checksum0 != checksum1 {
								if ci == 0 {
									fatalf(nil, "[%02d] SCANNER %d.%d invalid key checksum: want %d, got %d\n%x -> %x", ns, i, n, checksum0, checksum1, k, v)
								} else {
									fatalf(nil, "[%02d] SCANNER %d.%d invalid value checksum: want %d, got %d\n%x -> %x", ns, i, n, checksum0, checksum1, k, v)
								}
							}
						}

						if dataPrefix(k) == 2 || mrand.Int()%999 == 0 {
							delB.Delete(k)
						}

						n++
						iterStat.start()
					}
					iter.Release()
					if err := iter.Error(); err != nil {
						fatalf(err, "[%02d] SCANNER #%d.%d iter.Error: %v", ns, i, n, err)
					}

					if n > 0 {
						log.Printf("[%02d] SCANNER #%d IterLatency=%v", ns, i, iterStat.avg())
					}

					if delB.Len() > 0 && atomic.LoadUint32(&done) == 0 {
						t := time.Now()
						writeReq <- delB
						if err := <-writeAck; err != nil {
							writeAckAck <- struct{}{}
							fatalf(err, "[%02d] SCANNER #%d db.Write: %v", ns, i, err)
						} else {
							writeAckAck <- struct{}{}
						}
						log.Printf("[%02d] SCANNER #%d Deleted=%d Time=%v", ns, i, delB.Len(), time.Since(t))
					}

					i++
				}
			}()
		}(ns, numKey)
	}

	go func() {
		// signal.Notify requires a buffered channel to avoid missing signals.
		sig := make(chan os.Signal, 1)
		signal.Notify(sig, os.Interrupt, os.Kill)
		log.Printf("Got signal: %v, exiting...", <-sig)
		atomic.StoreUint32(&done, 1)
	}()

	wg.Wait()
}
Example 13
func run(d *osmpbf.Decoder, db *leveldb.DB, config Settings) {

	batch := new(leveldb.Batch)

	var nc, wc, rc uint64
	for {
		if v, err := d.Decode(); err == io.EOF {
			break
		} else if err != nil {
			log.Fatal(err)
		} else {
			switch v := v.(type) {

			case *osmpbf.Node:

				// inc count
				nc++

				// ----------------
				// write to leveldb
				// ----------------

				// write immediately
				// cacheStore(db, v)

				// write in batches
				cacheQueue(batch, v)
				if batch.Len() > config.BatchSize {
					cacheFlush(db, batch)
				}

				// ----------------
				// handle conditions
				// ----------------

				if len(config.Ids) != 0 {
					i := sort.SearchStrings(config.Ids, strconv.FormatInt(v.ID, 10))
					if i < len(config.Ids) && config.Ids[i] == strconv.FormatInt(v.ID, 10) {
						onNode(v)
					}
				} else {
					if !hasTags(v.Tags) {
						break
					}

					v.Tags = trimTags(v.Tags)
					if containsValidTags(v.Tags, config.Tags) {
						onNode(v)
					}
				}

			case *osmpbf.Way:

				// ----------------
				// write to leveldb
				// ----------------

				// Flush queued node writes before processing ways: cacheLookup
				// below reads node data back from leveldb, so all queued nodes
				// must be visible first.
				if batch.Len() > 1 {
					cacheFlush(db, batch)
				}

				// inc count
				wc++

				if len(config.Ids) != 0 {
					i := sort.SearchStrings(config.Ids, strconv.FormatInt(v.ID, 10))
					if i < len(config.Ids) && config.Ids[i] == strconv.FormatInt(v.ID, 10) {
						// lookup from leveldb
						latlons, err := cacheLookup(db, v)

						// skip ways which fail to denormalize
						if err != nil {
							break
						}

						// compute centroid
						var centroid = computeCentroid(latlons)

						onWay(v, latlons, centroid)
					}
				} else {
					if !hasTags(v.Tags) {
						break
					}

					v.Tags = trimTags(v.Tags)
					if containsValidTags(v.Tags, config.Tags) {

						// lookup from leveldb
						latlons, err := cacheLookup(db, v)

						// skip ways which fail to denormalize
						if err != nil {
							break
						}

						// compute centroid
						var centroid = computeCentroid(latlons)

						onWay(v, latlons, centroid)
					}
				}

			case *osmpbf.Relation:

				// inc count
				rc++

				onRelation(v)

			default:

				log.Fatalf("unknown type %T\n", v)

			}
		}
	}

	// fmt.Printf("Nodes: %d, Ways: %d, Relations: %d\n", nc, wc, rc)
}
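cacheQueue, cacheFlush and cacheLookup are helpers from the surrounding program, not shown here. A plausible shape for the first two, given how they are used above (an assumption, not the original implementation):

// Hypothetical sketch: queue a node's coordinates into the batch, keyed
// by its ID, and flush the batch to leveldb, resetting it for reuse.
func cacheQueue(batch *leveldb.Batch, node *osmpbf.Node) {
	id := []byte(strconv.FormatInt(node.ID, 10))
	val := []byte(fmt.Sprintf("%f:%f", node.Lat, node.Lon))
	batch.Put(id, val)
}

func cacheFlush(db *leveldb.DB, batch *leveldb.Batch) {
	if err := db.Write(batch, nil); err != nil {
		log.Fatal(err)
	}
	batch.Reset()
}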