Example #1
// GetTransaction returns the transaction matching txid from the fake
// gateway's in-memory map, or (nil, nil) when the transaction is unknown.
func (fg fakeGateway) GetTransaction(txid cipher.SHA256) (*visor.TransactionResult, error) {
	str, ok := fg.transactions[txid.Hex()]
	if ok {
		return decodeTransaction(str), nil
	}
	return nil, nil
}
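GetTransaction above reads fg.transactions keyed by the hex-encoded txid, but the fakeGateway struct itself is not shown. Below is a minimal sketch of the shape it implies; the field type (and the decodeTransaction helper it calls) are assumptions, not taken from the source.

// fakeGateway sketch inferred from GetTransaction above: transactions maps
// a hex-encoded txid to a raw transaction string. The field type is assumed.
type fakeGateway struct {
	transactions map[string]string
}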
Example #2
// Get returns the signature for the specified block hash
func (bs BlockSigs) Get(hash cipher.SHA256) (cipher.Sig, error) {
	bin := bs.Sigs.Get(hash[:])
	if bin == nil {
		return cipher.Sig{}, fmt.Errorf("no sig for %v", hash.Hex())
	}
	var sig cipher.Sig
	if err := encoder.DeserializeRaw(bin, &sig); err != nil {
		return cipher.Sig{}, err
	}
	return sig, nil
}
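A hedged usage sketch of Get follows; the bs value and blockHash are assumed to exist in the caller, and only the Get signature above is taken from the source.

// Look up the stored signature for a block hash; Get reports a missing
// signature as an error rather than returning a zero-value cipher.Sig.
sig, err := bs.Get(blockHash)
if err != nil {
	log.Printf("no signature for block %s: %v", blockHash.Hex(), err)
	return
}
_ = sig // e.g. pass the signature on to block verification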
Example #3
// Rebuild reconstructs the hash index and XorHash from the given UxArray
func (self *UnspentPool) Rebuild(uxs UxArray) {
	self.Pool = make(map[cipher.SHA256]UxOut, len(uxs))
	xh := cipher.SHA256{}
	for i := range uxs {
		h := uxs[i].Hash()
		self.Pool[h] = uxs[i]
		xh = xh.Xor(uxs[i].SnapshotHash())
	}
	self.XorHash = xh
	if len(self.Pool) != len(uxs) {
		log.Panic("Corrupt UnspentPool array: contains duplicate UxOut")
	}
}
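Rebuild only touches two fields, so the struct shape it implies is small. The sketch below is an assumption and omits any other fields the real UnspentPool may carry.

// UnspentPool shape implied by Rebuild: an index from UxOut hash to UxOut
// plus a running XOR of all snapshot hashes. Other fields are omitted.
type UnspentPool struct {
	Pool    map[cipher.SHA256]UxOut
	XorHash cipher.SHA256
}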
Example #4
// NewTransactionOutputJSON converts a coin.TransactionOutput into its JSON
// representation, using src_tx as the output's source transaction hash.
func NewTransactionOutputJSON(ux coin.TransactionOutput, src_tx cipher.SHA256) TransactionOutputJSON {
	// Build a throwaway UxOut only to compute the output's hash.
	tmp := coin.UxOut{
		Body: coin.UxBody{
			SrcTransaction: src_tx,
			Address:        ux.Address,
			Coins:          ux.Coins,
			Hours:          ux.Hours,
		},
	}

	var o TransactionOutputJSON
	o.Hash = tmp.Hash().Hex()
	o.SourceTransaction = src_tx.Hex()

	o.Address = ux.Address.String()
	o.Coins = ux.Coins
	o.Hours = ux.Hours
	return o
}
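For reference, the fields filled by NewTransactionOutputJSON imply a struct roughly like the sketch below; the JSON tags and the uint64 types for Coins and Hours are assumptions based on coin.TransactionOutput, not the source definition.

// TransactionOutputJSON shape implied by the constructor above.
type TransactionOutputJSON struct {
	Hash              string `json:"hash"`
	SourceTransaction string `json:"src_tx"`
	Address           string `json:"address"`
	Coins             uint64 `json:"coins"`
	Hours             uint64 `json:"hours"`
}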
Example #5
func TestUnspentPoolRebuild(t *testing.T) {
	up := NewUnspentPool()
	arr := make(UxArray, 0)
	arr = append(arr, makeUxOut(t))
	arr = append(arr, makeUxOut(t))
	assert.Equal(t, len(up.Pool), 0)
	assert.Equal(t, up.XorHash, cipher.SHA256{})
	up.Rebuild(arr)
	assert.Equal(t, len(up.Pool), 2)
	for _, x := range arr {
		ux, ok := up.Pool[x.Hash()]
		assert.True(t, ok)
		assert.Equal(t, x, ux)
	}
	h := cipher.SHA256{}
	h = h.Xor(arr[0].SnapshotHash())
	h = h.Xor(arr[1].SnapshotHash())
	assert.Equal(t, up.XorHash, h)
	assert.NotEqual(t, up.XorHash, cipher.SHA256{})

	// Duplicate item in array causes panic
	arr = append(arr, arr[0])
	assert.Panics(t, func() { up.Rebuild(arr) })
}
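The test depends on a makeUxOut helper that is not shown. Below is a hypothetical sketch that would satisfy it, assuming the Skycoin cipher helpers GenerateKeyPair, AddressFromPubKey, RandByte, and SumSHA256.

// makeUxOut is a hypothetical stand-in for the helper used by the test:
// it builds a UxOut with a random source transaction hash and a fresh
// address, so successive calls yield distinct UxOut hashes.
func makeUxOut(t *testing.T) UxOut {
	pub, _ := cipher.GenerateKeyPair()
	return UxOut{
		Body: UxBody{
			SrcTransaction: cipher.SumSHA256(cipher.RandByte(128)),
			Address:        cipher.AddressFromPubKey(pub),
			Coins:          1000000,
			Hours:          100,
		},
	}
}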
Example #6
func (self *BlockStat) try_add_hash_and_sig(
	hash cipher.SHA256,
	sig cipher.Sig) int {

	if self.frozen {
		// To get a more accurate reject count, we would need to run the
		// checks below without inserting/updating. We do not want to pay
		// for that calculation just to improve debug numbers, so we simply:
		self.debug_reject_count += 1
		return 3
	}

	// 2016090* ROBUSTNESS: We need to put a limit on the number of
	// (signer_pubkey,hash) pairs that we process and forward. One
	// reason is to prevent an attack in which the attacker launches a
	// large number of nodes each of which make valid blocks, thus
	// causing large traffic that can potentially degrade the network
	// performance. Example: when we receive, say 63
	// (signer_pubkey,hash) pairs for a given seqno, we stop listening
	// for the updates. Say, the breakdown is: hash H1 from 50
	// signers, hash H2 from 10, hash H3 from 2 and hash H4 from 1.
	// We make a local decision to choose H1.
	if self.accept_count >= Cfg_consensus_max_candidate_messages {
		self.debug_neglect_count += 1
		return 1 // same as skip
	}

	// 20160913 Remember that we move those BlockStat that are old
	// enough (seqno difference is used as a measure of time
	// difference) to BlockChain, so that the storage requirement for
	// each node is now smaller. Yet we keep the limits to avoid
	// excessive forwarding.

	// At the end of the function, one of them must be 'true'.
	action_update := false
	action_skip := false
	action_insert := false

	var info HashCandidate

	if true {
		var have bool

		info, have = self.hash2info[hash]

		if !have {
			info = HashCandidate{}
			info.Init()
			action_insert = true
		} else {
			if _, saw := info.sig2none[sig]; saw {
				action_skip = true
			} else {
				action_update = true
			}
		}
	}

	if action_insert || action_update {

		if sig == all_zero_sig || hash == all_zero_hash { // Hack
			return 4 // <<<<<<<<
		}

		// PERFORMANCE: This is an expensive call:
		signer_pubkey, err := cipher.PubKeyFromSig(sig, hash)
		if err != nil {
			return 4 // <<<<<<<<
		}

		// Now do the check that we could not do prior to
		// obtaining 'signer_pubkey':
		if _, have := info.pubkey2sig[signer_pubkey]; have {
			// WARNING: ROBUSTNESS: The pubkey 'signer_pubkey' has
			// already published data with the same hash and same
			// seqno. This is not duplicate data: duplicates
			// have been intercepted earlier based on the (hash,sig)
			// pair; instead, the pubkey signed the block again and
			// published the result. So this can be a bug/mistake or
			// an attempt to artificially increase the traffic on our
			// network.
			self.debug_reject_count += 1

			action_update = false
			action_skip = true
			action_insert = false

			fmt.Printf("WARNING: %p, Detected malicious publish from"+
				" pubkey=%s for hash=%s sig=%s\n", &info,
				signer_pubkey.Hex()[:8], hash.Hex()[:8], sig.Hex()[:8])
		}

		// These bools could have changed, see above:
		if action_insert || action_update {
			if false {
				fmt.Printf("Calling %p->ObserveSigAndPubkey(sig=%s,"+
					" signer_pubkey=%s), hash=%s\n", &info,
					sig.Hex()[:8], signer_pubkey.Hex()[:8], hash.Hex()[:8])
			}
			info.ObserveSigAndPubkey(sig, signer_pubkey)
			self.accept_count += 1
		}
	}

	if action_insert {
		self.hash2info[hash] = info
	}

	self.debug_count += 1
	self.debug_usage += 1

	if !(action_update || action_skip || action_insert) {
		panic("Inconsistent BlockStat::try_add_hash_and_sig()")
	}

	if action_update || action_insert {
		return 0
	}

	if action_skip {
		return 1
	}

	return -1
}
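The integer codes returned above are only documented implicitly by the code paths. Below is a small, hedged helper that spells out that convention; the name describeAddResult is hypothetical and not part of the source.

// describeAddResult maps the return codes of try_add_hash_and_sig to
// human-readable labels, following the code paths above.
func describeAddResult(code int) string {
	switch code {
	case 0:
		return "accepted: hash/sig inserted or an existing candidate updated"
	case 1:
		return "skipped: duplicate sig or candidate limit reached"
	case 3:
		return "rejected: BlockStat is frozen"
	case 4:
		return "rejected: zero or unverifiable hash/sig"
	default:
		return "inconsistent internal state"
	}
}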