Dash Core 0.12.2.1
P2P Digital Currency
pruning.py
Go to the documentation of this file.
1 #!/usr/bin/env python2
2 # Copyright (c) 2014-2015 The Bitcoin Core developers
3 # Distributed under the MIT software license, see the accompanying
4 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 
6 #
7 # Test pruning code
8 # ********
9 # WARNING:
10 # This test uses 4GB of disk space.
11 # This test takes 30 mins or more (up to 2 hours)
12 # ********
13 
14 from test_framework.test_framework import BitcoinTestFramework
15 from test_framework.util import *
16 
17 def calc_usage(blockdir):
18  return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.)
19 
21 
    def __init__(self):
        # Cache of spendable outputs; mine_full_block pops from this list and
        # refills it from listunspent() when it runs low.
        self.utxo = []
        # Receiving addresses for nodes 0 and 1; populated in setup_network.
        self.address = ["",""]
        # NOTE(review): mine_full_block also reads self.txouts, which is not
        # assigned anywhere visible in this listing -- confirm the original
        # file initializes it here (the index references gen_return_txouts()).
26 
27  def setup_chain(self):
28  print("Initializing test directory "+self.options.tmpdir)
29  initialize_chain_clean(self.options.tmpdir, 3)
30 
31  def setup_network(self):
32  self.nodes = []
33  self.is_network_split = False
34 
35  # Create nodes 0 and 1 to mine
36  self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
37  self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
38 
39  # Create node 2 to test pruning
40  self.nodes.append(start_node(2, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-prune=550"], timewait=900))
41  self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/"
42 
43  self.address[0] = self.nodes[0].getnewaddress()
44  self.address[1] = self.nodes[1].getnewaddress()
45 
46  # Determine default relay fee
47  self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
48 
49  connect_nodes(self.nodes[0], 1)
50  connect_nodes(self.nodes[1], 2)
51  connect_nodes(self.nodes[2], 0)
52  sync_blocks(self.nodes[0:3])
53 
54  def create_big_chain(self):
55  # Start by creating some coinbases we can spend later
56  self.nodes[1].generate(200)
57  sync_blocks(self.nodes[0:2])
58  self.nodes[0].generate(150)
59  # Then mine enough full blocks to create more than 550MiB of data
60  for i in xrange(645):
61  self.mine_full_block(self.nodes[0], self.address[0])
62 
63  sync_blocks(self.nodes[0:3])
64 
65  def test_height_min(self):
66  if not os.path.isfile(self.prunedir+"blk00000.dat"):
67  raise AssertionError("blk00000.dat is missing, pruning too early")
68  print "Success"
69  print "Though we're already using more than 550MiB, current usage:", calc_usage(self.prunedir)
70  print "Mining 25 more blocks should cause the first block file to be pruned"
71  # Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
72  for i in xrange(25):
73  self.mine_full_block(self.nodes[0],self.address[0])
74 
75  waitstart = time.time()
76  while os.path.isfile(self.prunedir+"blk00000.dat"):
77  time.sleep(0.1)
78  if time.time() - waitstart > 10:
79  raise AssertionError("blk00000.dat not pruned when it should be")
80 
81  print "Success"
82  usage = calc_usage(self.prunedir)
83  print "Usage should be below target:", usage
84  if (usage > 550):
85  raise AssertionError("Pruning target not being met")
86 
88  # Create stale blocks in manageable sized chunks
89  print "Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds"
90 
91  for j in xrange(12):
92  # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
93  # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
94  # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
95  stop_node(self.nodes[0],0)
96  self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)
97  # Mine 24 blocks in node 1
98  self.utxo = self.nodes[1].listunspent()
99  for i in xrange(24):
100  if j == 0:
101  self.mine_full_block(self.nodes[1],self.address[1])
102  else:
103  self.nodes[1].generate(1) #tx's already in mempool from previous disconnects
104 
105  # Reorg back with 25 block chain from node 0
106  self.utxo = self.nodes[0].listunspent()
107  for i in xrange(25):
108  self.mine_full_block(self.nodes[0],self.address[0])
109 
110  # Create connections in the order so both nodes can see the reorg at the same time
111  connect_nodes(self.nodes[1], 0)
112  connect_nodes(self.nodes[2], 0)
113  sync_blocks(self.nodes[0:3])
114 
115  print "Usage can be over target because of high stale rate:", calc_usage(self.prunedir)
116 
    def reorg_test(self):
        """Force a 288-block-deep reorg.

        Node 1 rewinds to 287 blocks behind the shared tip (so 288 blocks of
        undo data are needed), mines a longer 300-block replacement chain,
        and nodes 0/2 reorg onto it.  Returns (invalidheight, badhash): the
        height and hash of the first invalidated main-chain block.
        """
        # Reboot node 1 to clear its mempool (hopefully makes the
        # invalidateblock calls faster) and lower -blockmaxsize so it does
        # not keep mining its big disconnected-block transactions.
        stop_node(self.nodes[1],1)
        self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)

        height = self.nodes[1].getblockcount()
        print "Current block height:", height

        # Invalidate the block 287 back from the tip: that block plus
        # everything after it (288 blocks total) gets disconnected.
        invalidheight = height-287
        badhash = self.nodes[1].getblockhash(invalidheight)
        print "Invalidating block at height:",invalidheight,badhash
        self.nodes[1].invalidateblock(badhash)

        # Invalidating the main chain tips node 1 onto its previously mined
        # 24-block stale fork, which is not what we want.  Keep invalidating
        # fork tips until node 1 agrees with nodes 0/2 at invalidheight-1,
        # i.e. sits on the common ancestor 288 blocks back.
        mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
        curhash = self.nodes[1].getblockhash(invalidheight - 1)
        while curhash != mainchainhash:
            self.nodes[1].invalidateblock(curhash)
            curhash = self.nodes[1].getblockhash(invalidheight - 1)

        assert(self.nodes[1].getblockcount() == invalidheight - 1)
        print "New best height", self.nodes[1].getblockcount()

        # Reboot node 1 again to flush the giant disconnected transactions
        # from its mempool before mining the replacement chain.
        stop_node(self.nodes[1],1)
        self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)

        print "Generating new longer chain of 300 more blocks"
        self.nodes[1].generate(300)

        print "Reconnect nodes"
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[2], 1)
        sync_blocks(self.nodes[0:3])

        print "Verify height on node 2:",self.nodes[2].getblockcount()
        print "Usage possibly still high bc of stale blocks in block files:", calc_usage(self.prunedir)

        print "Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)"
        # Node 0 still has many large tx's in its mempool from the
        # disconnects, so some of these blocks are big.
        self.nodes[0].generate(220)
        sync_blocks(self.nodes[0:3])

        usage = calc_usage(self.prunedir)
        print "Usage should be below target:", usage
        if (usage > 550):
            raise AssertionError("Pruning target not being met")

        return invalidheight,badhash
169 
    def reorg_back(self):
        """Reorg node 2 back onto the original main chain, forcing it to
        re-download a block it had pruned away."""
        # The old main-chain fork block must already be pruned on node 2,
        # otherwise there is nothing to re-download and the test is moot.
        try:
            self.nodes[2].getblock(self.forkhash)
            raise AssertionError("Old block wasn't pruned so can't test redownload")
        except JSONRPCException as e:
            # getblock failing on the pruned block is the expected path.
            print "Will need to redownload block",self.forkheight

        # Verify we still have enough history to reorg back to the fork
        # point.  Although that is more than 288 blocks back, this chain was
        # written most recently -- only its other 299 small and 220 large
        # blocks sit in the block files after it -- so it is expected to
        # still be retained.
        self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))

        first_reorg_height = self.nodes[2].getblockcount()
        curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
        self.nodes[2].invalidateblock(curchainhash)
        goalbestheight = self.mainchainheight
        goalbesthash = self.mainchainhash2

        # As of 0.10 the block download logic cannot reorg node 2 to the
        # original chain created in create_chain_with_staleblocks on its own:
        # it knows of no peer on that chain to fetch the missing blocks from.
        # So invalidate the reorg_test chain on node 0 as well -- it can
        # switch successfully because it has all the block data -- and then
        # have node 0 mine enough blocks for a more-work chain than the
        # reorg_test chain, which triggers node 2's block download logic.
        # Node 2 is within 288 blocks of the fork point here, so it keeps
        # its ability to reorg.
        if self.nodes[2].getblockcount() < self.mainchainheight:
            blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
            print "Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed:", blocks_to_mine
            self.nodes[0].invalidateblock(curchainhash)
            assert(self.nodes[0].getblockcount() == self.mainchainheight)
            assert(self.nodes[0].getbestblockhash() == self.mainchainhash2)
            goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
            goalbestheight = first_reorg_height + 1

        print "Verify node 2 reorged back to the main chain, some blocks of which it had to redownload"
        # Poll (15-minute budget) for node 2 to reach the goal chain.
        waitstart = time.time()
        while self.nodes[2].getblockcount() < goalbestheight:
            time.sleep(0.1)
            if time.time() - waitstart > 900:
                raise AssertionError("Node 2 didn't reorg to proper height")
        assert(self.nodes[2].getbestblockhash() == goalbesthash)
        # The previously pruned block's data must be available again.
        assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight)
216 
    def mine_full_block(self, node, address):
        """Fill *node*'s mempool with 14 ~66kB transactions (together close
        to the 1MB block limit) and mine one block containing them.

        Each transaction spends one cached UTXO, sends the change minus a
        generous fee back to *address*, and is padded with the pre-generated
        self.txouts hex.
        """
        for j in xrange(14):
            # Top up the UTXO cache from the wallet when it runs low.
            if len(self.utxo) < 14:
                self.utxo = node.listunspent()
            inputs=[]
            outputs = {}
            t = self.utxo.pop()
            inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
            # Fee must be comfortably above the min relay rate for a ~66kB tx.
            remchange = t["amount"] - 100*self.relayfee
            outputs[address]=remchange
            # Create a basic transaction sending the change back to ourselves,
            # then splice the generated txouts into the middle of the raw hex:
            # rawtx[92:94] holds the txout count and is the only part of the
            # original transaction we overwrite.
            rawtx = node.createrawtransaction(inputs, outputs)
            newtx = rawtx[0:92]
            newtx = newtx + self.txouts
            newtx = newtx + rawtx[94:]
            # Appears to be ever so slightly faster to sign with SIGHASH_NONE.
            signresult = node.signrawtransaction(newtx,None,None,"NONE")
            txid = node.sendrawtransaction(signresult["hex"], True)
        # Mine one full-sized block made of the transactions just created.
        node.generate(1)
241 
242 
    def run_test(self):
        """Top-level scenario: build a huge chain, churn stale forks, survive
        a 288-block reorg, then prove a pruned block can be re-downloaded."""
        print "Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)"
        print "Mining a big blockchain of 995 blocks"
        self.create_big_chain()
        # Chain diagram key:
        # *       blocks on main chain
        # +,&,$,@ blocks on other forks
        # X       invalidated block
        # N1      Node 1
        #
        # Start by mining a simple chain that all nodes have
        # N0=N1=N2 **...*(995)

        print "Check that we haven't started pruning yet because we're below PruneAfterHeight"
        self.test_height_min()
        # Extend this chain past the PruneAfterHeight
        # N0=N1=N2 **...*(1020)

        print "Check that we'll exceed disk space target if we have a very high stale block rate"
        # NOTE(review): the stale-fork rounds (create_chain_with_staleblocks)
        # should be invoked at this point, but the call is not visible in
        # this listing -- confirm against the original file.
        #
        # Disconnect N0
        # And mine a 24 block chain on N1 and a separate 25 block chain on N0
        # N1=N2 **...*+...+(1044)
        # N0    **...**...**(1045)
        #
        # reconnect nodes causing reorg on N1 and N2
        # N1=N2 **...*(1020) *...**(1045)
        #                   \
        #                    +...+(1044)
        #
        # repeat this process until you have 12 stale forks hanging off the
        # main chain on N1 and N2
        # N0    *************************...***************************(1320)
        #
        # N1=N2 **...*(1020) *...**(1045) *..  ..**(1295) *...**(1320)
        #                   \            \                \
        #                    +...+(1044)  &..              $...$(1319)

        # Save some current chain state for later use
        self.mainchainheight = self.nodes[2].getblockcount() #1320
        # NOTE(review): reorg_back also reads self.mainchainhash2; no visible
        # line in this listing assigns it -- confirm the block hash at
        # mainchainheight is saved here too in the original file.

        print "Check that we can survive a 288 block reorg still"
        (self.forkheight,self.forkhash) = self.reorg_test() #(1033, )
        # Now create a 288 block reorg by mining a longer chain on N1
        # First disconnect N1
        # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
        # N1   **...*(1020) **...**(1032)X..
        #                  \
        #                   ++...+(1031)X..
        #
        # Now mine 300 more blocks on N1
        # N1    **...*(1020) **...**(1032) @@...@(1332)
        #                  \               \
        #                   \                X...
        #                    \                 \
        #                     ++...+(1031)X..   ..
        #
        # Reconnect nodes and mine 220 more blocks on N1
        # N1    **...*(1020) **...**(1032) @@...@@@(1552)
        #                  \               \
        #                   \                X...
        #                    \                 \
        #                     ++...+(1031)X..   ..
        #
        # N2    **...*(1020) **...**(1032) @@...@@@(1552)
        #                  \               \
        #                   \                *...**(1320)
        #                    \                 \
        #                     ++...++(1044)     ..
        #
        # N0    ********************(1032) @@...@@@(1552)
        #                                 \
        #                                  *...**(1320)

        print "Test that we can rerequest a block we previously pruned if needed for a reorg"
        self.reorg_back()
        # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
        # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
        # original main chain (*), but will require redownload of some blocks
        # In order to have a peer we think we can download from, must also perform this invalidation
        # on N0 and mine a new longest chain to trigger.
        # Final result:
        # N0    ********************(1032) **...****(1553)
        #                                 \
        #                                  X@...@@@(1552)
        #
        # N2    **...*(1020) **...**(1032) **...****(1553)
        #                  \               \
        #                   \                X@...@@@(1552)
        #                    \
        #                     +..
        #
        # N1 doesn't change because 1033 on main chain (*) is invalid

        print "Done"
339 
if __name__ == '__main__':
    # Entry point: run the pruning test via the shared framework driver.
    PruneTest().main()
def setup_chain(self)
Definition: pruning.py:27
def mine_full_block(self, node, address)
Definition: pruning.py:217
volatile double sum
Definition: Examples.cpp:23
def calc_usage(blockdir)
Definition: pruning.py:17
def test_height_min(self)
Definition: pruning.py:65
UniValue getblock(const UniValue &params, bool fHelp)
Definition: blockchain.cpp:483
UniValue listunspent(const UniValue &params, bool fHelp)
Definition: rpcwallet.cpp:2533
def connect_nodes(from_connection, node_num)
Definition: util.py:343
UniValue getnewaddress(const UniValue &params, bool fHelp)
Definition: rpcwallet.cpp:113
def reorg_back(self)
Definition: pruning.py:170
UniValue getblockhash(const UniValue &params, bool fHelp)
Definition: blockchain.cpp:311
UniValue getblockcount(const UniValue &params, bool fHelp)
Definition: blockchain.cpp:131
def initialize_chain_clean(test_dir, num_nodes)
Definition: util.py:252
def gen_return_txouts()
Definition: util.py:559
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None)
Definition: util.py:281
def stop_node(node, i)
Definition: util.py:323
UniValue generate(const UniValue &params, bool fHelp)
Definition: mining.cpp:122
def setup_network(self)
Definition: pruning.py:31
def sync_blocks(rpc_connections, wait=1)
Definition: util.py:117
def create_big_chain(self)
Definition: pruning.py:54
def reorg_test(self)
Definition: pruning.py:117
def create_chain_with_staleblocks(self)
Definition: pruning.py:87
UniValue getbestblockhash(const UniValue &params, bool fHelp)
Definition: blockchain.cpp:148
def __init__(self)
Definition: pruning.py:22
UniValue getnetworkinfo(const UniValue &params, bool fHelp)
Definition: net.cpp:392
def run_test(self)
Definition: pruning.py:243