1 | 1 | use std::io::{Read, Seek, SeekFrom}; |
2 | | -use std::path::PathBuf; |
| 2 | +use std::path::{Path, PathBuf}; |
3 | 3 | use std::sync::Arc; |
4 | | -use std::time::Duration; |
5 | 4 | use std::{fs, io}; |
6 | 5 |
7 | | -use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; |
8 | | -use lightning::sign::{EntropySource, KeysManager, OutputSpender, SpendableOutputDescriptor}; |
| 6 | +use lightning::sign::{EntropySource, KeysManager, SpendableOutputDescriptor}; |
9 | 7 | use lightning::util::logger::Logger; |
10 | 8 | use lightning::util::persist::KVStore; |
11 | 9 | use lightning::util::ser::{Readable, WithoutLength, Writeable}; |
12 | 10 |
13 | 11 | use lightning_persister::fs_store::FilesystemStore; |
14 | 12 |
15 | | -use bitcoin::blockdata::locktime::absolute::LockTime; |
16 | | -use bitcoin::secp256k1::Secp256k1; |
17 | | -use rand::{thread_rng, Rng}; |
18 | | - |
| 13 | +use crate::disk::FilesystemLogger; |
19 | 14 | use crate::hex_utils; |
20 | | -use crate::BitcoindClient; |
21 | | -use crate::ChannelManager; |
22 | | -use crate::FilesystemLogger; |
| 15 | +use crate::OutputSweeper; |
| 16 | + |
| 17 | +const DEPRECATED_PENDING_SPENDABLE_OUTPUT_DIR: &'static str = "pending_spendable_outputs"; |
23 | 18 |
24 | | -/// If we have any pending claimable outputs, we should slowly sweep them to our Bitcoin Core |
25 | | -/// wallet. We technically don't need to do this - they're ours to spend when we want and can just |
26 | | -/// use them to build new transactions instead, but we cannot feed them direclty into Bitcoin |
27 | | -/// Core's wallet so we have to sweep. |
28 | | -/// |
29 | | -/// Note that this is unececssary for [`SpendableOutputDescriptor::StaticOutput`]s, which *do* have |
30 | | -/// an associated secret key we could simply import into Bitcoin Core's wallet, but for consistency |
31 | | -/// we don't do that here either. |
32 | | -pub(crate) async fn periodic_sweep( |
| 19 | +/// We updated to use LDK's OutputSweeper as part of upgrading to LDK 0.0.123, so migrate away from |
| 20 | +/// the old sweep persistence. |
| 21 | +pub(crate) async fn migrate_deprecated_spendable_outputs( |
33 | 22 | ldk_data_dir: String, keys_manager: Arc<KeysManager>, logger: Arc<FilesystemLogger>, |
34 | | - persister: Arc<FilesystemStore>, bitcoind_client: Arc<BitcoindClient>, |
35 | | - channel_manager: Arc<ChannelManager>, |
| 23 | + persister: Arc<FilesystemStore>, sweeper: Arc<OutputSweeper>, |
36 | 24 | ) { |
37 | | - // Regularly claim outputs which are exclusively spendable by us and send them to Bitcoin Core. |
38 | | - // Note that if you more tightly integrate your wallet with LDK you may not need to do this - |
39 | | - // these outputs can just be treated as normal outputs during coin selection. |
| 25 | + lightning::log_info!(&*logger, "Beginning migration of deprecated spendable outputs"); |
40 | 26 | let pending_spendables_dir = |
41 | | - format!("{}/{}", ldk_data_dir, crate::PENDING_SPENDABLE_OUTPUT_DIR); |
| 27 | + format!("{}/{}", ldk_data_dir, DEPRECATED_PENDING_SPENDABLE_OUTPUT_DIR); |
42 | 28 | let processing_spendables_dir = format!("{}/processing_spendable_outputs", ldk_data_dir); |
43 | 29 | let spendables_dir = format!("{}/spendable_outputs", ldk_data_dir); |
44 | 30 |
45 | | - // We batch together claims of all spendable outputs generated each day, however only after |
46 | | - // batching any claims of spendable outputs which were generated prior to restart. On a mobile |
47 | | - // device we likely won't ever be online for more than a minute, so we have to ensure we sweep |
48 | | - // any pending claims on startup, but for an always-online node you may wish to sweep even less |
49 | | - // frequently than this (or move the interval await to the top of the loop)! |
50 | | - // |
51 | | - // There is no particular rush here, we just have to ensure funds are availably by the time we |
52 | | - // need to send funds. |
53 | | - let mut interval = tokio::time::interval(Duration::from_secs(60 * 60 * 24)); |
| 31 | + if !Path::new(&pending_spendables_dir).exists() |
| 32 | + && !Path::new(&processing_spendables_dir).exists() |
| 33 | + && !Path::new(&spendables_dir).exists() |
| 34 | + { |
| 35 | + lightning::log_info!(&*logger, "No deprecated spendable outputs to migrate, returning"); |
| 36 | + return; |
| 37 | + } |
54 | 38 |
55 | | - loop { |
56 | | - interval.tick().await; // Note that the first tick completes immediately |
57 | | - if let Ok(dir_iter) = fs::read_dir(&pending_spendables_dir) { |
58 | | - // Move any spendable descriptors from pending folder so that we don't have any |
59 | | - // races with new files being added. |
60 | | - for file_res in dir_iter { |
61 | | - let file = file_res.unwrap(); |
62 | | - // Only move a file if its a 32-byte-hex'd filename, otherwise it might be a |
63 | | - // temporary file. |
64 | | - if file.file_name().len() == 64 { |
65 | | - fs::create_dir_all(&processing_spendables_dir).unwrap(); |
66 | | - let mut holding_path = PathBuf::new(); |
67 | | - holding_path.push(&processing_spendables_dir); |
68 | | - holding_path.push(&file.file_name()); |
69 | | - fs::rename(file.path(), holding_path).unwrap(); |
70 | | - } |
| 39 | + if let Ok(dir_iter) = fs::read_dir(&pending_spendables_dir) { |
| 40 | + // Move any spendable descriptors from pending folder so that we don't have any |
| 41 | + // races with new files being added. |
| 42 | + for file_res in dir_iter { |
| 43 | + let file = file_res.unwrap(); |
| 44 | +			// Only move a file if it's a 32-byte-hex'd filename, otherwise it might be a
| 45 | + // temporary file. |
| 46 | + if file.file_name().len() == 64 { |
| 47 | + fs::create_dir_all(&processing_spendables_dir).unwrap(); |
| 48 | + let mut holding_path = PathBuf::new(); |
| 49 | + holding_path.push(&processing_spendables_dir); |
| 50 | + holding_path.push(&file.file_name()); |
| 51 | + fs::rename(file.path(), holding_path).unwrap(); |
71 | 52 | } |
72 | | - // Now concatenate all the pending files we moved into one file in the |
73 | | - // `spendable_outputs` directory and drop the processing directory. |
74 | | - let mut outputs = Vec::new(); |
75 | | - if let Ok(processing_iter) = fs::read_dir(&processing_spendables_dir) { |
76 | | - for file_res in processing_iter { |
77 | | - outputs.append(&mut fs::read(file_res.unwrap().path()).unwrap()); |
78 | | - } |
79 | | - } |
80 | | - if !outputs.is_empty() { |
81 | | - let key = hex_utils::hex_str(&keys_manager.get_secure_random_bytes()); |
82 | | - persister |
83 | | - .write("spendable_outputs", "", &key, &WithoutLength(&outputs).encode()) |
84 | | - .unwrap(); |
85 | | - fs::remove_dir_all(&processing_spendables_dir).unwrap(); |
| 53 | + } |
| 54 | + // Now concatenate all the pending files we moved into one file in the |
| 55 | + // `spendable_outputs` directory and drop the processing directory. |
| 56 | + let mut outputs = Vec::new(); |
| 57 | + if let Ok(processing_iter) = fs::read_dir(&processing_spendables_dir) { |
| 58 | + for file_res in processing_iter { |
| 59 | + outputs.append(&mut fs::read(file_res.unwrap().path()).unwrap()); |
86 | 60 | } |
87 | 61 | } |
88 | | - // Iterate over all the sets of spendable outputs in `spendables_dir` and try to claim |
89 | | - // them. |
90 | | - // Note that here we try to claim each set of spendable outputs over and over again |
91 | | - // forever, even long after its been claimed. While this isn't an issue per se, in practice |
92 | | - // you may wish to track when the claiming transaction has confirmed and remove the |
93 | | - // spendable outputs set. You may also wish to merge groups of unspent spendable outputs to |
94 | | - // combine batches. |
95 | | - if let Ok(dir_iter) = fs::read_dir(&spendables_dir) { |
96 | | - for file_res in dir_iter { |
97 | | - let mut outputs: Vec<SpendableOutputDescriptor> = Vec::new(); |
98 | | - let mut file = fs::File::open(file_res.unwrap().path()).unwrap(); |
99 | | - loop { |
100 | | - // Check if there are any bytes left to read, and if so read a descriptor. |
101 | | - match file.read_exact(&mut [0; 1]) { |
102 | | - Ok(_) => { |
103 | | - file.seek(SeekFrom::Current(-1)).unwrap(); |
104 | | - }, |
105 | | - Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => break, |
106 | | - Err(e) => Err(e).unwrap(), |
107 | | - } |
108 | | - outputs.push(Readable::read(&mut file).unwrap()); |
109 | | - } |
110 | | - let destination_address = bitcoind_client.get_new_address().await; |
111 | | - let output_descriptors = &outputs.iter().map(|a| a).collect::<Vec<_>>(); |
112 | | - let tx_feerate = bitcoind_client |
113 | | - .get_est_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum); |
114 | | - |
115 | | - // We set nLockTime to the current height to discourage fee sniping. |
116 | | - // Occasionally randomly pick a nLockTime even further back, so |
117 | | - // that transactions that are delayed after signing for whatever reason, |
118 | | - // e.g. high-latency mix networks and some CoinJoin implementations, have |
119 | | - // better privacy. |
120 | | - // Logic copied from core: https://github.com/bitcoin/bitcoin/blob/1d4846a8443be901b8a5deb0e357481af22838d0/src/wallet/spend.cpp#L936 |
121 | | - let mut cur_height = channel_manager.current_best_block().height; |
122 | | - |
123 | | - // 10% of the time |
124 | | - if thread_rng().gen_range(0, 10) == 0 { |
125 | | - // subtract random number between 0 and 100 |
126 | | - cur_height = cur_height.saturating_sub(thread_rng().gen_range(0, 100)); |
127 | | - } |
| 62 | + if !outputs.is_empty() { |
| 63 | + let key = hex_utils::hex_str(&keys_manager.get_secure_random_bytes()); |
| 64 | + persister |
| 65 | + .write("spendable_outputs", "", &key, &WithoutLength(&outputs).encode()) |
| 66 | + .unwrap(); |
| 67 | + fs::remove_dir_all(&processing_spendables_dir).unwrap(); |
| 68 | + } |
| 69 | + } |
128 | 70 |
129 | | - let locktime = |
130 | | - LockTime::from_height(cur_height).map_or(LockTime::ZERO, |l| l.into()); |
| 71 | + let best_block = sweeper.current_best_block(); |
131 | 72 |
132 | | - if let Ok(spending_tx) = keys_manager.spend_spendable_outputs( |
133 | | - output_descriptors, |
134 | | - Vec::new(), |
135 | | - destination_address.script_pubkey(), |
136 | | - tx_feerate, |
137 | | - Some(locktime), |
138 | | - &Secp256k1::new(), |
139 | | - ) { |
140 | | - // Note that, most likely, we've already sweeped this set of outputs |
141 | | - // and they're already confirmed on-chain, so this broadcast will fail. |
142 | | - bitcoind_client.broadcast_transactions(&[&spending_tx]); |
143 | | - } else { |
144 | | - lightning::log_error!( |
145 | | - logger, |
146 | | - "Failed to sweep spendable outputs! This may indicate the outputs are dust. Will try again in a day."); |
| 73 | + let mut outputs: Vec<SpendableOutputDescriptor> = Vec::new(); |
| 74 | + if let Ok(dir_iter) = fs::read_dir(&spendables_dir) { |
| 75 | + for file_res in dir_iter { |
| 76 | + let mut file = fs::File::open(file_res.unwrap().path()).unwrap(); |
| 77 | + loop { |
| 78 | + // Check if there are any bytes left to read, and if so read a descriptor. |
| 79 | + match file.read_exact(&mut [0; 1]) { |
| 80 | + Ok(_) => { |
| 81 | + file.seek(SeekFrom::Current(-1)).unwrap(); |
| 82 | + }, |
| 83 | + Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => break, |
| 84 | + Err(e) => Err(e).unwrap(), |
147 | 85 | } |
| 86 | + outputs.push(Readable::read(&mut file).unwrap()); |
148 | 87 | } |
149 | 88 | } |
150 | 89 | } |
| 90 | + |
| 91 | + let spend_delay = Some(best_block.height + 2); |
| 92 | + sweeper.track_spendable_outputs(outputs.clone(), None, false, spend_delay).unwrap(); |
| 93 | + |
| 94 | + fs::remove_dir_all(&spendables_dir).unwrap(); |
| 95 | + fs::remove_dir_all(&pending_spendables_dir).unwrap(); |
| 96 | + |
| 97 | + lightning::log_info!( |
| 98 | + &*logger, |
| 99 | + "Successfully migrated {} deprecated spendable outputs", |
| 100 | + outputs.len() |
| 101 | + ); |
151 | 102 | } |
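
For context, here is a minimal sketch (not part of this diff) of how the new migration function might be invoked at node startup, assuming the ldk-sample layout where this file is the sweep module and ldk_data_dir, keys_manager, logger, persister, and sweeper have already been constructed:

	// Hand any descriptors persisted by the old periodic_sweep task to LDK's
	// OutputSweeper exactly once, before the node starts processing new events.
	sweep::migrate_deprecated_spendable_outputs(
		ldk_data_dir.clone(),
		Arc::clone(&keys_manager),
		Arc::clone(&logger),
		Arc::clone(&persister),
		Arc::clone(&sweeper),
	)
	.await;

Because the function deletes the legacy directories once the sweeper is tracking the recovered descriptors, it is effectively one-shot: on later startups the early-return check finds no deprecated directories and does nothing.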