@@ -33,6 +33,7 @@
 import infra.concurrency
 from collections import defaultdict
 import ccf.read_ledger
+import re
 
 from loguru import logger as LOG
 
@@ -1749,6 +1750,71 @@ def test_error_message_on_failure_to_read_aci_sec_context(args):
     ), f"Did not find expected log messages: {expected_log_messages}"
 
 
+def test_error_message_on_failure_to_fetch_snapshot(const_args):
+    args = copy.deepcopy(const_args)
+    args.nodes = infra.e2e_args.min_nodes(args, 0)
+    with infra.network.network(
+        args.nodes,
+        args.binary_dir,
+        args.debug_nodes,
+        pdb=args.pdb,
+    ) as network:
+        network.start_and_open(args)
+
+        primary, _ = network.find_primary()
+
+        new_node = network.create_node("local://localhost")
+
+        # Shut down primary to cause snapshot fetch to fail
+        primary.remote.stop()
+
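+        # Joining via the stopped primary is expected to fail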
+        failed = False
+        try:
+            LOG.info("Starting join")
+            network.join_node(
+                new_node,
+                args.package,
+                args,
+                target_node=primary,
+                timeout=10,
+                from_snapshot=False,
+                wait_for_node_in_store=False,
+            )
+            new_node.wait_for_node_to_join(timeout=5)
+        except Exception as e:
+            LOG.info(f"Joining node failed to join, as expected: {e}")
+            failed = True
+
+        assert failed, "Joining node should have failed to join the stopped primary"
+
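+        # The node should have logged three fetch attempts before giving up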
+        expected_log_messages = [
+            re.compile(r"Fetching snapshot from .* \(attempt 1/3\)"),
+            re.compile(r"Fetching snapshot from .* \(attempt 2/3\)"),
+            re.compile(r"Fetching snapshot from .* \(attempt 3/3\)"),
+            re.compile(
+                r"Exceeded maximum snapshot fetch retries \([0-9]+\), giving up"
+            ),
+        ]
+
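+        # Scan the joining node's log output for each expected message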
+        out_path, _ = new_node.get_logs()
+        with open(out_path, "r", encoding="utf-8") as log_file:
+            for line in log_file:
+                # Iterate over a copy so matched patterns can be removed safely
+                for expected in list(expected_log_messages):
+                    if expected.search(line):
+                        expected_log_messages.remove(expected)
+                        LOG.info(f"Found expected log message: {line}")
+                if len(expected_log_messages) == 0:
+                    break
+
+        assert (
+            len(expected_log_messages) == 0
+        ), f"Did not find expected log messages: {expected_log_messages}"
+
+
 def run(args):
     run_max_uncommitted_tx_count(args)
     run_file_operations(args)