Diagnosed failure
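All three ThreadSanitizer reports below describe the same race. On process exit, the main thread runs an atexit-registered destructor (cxa_at_exit_wrapper) for a process-lifetime static std::set<kudu::rpc::RpcFeatureFlag> in libkrpc, presumably the constant set of supported RPC feature flags, freeing the set's tree nodes (the operator delete in each "Write" stack). Meanwhile the finished-but-unjoined 'client-negotiat' thread-pool thread T264 was still copy-assigning that set in kudu::rpc::ClientNegotiation::SendNegotiate() (client_negotiation.cc:327) for a connection negotiation that outlived the test body. The "As if synchronized via sleep" stacks show why TSAN could not prove an ordering: the only thing separating the two accesses was a timed sleep inside Subprocess::KillAndWait() while the test destructor stopped MiniChronyd, and a sleep is not synchronization.

A minimal sketch of the pattern (hypothetical code, not Kudu's; the set type, values, and names are stand-ins):

    // Repro sketch: an exit-time destructor racing a detached thread that
    // still reads the static. Compile with -fsanitize=thread to get an
    // analogous report.
    #include <set>
    #include <thread>

    // Process-lifetime static; its destructor is registered via
    // __cxa_atexit and runs on the main thread when main() returns.
    static const std::set<int> kSupportedFeatureFlags = {1, 2, 3};

    int main() {
      std::thread negotiator([] {
        std::set<int> client_features;
        client_features = kSupportedFeatureFlags;  // reads the tree nodes
      });
      negotiator.detach();
      return 0;  // exit() may now destroy kSupportedFeatureFlags mid-copy
    }

The usual remedies for this pattern (an assumption about the fix, not taken from the log) are to shut down the negotiation thread pool before static destructors run, or to give the static a never-destroyed lifetime so no atexit destructor is registered:

    // Leaky-singleton idiom: the set is never destroyed, so there is no
    // exit-time write for a straggler thread to race with.
    static const auto* kSupportedFeatureFlags = new std::set<int>{1, 2, 3};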

MasterReplicationAndRpcSizeLimitTest.TabletReports: WARNING: ThreadSanitizer: data race (pid=15812)
  Write of size 8 at 0x7b0800003cc8 by main thread:
    #0 operator delete(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_new_delete.cpp:126 (master_replication-itest+0x342b39)
    #1 std::__1::_DeallocateCaller::__do_call(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/new:334:12 (master_replication-itest+0x34eac9)
    #2 std::__1::_DeallocateCaller::__do_deallocate_handle_size(void*, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/new:292:12 (master_replication-itest+0x34ea69)
    #3 std::__1::_DeallocateCaller::__do_deallocate_handle_size_align(void*, unsigned long, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/new:268:14 (libkrpc.so+0x126292)
    #4 std::__1::__libcpp_deallocate(void*, unsigned long, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/new:340:3 (libkrpc.so+0x126229)
    #5 std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> >::deallocate(std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1798:10 (libkrpc.so+0x139f79)
    #6 std::__1::allocator_traits<std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> > >::deallocate(std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> >&, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1533:14 (libkrpc.so+0x139e89)
    #7 std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::destroy(std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1837:9 (libkrpc.so+0x139d71)
    #8 std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::~__tree() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1824:3 (libkrpc.so+0x139ce4)
    #9 std::__1::set<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::~set() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/set:605:5 (libkrpc.so+0x133129)
    #10 cxa_at_exit_wrapper(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:394 (master_replication-itest+0x2c28bf)

  Previous read of size 8 at 0x7b0800003cc8 by thread T264:
    #0 std::__1::__tree_end_node<std::__1::__tree_node_base<void*>*>* std::__1::__tree_next_iter<std::__1::__tree_end_node<std::__1::__tree_node_base<void*>*>*, std::__1::__tree_node_base<void*>*>(std::__1::__tree_node_base<void*>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:182:14 (libkrpc.so+0x13db3a)
    #1 std::__1::__tree_const_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long>::operator++() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:929:11 (libkrpc.so+0x133df2)
    #2 void std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::__assign_multi<std::__1::__tree_const_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long> >(std::__1::__tree_const_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long>, std::__1::__tree_const_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long>) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1696:31 (libkrpc.so+0x13bcb2)
    #3 std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::operator=(std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> > const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1648:9 (libkrpc.so+0x13bb09)
    #4 std::__1::set<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::operator=(std::__1::set<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> > const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/set:540:21 (libkrpc.so+0x133c10)
    #5 kudu::rpc::ClientNegotiation::SendNegotiate() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/client_negotiation.cc:327:20 (libkrpc.so+0x12b3b6)
    #6 kudu::rpc::ClientNegotiation::Negotiate(std::__1::unique_ptr<kudu::rpc::ErrorStatusPB, std::__1::default_delete<kudu::rpc::ErrorStatusPB> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/client_negotiation.cc:185:5 (libkrpc.so+0x12abeb)
    #7 kudu::rpc::DoClientNegotiation(kudu::rpc::Connection*, kudu::TriStateFlag, kudu::TriStateFlag, bool, kudu::MonoTime, std::__1::unique_ptr<kudu::rpc::ErrorStatusPB, std::__1::default_delete<kudu::rpc::ErrorStatusPB> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/negotiation.cc:231:3 (libkrpc.so+0x180d5b)
    #8 kudu::rpc::Negotiation::RunNegotiation(scoped_refptr<kudu::rpc::Connection> const&, kudu::TriStateFlag, kudu::TriStateFlag, bool, kudu::MonoTime) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/negotiation.cc:317:9 (libkrpc.so+0x17fea2)
    #9 kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:631:3 (libkrpc.so+0x19f37c)
    #10 decltype(std::__1::forward<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&>(fp)()) std::__1::__invoke<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&>(kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/type_traits:3905:1 (libkrpc.so+0x19f2f9)
    #11 void std::__1::__invoke_void_return_wrapper<void>::__call<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&>(kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__functional_base:348:9 (libkrpc.so+0x19f289)
    #12 std::__1::__function::__alloc_func<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1, std::__1::allocator<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1557:16 (libkrpc.so+0x19f251)
    #13 std::__1::__function::__func<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1, std::__1::allocator<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1731:12 (libkrpc.so+0x19e52d)
    #14 std::__1::__function::__value_func<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1884:16 (libtserver_test_util.so+0x60694)
    #15 std::__1::function<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:2556:12 (libtserver_test_util.so+0x604c9)
    #16 kudu::ThreadPool::DispatchThread() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:837:7 (libkudu_util.so+0x467df4)
    #17 kudu::ThreadPool::CreateThread()::$_2::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:919:48 (libkudu_util.so+0x46b261)
    #18 decltype(std::__1::forward<kudu::ThreadPool::CreateThread()::$_2&>(fp)()) std::__1::__invoke<kudu::ThreadPool::CreateThread()::$_2&>(kudu::ThreadPool::CreateThread()::$_2&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/type_traits:3905:1 (libkudu_util.so+0x46b219)
    #19 void std::__1::__invoke_void_return_wrapper<void>::__call<kudu::ThreadPool::CreateThread()::$_2&>(kudu::ThreadPool::CreateThread()::$_2&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__functional_base:348:9 (libkudu_util.so+0x46b1a9)
    #20 std::__1::__function::__alloc_func<kudu::ThreadPool::CreateThread()::$_2, std::__1::allocator<kudu::ThreadPool::CreateThread()::$_2>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1557:16 (libkudu_util.so+0x46b171)
    #21 std::__1::__function::__func<kudu::ThreadPool::CreateThread()::$_2, std::__1::allocator<kudu::ThreadPool::CreateThread()::$_2>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1731:12 (libkudu_util.so+0x46a46d)
    #22 std::__1::__function::__value_func<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1884:16 (libtserver_test_util.so+0x60694)
    #23 std::__1::function<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:2556:12 (libtserver_test_util.so+0x604c9)
    #24 kudu::Thread::SuperviseThread(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/thread.cc:698:3 (libkudu_util.so+0x44a116)

  As if synchronized via sleep:
    #0 nanosleep /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:363 (master_replication-itest+0x2e7da2)
    #1 base::SleepForNanoseconds(long) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/gutil/sysinfo.cc:96:10 (libgutil.so+0xb5f0a)
    #2 kudu::SleepFor(kudu::MonoDelta const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/monotime.cc:264:3 (libkudu_util.so+0x3ddd86)
    #3 kudu::Subprocess::KillAndWait(int) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/subprocess.cc:692:7 (libkudu_util.so+0x43c190)
    #4 kudu::clock::MiniChronyd::Stop() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/clock/test/mini_chronyd.cc:251:16 (libmini_chronyd.so+0x15190)
    #5 kudu::clock::MiniChronyd::~MiniChronyd() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/clock/test/mini_chronyd.cc:166:5 (libmini_chronyd.so+0x14e8e)
    #6 std::__1::default_delete<kudu::clock::MiniChronyd>::operator()(kudu::clock::MiniChronyd*) const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2262:5 (libmini_cluster.so+0xc18fe)
    #7 std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >::reset(kudu::clock::MiniChronyd*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2517:7 (libmini_cluster.so+0xc186d)
    #8 std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >::~unique_ptr() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2471:19 (libmini_cluster.so+0x9cc2b)
    #9 std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >::destroy(std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1811:92 (libmini_cluster.so+0xb2169)
    #10 void std::__1::allocator_traits<std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::__destroy<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >(std::__1::integral_constant<bool, true>, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >&, std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1703:21 (libmini_cluster.so+0xb2140)
    #11 void std::__1::allocator_traits<std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::destroy<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >(std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >&, std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1544:14 (libmini_cluster.so+0xb2100)
    #12 std::__1::__vector_base<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::__destruct_at_end(std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/vector:428:9 (libmini_cluster.so+0xb20b6)
    #13 std::__1::__vector_base<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::clear() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/vector:371:29 (libmini_cluster.so+0xb1fd4)
    #14 std::__1::__vector_base<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::~__vector_base() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/vector:465:9 (libmini_cluster.so+0xb1dcb)
    #15 std::__1::vector<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::~vector() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/vector:557:5 (libmini_cluster.so+0x98161)
    #16 kudu::cluster::ExternalMiniCluster::~ExternalMiniCluster() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/mini-cluster/external_mini_cluster.cc:173:1 (libmini_cluster.so+0x8457e)
    #17 kudu::cluster::ExternalMiniCluster::~ExternalMiniCluster() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/mini-cluster/external_mini_cluster.cc:171:45 (libmini_cluster.so+0x84689)
    #18 std::__1::default_delete<kudu::cluster::ExternalMiniCluster>::operator()(kudu::cluster::ExternalMiniCluster*) const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2262:5 (master_replication-itest+0x355737)
    #19 std::__1::unique_ptr<kudu::cluster::ExternalMiniCluster, std::__1::default_delete<kudu::cluster::ExternalMiniCluster> >::reset(kudu::cluster::ExternalMiniCluster*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2517:7 (master_replication-itest+0x35569d)
    #20 std::__1::unique_ptr<kudu::cluster::ExternalMiniCluster, std::__1::default_delete<kudu::cluster::ExternalMiniCluster> >::~unique_ptr() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2471:19 (master_replication-itest+0x35552b)
    #21 kudu::master::MasterReplicationAndRpcSizeLimitTest::~MasterReplicationAndRpcSizeLimitTest() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/integration-tests/master_replication-itest.cc:370:7 (master_replication-itest+0x34dff6)
    #22 kudu::master::MasterReplicationAndRpcSizeLimitTest_TabletReports_Test::~MasterReplicationAndRpcSizeLimitTest_TabletReports_Test() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/integration-tests/master_replication-itest.cc:536:1 (master_replication-itest+0x34e029)
    #23 testing::Test::DeleteSelf_() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/include/gtest/gtest.h:318:24 (libgtest.so.1.12.1+0x65467)
    #24 void testing::internal::HandleSehExceptionsInMethodIfSupported<testing::Test, void>(testing::Test*, void (testing::Test::*)(), char const*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2599:10 (libgtest.so.1.12.1+0x64e2f)
    #25 void testing::internal::HandleExceptionsInMethodIfSupported<testing::Test, void>(testing::Test*, void (testing::Test::*)(), char const*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2635:14 (libgtest.so.1.12.1+0x64e2f)
    #26 testing::TestInfo::Run() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2859:5 (libgtest.so.1.12.1+0x43dc2)
    #27 testing::TestSuite::Run() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:3012:30 (libgtest.so.1.12.1+0x44d24)
    #28 testing::internal::UnitTestImpl::RunAllTests() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:5870:44 (libgtest.so.1.12.1+0x59814)
    #29 bool testing::internal::HandleSehExceptionsInMethodIfSupported<testing::internal::UnitTestImpl, bool>(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2599:10 (libgtest.so.1.12.1+0x65cef)
    #30 bool testing::internal::HandleExceptionsInMethodIfSupported<testing::internal::UnitTestImpl, bool>(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2635:14 (libgtest.so.1.12.1+0x65cef)
    #31 testing::UnitTest::Run() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:5444:10 (libgtest.so.1.12.1+0x58dcc)
    #32 RUN_ALL_TESTS() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/gtest/gtest.h:2293:73 (master_replication-itest+0x36544b)
    #33 main /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/test_main.cc:109:10 (master_replication-itest+0x36434c)

  Thread T264 'client-negotiat' (tid=21891, finished) created by thread T102 at:
    #0 pthread_create /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:966 (master_replication-itest+0x2c6875)
    #1 kudu::Thread::StartThread(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::function<void ()>, unsigned long, scoped_refptr<kudu::Thread>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/thread.cc:642:15 (libkudu_util.so+0x44997a)
    #2 kudu::Thread::Create(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::function<void ()>, scoped_refptr<kudu::Thread>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/thread.h:146:12 (libmaster.so+0x2d2ae9)
    #3 kudu::ThreadPool::CreateThread() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:918:10 (libkudu_util.so+0x46702b)
    #4 kudu::ThreadPool::DoSubmit(std::__1::function<void ()>, kudu::ThreadPoolToken*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:703:21 (libkudu_util.so+0x464f7f)
    #5 kudu::ThreadPool::Submit(std::__1::function<void ()>) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:584:10 (libkudu_util.so+0x467592)
    #6 kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:631:3 (libkrpc.so+0x199288)
    #7 kudu::rpc::ReactorThread::FindOrStartConnection(kudu::rpc::ConnectionId const&, kudu::rpc::CredentialsPolicy, scoped_refptr<kudu::rpc::Connection>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:591:14 (libkrpc.so+0x199ce1)
    #8 kudu::rpc::ReactorThread::AssignOutboundCall(std::__1::shared_ptr<kudu::rpc::OutboundCall>) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:375:14 (libkrpc.so+0x1998a6)
    #9 kudu::rpc::AssignOutboundCallTask::Run(kudu::rpc::ReactorThread*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:907:14 (libkrpc.so+0x1a514e)
    #10 kudu::rpc::ReactorThread::AsyncHandler(ev::async&, int) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:349:10 (libkrpc.so+0x198c12)
    #11 void ev::base<ev_async, ev::async>::method_thunk<kudu::rpc::ReactorThread, &(kudu::rpc::ReactorThread::AsyncHandler(ev::async&, int))>(ev_loop*, ev_async*, int) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/ev++.h:497:7 (libkrpc.so+0x1a6373)
    #12 ev_invoke_pending /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/libev-4.33/ev.c:3770:11 (libev.so.4+0x9d31)
    #13 kudu::rpc::ReactorThread::InvokePendingCb(ev_loop*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:205:3 (libkrpc.so+0x197ae5)
    #14 ev_run /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/libev-4.33/ev.c:4190:7 (libev.so.4+0xaf9a)
    #15 ev::loop_ref::run(int) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/ev++.h:211:7 (libkrpc.so+0x1a2bf8)
    #16 kudu::rpc::ReactorThread::RunThread() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:510:9 (libkrpc.so+0x19b0c5)
    #17 kudu::rpc::ReactorThread::Init()::$_0::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:197:48 (libkrpc.so+0x19de11)
    #18 decltype(std::__1::forward<kudu::rpc::ReactorThread::Init()::$_0&>(fp)()) std::__1::__invoke<kudu::rpc::ReactorThread::Init()::$_0&>(kudu::rpc::ReactorThread::Init()::$_0&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/type_traits:3905:1 (libkrpc.so+0x19ddc9)
    #19 void std::__1::__invoke_void_return_wrapper<void>::__call<kudu::rpc::ReactorThread::Init()::$_0&>(kudu::rpc::ReactorThread::Init()::$_0&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__functional_base:348:9 (libkrpc.so+0x19dd59)
    #20 std::__1::__function::__alloc_func<kudu::rpc::ReactorThread::Init()::$_0, std::__1::allocator<kudu::rpc::ReactorThread::Init()::$_0>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1557:16 (libkrpc.so+0x19dd21)
    #21 std::__1::__function::__func<kudu::rpc::ReactorThread::Init()::$_0, std::__1::allocator<kudu::rpc::ReactorThread::Init()::$_0>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1731:12 (libkrpc.so+0x19d01d)
    #22 std::__1::__function::__value_func<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1884:16 (libtserver_test_util.so+0x60694)
    #23 std::__1::function<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:2556:12 (libtserver_test_util.so+0x604c9)
    #24 kudu::Thread::SuperviseThread(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/thread.cc:698:3 (libkudu_util.so+0x44a116)
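
The two reports that follow appear to be the same race observed at adjacent addresses within the same heap block (0x7b0800003cd0, then 0x7b0800003cd8/0x7b0800003cdc): the write side is the identical exit-time set destructor, while the reads come from successive steps of the same copy-assignment in SendNegotiate() (iterator increment in the second report, node construction from the source element in the third).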


MasterReplicationAndRpcSizeLimitTest.TabletReports: WARNING: ThreadSanitizer: data race (pid=15812)
  Write of size 8 at 0x7b0800003cd0 by main thread:
    #0 operator delete(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_new_delete.cpp:126 (master_replication-itest+0x342b39)
    #1 std::__1::_DeallocateCaller::__do_call(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/new:334:12 (master_replication-itest+0x34eac9)
    #2 std::__1::_DeallocateCaller::__do_deallocate_handle_size(void*, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/new:292:12 (master_replication-itest+0x34ea69)
    #3 std::__1::_DeallocateCaller::__do_deallocate_handle_size_align(void*, unsigned long, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/new:268:14 (libkrpc.so+0x126292)
    #4 std::__1::__libcpp_deallocate(void*, unsigned long, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/new:340:3 (libkrpc.so+0x126229)
    #5 std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> >::deallocate(std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1798:10 (libkrpc.so+0x139f79)
    #6 std::__1::allocator_traits<std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> > >::deallocate(std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> >&, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1533:14 (libkrpc.so+0x139e89)
    #7 std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::destroy(std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1837:9 (libkrpc.so+0x139d71)
    #8 std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::~__tree() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1824:3 (libkrpc.so+0x139ce4)
    #9 std::__1::set<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::~set() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/set:605:5 (libkrpc.so+0x133129)
    #10 cxa_at_exit_wrapper(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:394 (master_replication-itest+0x2c28bf)

  Previous read of size 8 at 0x7b0800003cd0 by thread T264:
    #0 bool std::__1::__tree_is_left_child<std::__1::__tree_node_base<void*>*>(std::__1::__tree_node_base<void*>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:81:24 (libkrpc.so+0x13c67b)
    #1 std::__1::__tree_end_node<std::__1::__tree_node_base<void*>*>* std::__1::__tree_next_iter<std::__1::__tree_end_node<std::__1::__tree_node_base<void*>*>*, std::__1::__tree_node_base<void*>*>(std::__1::__tree_node_base<void*>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:184:13 (libkrpc.so+0x13db64)
    #2 std::__1::__tree_const_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long>::operator++() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:929:11 (libkrpc.so+0x133df2)
    #3 void std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::__assign_multi<std::__1::__tree_const_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long> >(std::__1::__tree_const_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long>, std::__1::__tree_const_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long>) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1696:31 (libkrpc.so+0x13bcb2)
    #4 std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::operator=(std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> > const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1648:9 (libkrpc.so+0x13bb09)
    #5 std::__1::set<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::operator=(std::__1::set<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> > const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/set:540:21 (libkrpc.so+0x133c10)
    #6 kudu::rpc::ClientNegotiation::SendNegotiate() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/client_negotiation.cc:327:20 (libkrpc.so+0x12b3b6)
    #7 kudu::rpc::ClientNegotiation::Negotiate(std::__1::unique_ptr<kudu::rpc::ErrorStatusPB, std::__1::default_delete<kudu::rpc::ErrorStatusPB> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/client_negotiation.cc:185:5 (libkrpc.so+0x12abeb)
    #8 kudu::rpc::DoClientNegotiation(kudu::rpc::Connection*, kudu::TriStateFlag, kudu::TriStateFlag, bool, kudu::MonoTime, std::__1::unique_ptr<kudu::rpc::ErrorStatusPB, std::__1::default_delete<kudu::rpc::ErrorStatusPB> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/negotiation.cc:231:3 (libkrpc.so+0x180d5b)
    #9 kudu::rpc::Negotiation::RunNegotiation(scoped_refptr<kudu::rpc::Connection> const&, kudu::TriStateFlag, kudu::TriStateFlag, bool, kudu::MonoTime) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/negotiation.cc:317:9 (libkrpc.so+0x17fea2)
    #10 kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:631:3 (libkrpc.so+0x19f37c)
    #11 decltype(std::__1::forward<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&>(fp)()) std::__1::__invoke<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&>(kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/type_traits:3905:1 (libkrpc.so+0x19f2f9)
    #12 void std::__1::__invoke_void_return_wrapper<void>::__call<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&>(kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__functional_base:348:9 (libkrpc.so+0x19f289)
    #13 std::__1::__function::__alloc_func<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1, std::__1::allocator<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1557:16 (libkrpc.so+0x19f251)
    #14 std::__1::__function::__func<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1, std::__1::allocator<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1731:12 (libkrpc.so+0x19e52d)
    #15 std::__1::__function::__value_func<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1884:16 (libtserver_test_util.so+0x60694)
    #16 std::__1::function<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:2556:12 (libtserver_test_util.so+0x604c9)
    #17 kudu::ThreadPool::DispatchThread() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:837:7 (libkudu_util.so+0x467df4)
    #18 kudu::ThreadPool::CreateThread()::$_2::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:919:48 (libkudu_util.so+0x46b261)
    #19 decltype(std::__1::forward<kudu::ThreadPool::CreateThread()::$_2&>(fp)()) std::__1::__invoke<kudu::ThreadPool::CreateThread()::$_2&>(kudu::ThreadPool::CreateThread()::$_2&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/type_traits:3905:1 (libkudu_util.so+0x46b219)
    #20 void std::__1::__invoke_void_return_wrapper<void>::__call<kudu::ThreadPool::CreateThread()::$_2&>(kudu::ThreadPool::CreateThread()::$_2&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__functional_base:348:9 (libkudu_util.so+0x46b1a9)
    #21 std::__1::__function::__alloc_func<kudu::ThreadPool::CreateThread()::$_2, std::__1::allocator<kudu::ThreadPool::CreateThread()::$_2>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1557:16 (libkudu_util.so+0x46b171)
    #22 std::__1::__function::__func<kudu::ThreadPool::CreateThread()::$_2, std::__1::allocator<kudu::ThreadPool::CreateThread()::$_2>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1731:12 (libkudu_util.so+0x46a46d)
    #23 std::__1::__function::__value_func<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1884:16 (libtserver_test_util.so+0x60694)
    #24 std::__1::function<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:2556:12 (libtserver_test_util.so+0x604c9)
    #25 kudu::Thread::SuperviseThread(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/thread.cc:698:3 (libkudu_util.so+0x44a116)

  As if synchronized via sleep:
    #0 nanosleep /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:363 (master_replication-itest+0x2e7da2)
    #1 base::SleepForNanoseconds(long) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/gutil/sysinfo.cc:96:10 (libgutil.so+0xb5f0a)
    #2 kudu::SleepFor(kudu::MonoDelta const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/monotime.cc:264:3 (libkudu_util.so+0x3ddd86)
    #3 kudu::Subprocess::KillAndWait(int) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/subprocess.cc:692:7 (libkudu_util.so+0x43c190)
    #4 kudu::clock::MiniChronyd::Stop() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/clock/test/mini_chronyd.cc:251:16 (libmini_chronyd.so+0x15190)
    #5 kudu::clock::MiniChronyd::~MiniChronyd() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/clock/test/mini_chronyd.cc:166:5 (libmini_chronyd.so+0x14e8e)
    #6 std::__1::default_delete<kudu::clock::MiniChronyd>::operator()(kudu::clock::MiniChronyd*) const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2262:5 (libmini_cluster.so+0xc18fe)
    #7 std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >::reset(kudu::clock::MiniChronyd*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2517:7 (libmini_cluster.so+0xc186d)
    #8 std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >::~unique_ptr() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2471:19 (libmini_cluster.so+0x9cc2b)
    #9 std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >::destroy(std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1811:92 (libmini_cluster.so+0xb2169)
    #10 void std::__1::allocator_traits<std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::__destroy<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >(std::__1::integral_constant<bool, true>, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >&, std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1703:21 (libmini_cluster.so+0xb2140)
    #11 void std::__1::allocator_traits<std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::destroy<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >(std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >&, std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1544:14 (libmini_cluster.so+0xb2100)
    #12 std::__1::__vector_base<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::__destruct_at_end(std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/vector:428:9 (libmini_cluster.so+0xb20b6)
    #13 std::__1::__vector_base<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::clear() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/vector:371:29 (libmini_cluster.so+0xb1fd4)
    #14 std::__1::__vector_base<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::~__vector_base() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/vector:465:9 (libmini_cluster.so+0xb1dcb)
    #15 std::__1::vector<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::~vector() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/vector:557:5 (libmini_cluster.so+0x98161)
    #16 kudu::cluster::ExternalMiniCluster::~ExternalMiniCluster() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/mini-cluster/external_mini_cluster.cc:173:1 (libmini_cluster.so+0x8457e)
    #17 kudu::cluster::ExternalMiniCluster::~ExternalMiniCluster() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/mini-cluster/external_mini_cluster.cc:171:45 (libmini_cluster.so+0x84689)
    #18 std::__1::default_delete<kudu::cluster::ExternalMiniCluster>::operator()(kudu::cluster::ExternalMiniCluster*) const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2262:5 (master_replication-itest+0x355737)
    #19 std::__1::unique_ptr<kudu::cluster::ExternalMiniCluster, std::__1::default_delete<kudu::cluster::ExternalMiniCluster> >::reset(kudu::cluster::ExternalMiniCluster*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2517:7 (master_replication-itest+0x35569d)
    #20 std::__1::unique_ptr<kudu::cluster::ExternalMiniCluster, std::__1::default_delete<kudu::cluster::ExternalMiniCluster> >::~unique_ptr() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2471:19 (master_replication-itest+0x35552b)
    #21 kudu::master::MasterReplicationAndRpcSizeLimitTest::~MasterReplicationAndRpcSizeLimitTest() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/integration-tests/master_replication-itest.cc:370:7 (master_replication-itest+0x34dff6)
    #22 kudu::master::MasterReplicationAndRpcSizeLimitTest_TabletReports_Test::~MasterReplicationAndRpcSizeLimitTest_TabletReports_Test() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/integration-tests/master_replication-itest.cc:536:1 (master_replication-itest+0x34e029)
    #23 testing::Test::DeleteSelf_() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/include/gtest/gtest.h:318:24 (libgtest.so.1.12.1+0x65467)
    #24 void testing::internal::HandleSehExceptionsInMethodIfSupported<testing::Test, void>(testing::Test*, void (testing::Test::*)(), char const*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2599:10 (libgtest.so.1.12.1+0x64e2f)
    #25 void testing::internal::HandleExceptionsInMethodIfSupported<testing::Test, void>(testing::Test*, void (testing::Test::*)(), char const*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2635:14 (libgtest.so.1.12.1+0x64e2f)
    #26 testing::TestInfo::Run() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2859:5 (libgtest.so.1.12.1+0x43dc2)
    #27 testing::TestSuite::Run() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:3012:30 (libgtest.so.1.12.1+0x44d24)
    #28 testing::internal::UnitTestImpl::RunAllTests() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:5870:44 (libgtest.so.1.12.1+0x59814)
    #29 bool testing::internal::HandleSehExceptionsInMethodIfSupported<testing::internal::UnitTestImpl, bool>(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2599:10 (libgtest.so.1.12.1+0x65cef)
    #30 bool testing::internal::HandleExceptionsInMethodIfSupported<testing::internal::UnitTestImpl, bool>(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2635:14 (libgtest.so.1.12.1+0x65cef)
    #31 testing::UnitTest::Run() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:5444:10 (libgtest.so.1.12.1+0x58dcc)
    #32 RUN_ALL_TESTS() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/gtest/gtest.h:2293:73 (master_replication-itest+0x36544b)
    #33 main /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/test_main.cc:109:10 (master_replication-itest+0x36434c)

  Thread T264 'client-negotiat' (tid=21891, finished) created by thread T102 at:
    #0 pthread_create /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:966 (master_replication-itest+0x2c6875)
    #1 kudu::Thread::StartThread(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::function<void ()>, unsigned long, scoped_refptr<kudu::Thread>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/thread.cc:642:15 (libkudu_util.so+0x44997a)
    #2 kudu::Thread::Create(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::function<void ()>, scoped_refptr<kudu::Thread>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/thread.h:146:12 (libmaster.so+0x2d2ae9)
    #3 kudu::ThreadPool::CreateThread() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:918:10 (libkudu_util.so+0x46702b)
    #4 kudu::ThreadPool::DoSubmit(std::__1::function<void ()>, kudu::ThreadPoolToken*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:703:21 (libkudu_util.so+0x464f7f)
    #5 kudu::ThreadPool::Submit(std::__1::function<void ()>) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:584:10 (libkudu_util.so+0x467592)
    #6 kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:631:3 (libkrpc.so+0x199288)
    #7 kudu::rpc::ReactorThread::FindOrStartConnection(kudu::rpc::ConnectionId const&, kudu::rpc::CredentialsPolicy, scoped_refptr<kudu::rpc::Connection>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:591:14 (libkrpc.so+0x199ce1)
    #8 kudu::rpc::ReactorThread::AssignOutboundCall(std::__1::shared_ptr<kudu::rpc::OutboundCall>) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:375:14 (libkrpc.so+0x1998a6)
    #9 kudu::rpc::AssignOutboundCallTask::Run(kudu::rpc::ReactorThread*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:907:14 (libkrpc.so+0x1a514e)
    #10 kudu::rpc::ReactorThread::AsyncHandler(ev::async&, int) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:349:10 (libkrpc.so+0x198c12)
    #11 void ev::base<ev_async, ev::async>::method_thunk<kudu::rpc::ReactorThread, &(kudu::rpc::ReactorThread::AsyncHandler(ev::async&, int))>(ev_loop*, ev_async*, int) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/ev++.h:497:7 (libkrpc.so+0x1a6373)
    #12 ev_invoke_pending /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/libev-4.33/ev.c:3770:11 (libev.so.4+0x9d31)
    #13 kudu::rpc::ReactorThread::InvokePendingCb(ev_loop*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:205:3 (libkrpc.so+0x197ae5)
    #14 ev_run /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/libev-4.33/ev.c:4190:7 (libev.so.4+0xaf9a)
    #15 ev::loop_ref::run(int) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/ev++.h:211:7 (libkrpc.so+0x1a2bf8)
    #16 kudu::rpc::ReactorThread::RunThread() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:510:9 (libkrpc.so+0x19b0c5)
    #17 kudu::rpc::ReactorThread::Init()::$_0::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:197:48 (libkrpc.so+0x19de11)
    #18 decltype(std::__1::forward<kudu::rpc::ReactorThread::Init()::$_0&>(fp)()) std::__1::__invoke<kudu::rpc::ReactorThread::Init()::$_0&>(kudu::rpc::ReactorThread::Init()::$_0&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/type_traits:3905:1 (libkrpc.so+0x19ddc9)
    #19 void std::__1::__invoke_void_return_wrapper<void>::__call<kudu::rpc::ReactorThread::Init()::$_0&>(kudu::rpc::ReactorThread::Init()::$_0&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__functional_base:348:9 (libkrpc.so+0x19dd59)
    #20 std::__1::__function::__alloc_func<kudu::rpc::ReactorThread::Init()::$_0, std::__1::allocator<kudu::rpc::ReactorThread::Init()::$_0>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1557:16 (libkrpc.so+0x19dd21)
    #21 std::__1::__function::__func<kudu::rpc::ReactorThread::Init()::$_0, std::__1::allocator<kudu::rpc::ReactorThread::Init()::$_0>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1731:12 (libkrpc.so+0x19d01d)
    #22 std::__1::__function::__value_func<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1884:16 (libtserver_test_util.so+0x60694)
    #23 std::__1::function<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:2556:12 (libtserver_test_util.so+0x604c9)
    #24 kudu::Thread::SuperviseThread(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/thread.cc:698:3 (libkudu_util.so+0x44a116)


MasterReplicationAndRpcSizeLimitTest.TabletReports: WARNING: ThreadSanitizer: data race (pid=15812)
  Write of size 8 at 0x7b0800003cd8 by main thread:
    #0 operator delete(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_new_delete.cpp:126 (master_replication-itest+0x342b39)
    #1 std::__1::_DeallocateCaller::__do_call(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/new:334:12 (master_replication-itest+0x34eac9)
    #2 std::__1::_DeallocateCaller::__do_deallocate_handle_size(void*, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/new:292:12 (master_replication-itest+0x34ea69)
    #3 std::__1::_DeallocateCaller::__do_deallocate_handle_size_align(void*, unsigned long, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/new:268:14 (libkrpc.so+0x126292)
    #4 std::__1::__libcpp_deallocate(void*, unsigned long, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/new:340:3 (libkrpc.so+0x126229)
    #5 std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> >::deallocate(std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1798:10 (libkrpc.so+0x139f79)
    #6 std::__1::allocator_traits<std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> > >::deallocate(std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> >&, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1533:14 (libkrpc.so+0x139e89)
    #7 std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::destroy(std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1837:9 (libkrpc.so+0x139d71)
    #8 std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::~__tree() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1824:3 (libkrpc.so+0x139ce4)
    #9 std::__1::set<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::~set() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/set:605:5 (libkrpc.so+0x133129)
    #10 cxa_at_exit_wrapper(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:394 (master_replication-itest+0x2c28bf)

  Previous read of size 4 at 0x7b0800003cdc by thread T264:
    #0 void std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> >::construct<kudu::rpc::RpcFeatureFlag, kudu::rpc::RpcFeatureFlag const&>(kudu::rpc::RpcFeatureFlag*, kudu::rpc::RpcFeatureFlag const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1809:35 (libkrpc.so+0x13d08b)
    #1 void std::__1::allocator_traits<std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> > >::__construct<kudu::rpc::RpcFeatureFlag, kudu::rpc::RpcFeatureFlag const&>(std::__1::integral_constant<bool, true>, std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> >&, kudu::rpc::RpcFeatureFlag*, kudu::rpc::RpcFeatureFlag const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1687:21 (libkrpc.so+0x13d041)
    #2 void std::__1::allocator_traits<std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> > >::construct<kudu::rpc::RpcFeatureFlag, kudu::rpc::RpcFeatureFlag const&>(std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> >&, kudu::rpc::RpcFeatureFlag*, kudu::rpc::RpcFeatureFlag const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1538:14 (libkrpc.so+0x13cdd1)
    #3 std::__1::unique_ptr<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>, std::__1::__tree_node_destructor<std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> > > > std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::__construct_node<kudu::rpc::RpcFeatureFlag const&>(kudu::rpc::RpcFeatureFlag const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:2194:5 (libkrpc.so+0x13cb28)
    #4 std::__1::__tree_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long> std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::__emplace_multi<kudu::rpc::RpcFeatureFlag const&>(kudu::rpc::RpcFeatureFlag const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:2242:25 (libkrpc.so+0x13c9d6)
    #5 std::__1::__tree_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long> std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::__insert_multi<kudu::rpc::RpcFeatureFlag const&>(kudu::rpc::RpcFeatureFlag const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1326:16 (libkrpc.so+0x13c048)
    #6 void std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::__assign_multi<std::__1::__tree_const_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long> >(std::__1::__tree_const_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long>, std::__1::__tree_const_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long>) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1697:9 (libkrpc.so+0x13bcaa)
    #7 std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::operator=(std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> > const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1648:9 (libkrpc.so+0x13bb09)
    #8 std::__1::set<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::operator=(std::__1::set<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> > const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/set:540:21 (libkrpc.so+0x133c10)
    #9 kudu::rpc::ClientNegotiation::SendNegotiate() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/client_negotiation.cc:327:20 (libkrpc.so+0x12b3b6)
    #10 kudu::rpc::ClientNegotiation::Negotiate(std::__1::unique_ptr<kudu::rpc::ErrorStatusPB, std::__1::default_delete<kudu::rpc::ErrorStatusPB> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/client_negotiation.cc:185:5 (libkrpc.so+0x12abeb)
    #11 kudu::rpc::DoClientNegotiation(kudu::rpc::Connection*, kudu::TriStateFlag, kudu::TriStateFlag, bool, kudu::MonoTime, std::__1::unique_ptr<kudu::rpc::ErrorStatusPB, std::__1::default_delete<kudu::rpc::ErrorStatusPB> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/negotiation.cc:231:3 (libkrpc.so+0x180d5b)
    #12 kudu::rpc::Negotiation::RunNegotiation(scoped_refptr<kudu::rpc::Connection> const&, kudu::TriStateFlag, kudu::TriStateFlag, bool, kudu::MonoTime) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/negotiation.cc:317:9 (libkrpc.so+0x17fea2)
    #13 kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:631:3 (libkrpc.so+0x19f37c)
    #14 decltype(std::__1::forward<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&>(fp)()) std::__1::__invoke<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&>(kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/type_traits:3905:1 (libkrpc.so+0x19f2f9)
    #15 void std::__1::__invoke_void_return_wrapper<void>::__call<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&>(kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__functional_base:348:9 (libkrpc.so+0x19f289)
    #16 std::__1::__function::__alloc_func<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1, std::__1::allocator<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1557:16 (libkrpc.so+0x19f251)
    #17 std::__1::__function::__func<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1, std::__1::allocator<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1731:12 (libkrpc.so+0x19e52d)
    #18 std::__1::__function::__value_func<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1884:16 (libtserver_test_util.so+0x60694)
    #19 std::__1::function<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:2556:12 (libtserver_test_util.so+0x604c9)
    #20 kudu::ThreadPool::DispatchThread() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:837:7 (libkudu_util.so+0x467df4)
    #21 kudu::ThreadPool::CreateThread()::$_2::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:919:48 (libkudu_util.so+0x46b261)
    #22 decltype(std::__1::forward<kudu::ThreadPool::CreateThread()::$_2&>(fp)()) std::__1::__invoke<kudu::ThreadPool::CreateThread()::$_2&>(kudu::ThreadPool::CreateThread()::$_2&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/type_traits:3905:1 (libkudu_util.so+0x46b219)
    #23 void std::__1::__invoke_void_return_wrapper<void>::__call<kudu::ThreadPool::CreateThread()::$_2&>(kudu::ThreadPool::CreateThread()::$_2&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__functional_base:348:9 (libkudu_util.so+0x46b1a9)
    #24 std::__1::__function::__alloc_func<kudu::ThreadPool::CreateThread()::$_2, std::__1::allocator<kudu::ThreadPool::CreateThread()::$_2>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1557:16 (libkudu_util.so+0x46b171)
    #25 std::__1::__function::__func<kudu::ThreadPool::CreateThread()::$_2, std::__1::allocator<kudu::ThreadPool::CreateThread()::$_2>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1731:12 (libkudu_util.so+0x46a46d)
    #26 std::__1::__function::__value_func<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1884:16 (libtserver_test_util.so+0x60694)
    #27 std::__1::function<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:2556:12 (libtserver_test_util.so+0x604c9)
    #28 kudu::Thread::SuperviseThread(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/thread.cc:698:3 (libkudu_util.so+0x44a116)

  As if synchronized via sleep:
    #0 nanosleep /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:363 (master_replication-itest+0x2e7da2)
    #1 base::SleepForNanoseconds(long) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/gutil/sysinfo.cc:96:10 (libgutil.so+0xb5f0a)
    #2 kudu::SleepFor(kudu::MonoDelta const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/monotime.cc:264:3 (libkudu_util.so+0x3ddd86)
    #3 kudu::Subprocess::KillAndWait(int) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/subprocess.cc:692:7 (libkudu_util.so+0x43c190)
    #4 kudu::clock::MiniChronyd::Stop() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/clock/test/mini_chronyd.cc:251:16 (libmini_chronyd.so+0x15190)
    #5 kudu::clock::MiniChronyd::~MiniChronyd() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/clock/test/mini_chronyd.cc:166:5 (libmini_chronyd.so+0x14e8e)
    #6 std::__1::default_delete<kudu::clock::MiniChronyd>::operator()(kudu::clock::MiniChronyd*) const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2262:5 (libmini_cluster.so+0xc18fe)
    #7 std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >::reset(kudu::clock::MiniChronyd*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2517:7 (libmini_cluster.so+0xc186d)
    #8 std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >::~unique_ptr() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2471:19 (libmini_cluster.so+0x9cc2b)
    #9 std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >::destroy(std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1811:92 (libmini_cluster.so+0xb2169)
    #10 void std::__1::allocator_traits<std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::__destroy<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >(std::__1::integral_constant<bool, true>, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >&, std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1703:21 (libmini_cluster.so+0xb2140)
    #11 void std::__1::allocator_traits<std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::destroy<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >(std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >&, std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1544:14 (libmini_cluster.so+0xb2100)
    #12 std::__1::__vector_base<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::__destruct_at_end(std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/vector:428:9 (libmini_cluster.so+0xb20b6)
    #13 std::__1::__vector_base<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::clear() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/vector:371:29 (libmini_cluster.so+0xb1fd4)
    #14 std::__1::__vector_base<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::~__vector_base() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/vector:465:9 (libmini_cluster.so+0xb1dcb)
    #15 std::__1::vector<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::~vector() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/vector:557:5 (libmini_cluster.so+0x98161)
    #16 kudu::cluster::ExternalMiniCluster::~ExternalMiniCluster() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/mini-cluster/external_mini_cluster.cc:173:1 (libmini_cluster.so+0x8457e)
    #17 kudu::cluster::ExternalMiniCluster::~ExternalMiniCluster() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/mini-cluster/external_mini_cluster.cc:171:45 (libmini_cluster.so+0x84689)
    #18 std::__1::default_delete<kudu::cluster::ExternalMiniCluster>::operator()(kudu::cluster::ExternalMiniCluster*) const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2262:5 (master_replication-itest+0x355737)
    #19 std::__1::unique_ptr<kudu::cluster::ExternalMiniCluster, std::__1::default_delete<kudu::cluster::ExternalMiniCluster> >::reset(kudu::cluster::ExternalMiniCluster*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2517:7 (master_replication-itest+0x35569d)
    #20 std::__1::unique_ptr<kudu::cluster::ExternalMiniCluster, std::__1::default_delete<kudu::cluster::ExternalMiniCluster> >::~unique_ptr() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2471:19 (master_replication-itest+0x35552b)
    #21 kudu::master::MasterReplicationAndRpcSizeLimitTest::~MasterReplicationAndRpcSizeLimitTest() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/integration-tests/master_replication-itest.cc:370:7 (master_replication-itest+0x34dff6)
    #22 kudu::master::MasterReplicationAndRpcSizeLimitTest_TabletReports_Test::~MasterReplicationAndRpcSizeLimitTest_TabletReports_Test() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/integration-tests/master_replication-itest.cc:536:1 (master_replication-itest+0x34e029)
    #23 testing::Test::DeleteSelf_() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/include/gtest/gtest.h:318:24 (libgtest.so.1.12.1+0x65467)
    #24 void testing::internal::HandleSehExceptionsInMethodIfSupported<testing::Test, void>(testing::Test*, void (testing::Test::*)(), char const*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2599:10 (libgtest.so.1.12.1+0x64e2f)
    #25 void testing::internal::HandleExceptionsInMethodIfSupported<testing::Test, void>(testing::Test*, void (testing::Test::*)(), char const*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2635:14 (libgtest.so.1.12.1+0x64e2f)
    #26 testing::TestInfo::Run() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2859:5 (libgtest.so.1.12.1+0x43dc2)
    #27 testing::TestSuite::Run() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:3012:30 (libgtest.so.1.12.1+0x44d24)
    #28 testing::internal::UnitTestImpl::RunAllTests() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:5870:44 (libgtest.so.1.12.1+0x59814)
    #29 bool testing::internal::HandleSehExceptionsInMethodIfSupported<testing::internal::UnitTestImpl, bool>(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2599:10 (libgtest.so.1.12.1+0x65cef)
    #30 bool testing::internal::HandleExceptionsInMethodIfSupported<testing::internal::UnitTestImpl, bool>(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2635:14 (libgtest.so.1.12.1+0x65cef)
    #31 testing::UnitTest::Run() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:5444:10 (libgtest.so.1.12.1+0x58dcc)
    #32 RUN_ALL_TESTS() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/gtest/gtest.h:2293:73 (master_replication-itest+0x36544b)
    #33 main /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/test_main.cc:109:10 (master_replication-itest+0x36434c)

  Thread T264 'client-negotiat' (tid=21891, finished) created by thread T102 at:
    #0 pthread_create /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:966 (master_replication-itest+0x2c6875)
    #1 kudu::Thread::StartThread(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::function<void ()>, unsigned long, scoped_refptr<kudu::Thread>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/thread.cc:642:15 (libkudu_util.so+0x44997a)
    #2 kudu::Thread::Create(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::function<void ()>, scoped_refptr<kudu::Thread>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/thread.h:146:12 (libmaster.so+0x2d2ae9)
    #3 kudu::ThreadPool::CreateThread() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:918:10 (libkudu_util.so+0x46702b)
    #4 kudu::ThreadPool::DoSubmit(std::__1::function<void ()>, kudu::ThreadPoolToken*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:703:21 (libkudu_util.so+0x464f7f)
    #5 kudu::ThreadPool::Submit(std::__1::function<void ()>) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:584:10 (libkudu_util.so+0x467592)
    #6 kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:631:3 (libkrpc.so+0x199288)
    #7 kudu::rpc::ReactorThread::FindOrStartConnection(kudu::rpc::ConnectionId const&, kudu::rpc::CredentialsPolicy, scoped_refptr<kudu::rpc::Connection>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:591:14 (libkrpc.so+0x199ce1)
    #8 kudu::rpc::ReactorThread::AssignOutboundCall(std::__1::shared_ptr<kudu::rpc::OutboundCall>) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:375:14 (libkrpc.so+0x1998a6)
    #9 kudu::rpc::AssignOutboundCallTask::Run(kudu::rpc::ReactorThread*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:907:14 (libkrpc.so+0x1a514e)
    #10 kudu::rpc::ReactorThread::AsyncHandler(ev::async&, int) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:349:10 (libkrpc.so+0x198c12)
    #11 void ev::base<ev_async, ev::async>::method_thunk<kudu::rpc::ReactorThread, &(kudu::rpc::ReactorThread::AsyncHandler(ev::async&, int))>(ev_loop*, ev_async*, int) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/ev++.h:497:7 (libkrpc.so+0x1a6373)
    #12 ev_invoke_pending /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/libev-4.33/ev.c:3770:11 (libev.so.4+0x9d31)
    #13 kudu::rpc::ReactorThread::InvokePendingCb(ev_loop*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:205:3 (libkrpc.so+0x197ae5)
    #14 ev_run /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/libev-4.33/ev.c:4190:7 (libev.so.4+0xaf9a)
    #15 ev::loop_ref::run(int) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/ev++.h:211:7 (libkrpc.so+0x1a2bf8)
    #16 kudu::rpc::ReactorThread::RunThread() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:510:9 (libkrpc.so+0x19b0c5)
    #17 kudu::rpc::ReactorThread::Init()::$_0::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:197:48 (libkrpc.so+0x19de11)
    #18 decltype(std::__1::forward<kudu::rpc::ReactorThread::Init()::$_0&>(fp)()) std::__1::__invoke<kudu::rpc::ReactorThread::Init()::$_0&>(kudu::rpc::ReactorThread::Init()::$_0&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/type_traits:3905:1 (libkrpc.so+0x19ddc9)
    #19 void std::__1::__invoke_void_return_wrapper<void>::__call<kudu::rpc::ReactorThread::Init()::$_0&>(kudu::rpc::ReactorThread::Init()::$_0&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__functional_base:348:9 (libkrpc.so+0x19dd59)
    #20 std::__1::__function::__alloc_func<kudu::rpc::ReactorThread::Init()::$_0, std::__1::allocator<kudu::rpc::ReactorThread::Init()::$_0>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1557:16 (libkrpc.so+0x19dd21)
    #21 std::__1::__function::__func<kudu::rpc::ReactorThread::Init()::$_0, std::__1::allocator<kudu::rpc::ReactorThread::Init()::$_0>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1731:12 (libkrpc.so+0x19d01d)
    #22 std::__1::__function::__value_func<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1884:16 (libtserver_test_util.so+0x60694)
    #23 std::__1::function<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:2556:12 (libtserver_test_util.so+0x604c9)
    #24 kudu::Thread::SuperviseThread(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/thread.cc:698:3 (libkudu_util.so+0x44a116)
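
Taken together, the stacks show the shape of the race: when main() returns,
the main thread runs C++ static destructors (frame #10, cxa_at_exit_wrapper)
and frees the tree nodes of a static std::set<kudu::rpc::RpcFeatureFlag>,
while negotiation thread T264, already finished by that point, had copied
from that same set in kudu::rpc::ClientNegotiation::SendNegotiate()
(client_negotiation.cc:327). The only ordering ThreadSanitizer can find
between the two accesses is the nanosleep() inside
kudu::Subprocess::KillAndWait() during ExternalMiniCluster teardown, hence
the "As if synchronized via sleep" section: a sleep is not a happens-before
edge, so the pair is reported as a race. The sketch below reproduces the
same pattern under TSAN; it is a minimal illustration only, not Kudu code,
and every identifier in it is invented for the example.

  // tsan_static_set_race.cc
  // Build: clang++ -std=c++17 -fsanitize=thread -pthread tsan_static_set_race.cc
  #include <chrono>
  #include <set>
  #include <thread>

  // A static whose destructor is registered via __cxa_atexit and runs on
  // the main thread when main() returns, standing in for the set destroyed
  // in the write stack above.
  static const std::set<int> kFeatureFlags = {1, 2, 3};

  int main() {
    std::thread negotiator([] {
      // Stand-in for SendNegotiate(): copy-assigning from the static set
      // walks the source tree's nodes, the "previous read" in the report.
      std::set<int> features;
      features = kFeatureFlags;
    });
    // Nothing joins this thread, so no happens-before edge exists between
    // its read and the at-exit destruction of kFeatureFlags.
    negotiator.detach();
    // Give the thread time to run, then return: static destructors delete
    // the nodes the detached thread read. TSAN reports the pair even though
    // the thread has long finished, matching "thread T264 ... finished".
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
    return 0;
  }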

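The usual remedies for this class of report are either to make the static
immortal so the at-exit write never happens (for example, a function-local
"static const auto* kFlags = new std::set<...>{...};" that is deliberately
leaked), or to join every negotiation thread before static destructors run.
Which of the two fits here depends on the actual Kudu code; neither is
asserted here.
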
Full log

[==========] Running 9 tests from 2 test suites.
[----------] Global test environment set-up.
[----------] 7 tests from MasterReplicationTest
[ RUN      ] MasterReplicationTest.TestSysTablesReplication
WARNING: Logging before InitGoogleLogging() is written to STDERR
I20250411 13:57:02.669430 15812 internal_mini_cluster.cc:156] Creating distributed mini masters. Addrs: 127.15.113.62:39157,127.15.113.61:45255,127.15.113.60:44199
I20250411 13:57:02.671165 15812 env_posix.cc:2264] Not raising this process' open files per process limit of 1048576; it is already as high as it can go
I20250411 13:57:02.672214 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:02.687940 15818 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:02.688005 15821 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:02.688126 15819 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:03.805959 15820 instance_detector.cc:116] could not retrieve GCE instance metadata: Timed out: curl timeout: Timeout was reached: Connection time-out
I20250411 13:57:03.806058 15812 server_base.cc:1029] Not found: could not retrieve instance metadata: unable to detect cloud type of this node, probably running in non-cloud environment
I20250411 13:57:03.809369 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:03.809612 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:03.809805 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379823809785 us; error 0 us; skew 500 ppm
I20250411 13:57:03.810621 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:03.821646 15812 webserver.cc:466] Webserver started at http://127.15.113.62:33227/ using document root <none> and password file <none>
I20250411 13:57:03.822520 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:03.822726 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:03.823181 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:03.828766 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/master-0-root/instance:
uuid: "ef7101a74df04a5cbc67d3c9dedba068"
format_stamp: "Formatted at 2025-04-11 13:57:03 on dist-test-slave-jcj2"
I20250411 13:57:03.836634 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.007s	user 0.004s	sys 0.004s
I20250411 13:57:03.841614 15827 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:03.842644 15812 fs_manager.cc:730] Time spent opening block manager: real 0.003s	user 0.001s	sys 0.002s
I20250411 13:57:03.842991 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/master-0-root
uuid: "ef7101a74df04a5cbc67d3c9dedba068"
format_stamp: "Formatted at 2025-04-11 13:57:03 on dist-test-slave-jcj2"
I20250411 13:57:03.843308 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/master-0-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/master-0-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/master-0-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:03.913156 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:03.914687 15812 env_posix.cc:2264] Not raising this process' running threads per effective uid limit of 18446744073709551615; it is already as high as it can go
I20250411 13:57:03.915169 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:03.983183 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.62:39157
I20250411 13:57:03.983253 15878 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.62:39157 every 8 connection(s)
I20250411 13:57:03.988765 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
I20250411 13:57:03.989710 15879 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 00000000000000000000000000000000. 1 dirs total, 0 dirs full, 0 dirs failed
W20250411 13:57:03.996402 15882 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:03.995010 15881 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:04.009238 15884 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:04.009234 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:04.010229 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:04.010479 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:04.010668 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379824010653 us; error 0 us; skew 500 ppm
I20250411 13:57:04.011510 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:04.015630 15812 webserver.cc:466] Webserver started at http://127.15.113.61:34731/ using document root <none> and password file <none>
I20250411 13:57:04.016324 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:04.016551 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:04.016868 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:04.018187 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/master-1-root/instance:
uuid: "0ce6dab661e5452f9de8e26a520986a2"
format_stamp: "Formatted at 2025-04-11 13:57:04 on dist-test-slave-jcj2"
I20250411 13:57:04.020818 15879 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:04.024770 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.006s	user 0.009s	sys 0.000s
I20250411 13:57:04.031158 15890 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:04.032011 15812 fs_manager.cc:730] Time spent opening block manager: real 0.005s	user 0.004s	sys 0.002s
I20250411 13:57:04.032271 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/master-1-root
uuid: "0ce6dab661e5452f9de8e26a520986a2"
format_stamp: "Formatted at 2025-04-11 13:57:04 on dist-test-slave-jcj2"
I20250411 13:57:04.032549 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/master-1-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/master-1-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/master-1-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:04.047518 15879 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } has no permanent_uuid. Determining permanent_uuid...
W20250411 13:57:04.049636 15828 proxy.cc:239] Call had error, refreshing address and retrying: Network error: Client connection negotiation failed: client connection to 127.15.113.61:45255: connect: Connection refused (error 111)
W20250411 13:57:04.053634 15879 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:45255: Network error: Client connection negotiation failed: client connection to 127.15.113.61:45255: connect: Connection refused (error 111)
I20250411 13:57:04.067711 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:04.068917 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:04.103178 15879 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } attempt: 1
W20250411 13:57:04.108073 15879 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:45255: Network error: Client connection negotiation failed: client connection to 127.15.113.61:45255: connect: Connection refused (error 111)
I20250411 13:57:04.133375 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.61:45255
I20250411 13:57:04.133451 15943 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.61:45255 every 8 connection(s)
I20250411 13:57:04.138180 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
I20250411 13:57:04.138195 15944 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 00000000000000000000000000000000. 1 dirs total, 0 dirs full, 0 dirs failed
W20250411 13:57:04.143873 15946 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:04.145565 15947 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:04.145677 15944 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } has no permanent_uuid. Determining permanent_uuid...
W20250411 13:57:04.148645 15949 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:04.150210 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:04.150815 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:04.151075 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:04.151240 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379824151223 us; error 0 us; skew 500 ppm
I20250411 13:57:04.151795 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:04.154834 15812 webserver.cc:466] Webserver started at http://127.15.113.60:43275/ using document root <none> and password file <none>
I20250411 13:57:04.155367 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:04.155551 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:04.155871 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:04.157053 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/master-2-root/instance:
uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6"
format_stamp: "Formatted at 2025-04-11 13:57:04 on dist-test-slave-jcj2"
I20250411 13:57:04.158629 15944 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:04.163352 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.006s	user 0.007s	sys 0.000s
I20250411 13:57:04.167959 15956 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:04.168720 15812 fs_manager.cc:730] Time spent opening block manager: real 0.003s	user 0.004s	sys 0.000s
I20250411 13:57:04.168983 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/master-2-root
uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6"
format_stamp: "Formatted at 2025-04-11 13:57:04 on dist-test-slave-jcj2"
I20250411 13:57:04.169251 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/master-2-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/master-2-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/master-2-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:04.170994 15944 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:04.176600 15879 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } attempt: 2
W20250411 13:57:04.179160 15944 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:44199: Network error: Client connection negotiation failed: client connection to 127.15.113.60:44199: connect: Connection refused (error 111)
I20250411 13:57:04.184003 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:04.184646 15879 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:04.185303 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
W20250411 13:57:04.188994 15879 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:44199: Network error: Client connection negotiation failed: client connection to 127.15.113.60:44199: connect: Connection refused (error 111)
I20250411 13:57:04.220646 15879 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } attempt: 1
I20250411 13:57:04.222622 15944 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } attempt: 1
W20250411 13:57:04.226611 15879 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:44199: Network error: Client connection negotiation failed: client connection to 127.15.113.60:44199: connect: Connection refused (error 111)
W20250411 13:57:04.227782 15944 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:44199: Network error: Client connection negotiation failed: client connection to 127.15.113.60:44199: connect: Connection refused (error 111)
I20250411 13:57:04.248462 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.60:44199
I20250411 13:57:04.248924 16008 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.60:44199 every 8 connection(s)
I20250411 13:57:04.252554 16009 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 00000000000000000000000000000000. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:57:04.252611 15812 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 0
I20250411 13:57:04.257651 16009 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:04.268079 16009 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:04.276445 16009 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:04.291888 16009 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6: Bootstrap starting.
I20250411 13:57:04.295221 15944 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } attempt: 2
I20250411 13:57:04.298463 16009 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:04.300557 16009 log.cc:826] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6: Log is configured to *not* fsync() on all Append() calls
I20250411 13:57:04.302631 15879 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } attempt: 2
I20250411 13:57:04.306380 16009 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6: No bootstrap required, opened a new log
I20250411 13:57:04.312678 15944 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2: Bootstrap starting.
I20250411 13:57:04.317775 15879 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068: Bootstrap starting.
I20250411 13:57:04.319080 15944 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:04.322387 15879 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:04.325124 15944 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2: No bootstrap required, opened a new log
I20250411 13:57:04.327685 15879 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068: No bootstrap required, opened a new log
I20250411 13:57:04.330806 15879 raft_consensus.cc:357] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } }
I20250411 13:57:04.330832 15944 raft_consensus.cc:357] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } }
I20250411 13:57:04.330832 16009 raft_consensus.cc:357] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } }
I20250411 13:57:04.331718 15879 raft_consensus.cc:383] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:04.331826 16009 raft_consensus.cc:383] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:04.331794 15944 raft_consensus.cc:383] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:04.332082 15879 raft_consensus.cc:738] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: ef7101a74df04a5cbc67d3c9dedba068, State: Initialized, Role: FOLLOWER
I20250411 13:57:04.332209 16009 raft_consensus.cc:738] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 7eebe9d12be74e3881ebe0a09e9ce6e6, State: Initialized, Role: FOLLOWER
I20250411 13:57:04.332334 15944 raft_consensus.cc:738] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 0ce6dab661e5452f9de8e26a520986a2, State: Initialized, Role: FOLLOWER
I20250411 13:57:04.333086 15944 consensus_queue.cc:260] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } }
I20250411 13:57:04.333086 15879 consensus_queue.cc:260] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } }
I20250411 13:57:04.333084 16009 consensus_queue.cc:260] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } }
I20250411 13:57:04.337829 16019 sys_catalog.cc:455] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } } }
I20250411 13:57:04.338923 16019 sys_catalog.cc:458] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:04.340536 15879 sys_catalog.cc:564] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:04.342314 16018 sys_catalog.cc:455] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } } }
I20250411 13:57:04.343035 16018 sys_catalog.cc:458] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:04.343793 16017 sys_catalog.cc:455] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } } }
I20250411 13:57:04.344619 16017 sys_catalog.cc:458] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:04.345412 15944 sys_catalog.cc:564] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:04.345909 16009 sys_catalog.cc:564] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:04.374555 16019 raft_consensus.cc:491] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:57:04.375262 16019 raft_consensus.cc:513] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } }
I20250411 13:57:04.379813 15919 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "ef7101a74df04a5cbc67d3c9dedba068" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "0ce6dab661e5452f9de8e26a520986a2" is_pre_election: true
I20250411 13:57:04.380826 15919 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate ef7101a74df04a5cbc67d3c9dedba068 in term 0.
I20250411 13:57:04.382921 15812 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 1
I20250411 13:57:04.382706 15828 leader_election.cc:304] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 0ce6dab661e5452f9de8e26a520986a2, ef7101a74df04a5cbc67d3c9dedba068; no voters: 
I20250411 13:57:04.385876 16045 raft_consensus.cc:2802] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [term 0 FOLLOWER]: Leader pre-election won for term 1
I20250411 13:57:04.386421 16045 raft_consensus.cc:491] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [term 0 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
I20250411 13:57:04.386920 16045 raft_consensus.cc:3058] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:04.387220 15984 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "ef7101a74df04a5cbc67d3c9dedba068" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" is_pre_election: true
I20250411 13:57:04.388017 15984 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate ef7101a74df04a5cbc67d3c9dedba068 in term 0.
I20250411 13:57:04.389456 16019 leader_election.cc:290] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers 0ce6dab661e5452f9de8e26a520986a2 (127.15.113.61:45255), 7eebe9d12be74e3881ebe0a09e9ce6e6 (127.15.113.60:44199)
I20250411 13:57:04.395516 16045 raft_consensus.cc:513] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [term 1 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } }
I20250411 13:57:04.398257 15919 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "ef7101a74df04a5cbc67d3c9dedba068" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "0ce6dab661e5452f9de8e26a520986a2"
I20250411 13:57:04.398753 15919 raft_consensus.cc:3058] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [term 0 FOLLOWER]: Advancing to term 1
W20250411 13:57:04.400110 16040 catalog_manager.cc:1560] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068: loading cluster ID for follower catalog manager: Not found: cluster ID entry not found
W20250411 13:57:04.400512 16040 catalog_manager.cc:875] Not found: cluster ID entry not found: failed to prepare follower catalog manager, will retry
I20250411 13:57:04.402570 15984 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "ef7101a74df04a5cbc67d3c9dedba068" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6"
I20250411 13:57:04.403265 15984 raft_consensus.cc:3058] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:04.406275 15919 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate ef7101a74df04a5cbc67d3c9dedba068 in term 1.
I20250411 13:57:04.407213 15828 leader_election.cc:304] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [CANDIDATE]: Term 1 election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 0ce6dab661e5452f9de8e26a520986a2, ef7101a74df04a5cbc67d3c9dedba068; no voters: 
I20250411 13:57:04.407956 16019 raft_consensus.cc:2802] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [term 1 FOLLOWER]: Leader election won for term 1
I20250411 13:57:04.411767 15984 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate ef7101a74df04a5cbc67d3c9dedba068 in term 1.
I20250411 13:57:04.412348 16045 leader_election.cc:290] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [CANDIDATE]: Term 1 election: Requested vote from peers 0ce6dab661e5452f9de8e26a520986a2 (127.15.113.61:45255), 7eebe9d12be74e3881ebe0a09e9ce6e6 (127.15.113.60:44199)
I20250411 13:57:04.414445 16051 catalog_manager.cc:797] Waiting for catalog manager background task thread to start: Service unavailable: Catalog manager is not initialized. State: Starting
I20250411 13:57:04.415390 16019 raft_consensus.cc:695] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [term 1 LEADER]: Becoming Leader. State: Replica: ef7101a74df04a5cbc67d3c9dedba068, State: Running, Role: LEADER
I20250411 13:57:04.416431 16019 consensus_queue.cc:237] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [LEADER]: Queue going to LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 1, Majority size: 2, State: 0, Mode: LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } }
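The pre-election and election above are each decided as soon as a strict majority of the 3 voters answer yes, which is why both summaries read "received 2 responses out of 3 voters: 2 yes votes". A minimal C++ sketch of that quorum rule (illustrative only, not Kudu's leader_election.cc):

    #include <iostream>

    // A candidate wins once yes-votes reach a strict majority of the voter
    // set, and loses once a majority can no longer be reached. With 3 voters
    // the quorum is 2, so the elections above are decided after 2 responses.
    bool election_decided(int yes, int no, int num_voters, bool* won) {
      const int quorum = num_voters / 2 + 1;
      if (yes >= quorum) { *won = true; return true; }
      if (no >= num_voters - quorum + 1) { *won = false; return true; }
      return false;  // keep waiting for more responses
    }

    int main() {
      bool won = false;
      // 2 yes votes, 0 no votes, 3 voters, as in the log: candidate won.
      if (election_decided(2, 0, 3, &won)) {
        std::cout << (won ? "candidate won\n" : "candidate lost\n");
      }
      return 0;
    }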
W20250411 13:57:04.420213 15833 tablet.cc:2367] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068: Can't schedule compaction. Clean time has not been advanced past its initial value.
I20250411 13:57:04.422086 15812 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 2
I20250411 13:57:04.424067 15812 file_cache.cc:492] Constructed file cache with capacity 419430
W20250411 13:57:04.428102 16055 catalog_manager.cc:1560] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2: loading cluster ID for follower catalog manager: Not found: cluster ID entry not found
W20250411 13:57:04.428460 16055 catalog_manager.cc:875] Not found: cluster ID entry not found: failed to prepare follower catalog manager, will retry
I20250411 13:57:04.431545 16045 sys_catalog.cc:455] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [sys.catalog]: SysCatalogTable state changed. Reason: New leader ef7101a74df04a5cbc67d3c9dedba068. Latest consensus state: current_term: 1 leader_uuid: "ef7101a74df04a5cbc67d3c9dedba068" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } } }
I20250411 13:57:04.432404 16045 sys_catalog.cc:458] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [sys.catalog]: This master's current role is: LEADER
W20250411 13:57:04.433988 16057 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:04.437820 16058 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:04.439244 16059 catalog_manager.cc:1477] Loading table and tablet metadata into memory...
W20250411 13:57:04.440780 16061 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:04.444211 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:04.445041 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:04.445235 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:04.445377 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379824445361 us; error 0 us; skew 500 ppm
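The HybridClock line above reports a physical reading plus an error bound and a 500 ppm skew. Kudu's hybrid time packs the physical microseconds together with a small logical counter used to order events within the same microsecond; the sketch below assumes the commonly documented 12-bit logical component, so treat that constant as an assumption rather than a quote of the implementation:

    #include <cstdint>
    #include <iostream>

    // Assumed layout: upper bits = physical microseconds, low 12 bits =
    // logical counter. With a 500 ppm skew bound, the error estimate grows
    // by at most 500 us per elapsed second between clock readings.
    constexpr int kLogicalBits = 12;  // assumption, see above

    uint64_t MakeHybrid(uint64_t micros, uint64_t logical) {
      return (micros << kLogicalBits) | (logical & ((1ULL << kLogicalBits) - 1));
    }
    uint64_t PhysicalMicros(uint64_t ht) { return ht >> kLogicalBits; }
    uint64_t LogicalPart(uint64_t ht) { return ht & ((1ULL << kLogicalBits) - 1); }

    int main() {
      const uint64_t now_us = 1744379824445361ULL;  // value from the log line
      const uint64_t ht = MakeHybrid(now_us, 0);
      std::cout << PhysicalMicros(ht) << " us, logical " << LogicalPart(ht) << "\n";
      return 0;
    }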
I20250411 13:57:04.445858 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:04.447346 16059 catalog_manager.cc:1486] Initializing Kudu cluster ID...
I20250411 13:57:04.448822 15812 webserver.cc:466] Webserver started at http://127.15.113.1:40363/ using document root <none> and password file <none>
I20250411 13:57:04.449306 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:04.449470 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:04.449741 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:04.450732 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/ts-0-root/instance:
uuid: "a6dd2df099324e599cee6ac593d2f5c4"
format_stamp: "Formatted at 2025-04-11 13:57:04 on dist-test-slave-jcj2"
I20250411 13:57:04.455359 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.004s	sys 0.000s
I20250411 13:57:04.459646 16067 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:04.460502 15812 fs_manager.cc:730] Time spent opening block manager: real 0.003s	user 0.002s	sys 0.002s
I20250411 13:57:04.460805 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/ts-0-root
uuid: "a6dd2df099324e599cee6ac593d2f5c4"
format_stamp: "Formatted at 2025-04-11 13:57:04 on dist-test-slave-jcj2"
I20250411 13:57:04.461155 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/ts-0-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/ts-0-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/ts-0-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:04.463642 15984 raft_consensus.cc:1273] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 1 FOLLOWER]: Refusing update from remote peer ef7101a74df04a5cbc67d3c9dedba068: Log matching property violated. Preceding OpId in replica: term: 0 index: 0. Preceding OpId from leader: term: 1 index: 2. (index mismatch)
I20250411 13:57:04.464493 15919 raft_consensus.cc:1273] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [term 1 FOLLOWER]: Refusing update from remote peer ef7101a74df04a5cbc67d3c9dedba068: Log matching property violated. Preceding OpId in replica: term: 0 index: 0. Preceding OpId from leader: term: 1 index: 2. (index mismatch)
I20250411 13:57:04.465365 16045 consensus_queue.cc:1035] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [LEADER]: Connected to new peer: Peer: permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:57:04.466900 16019 consensus_queue.cc:1035] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [LEADER]: Connected to new peer: Peer: permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
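The two "Refusing update ... Log matching property violated" lines show the Raft consistency check in action: a follower only accepts an append whose preceding OpId matches the tail of its own log, and the leader reacts to LMP_MISMATCH by walking its next index back (hence "Next index: 1"). A toy version of the follower-side check (illustrative, not Kudu's raft_consensus.cc):

    #include <cstdint>
    #include <vector>

    struct OpId { int64_t term; int64_t index; };

    // Accept a batch only if the leader's preceding op matches what this
    // replica has at that index. An empty log only matches the 0.0 sentinel,
    // which is why a preceding OpId of 1.2 is refused above while the
    // replica's last op is still 0.0.
    bool PrecedingOpMatches(const std::vector<OpId>& log, OpId preceding) {
      if (preceding.index == 0) return true;  // start of log
      if (preceding.index > static_cast<int64_t>(log.size())) return false;
      return log[preceding.index - 1].term == preceding.term;
    }

    int main() {
      std::vector<OpId> replica_log;   // empty replica log: last op is 0.0
      const OpId from_leader{1, 2};    // leader's preceding OpId 1.2
      return PrecedingOpMatches(replica_log, from_leader) ? 0 : 1;  // 1: mismatch
    }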
I20250411 13:57:04.492652 16018 sys_catalog.cc:455] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [sys.catalog]: SysCatalogTable state changed. Reason: New leader ef7101a74df04a5cbc67d3c9dedba068. Latest consensus state: current_term: 1 leader_uuid: "ef7101a74df04a5cbc67d3c9dedba068" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } } }
I20250411 13:57:04.493346 16018 sys_catalog.cc:458] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:04.507189 16017 sys_catalog.cc:455] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [sys.catalog]: SysCatalogTable state changed. Reason: New leader ef7101a74df04a5cbc67d3c9dedba068. Latest consensus state: current_term: 1 leader_uuid: "ef7101a74df04a5cbc67d3c9dedba068" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } } }
I20250411 13:57:04.507962 16017 sys_catalog.cc:458] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:04.512439 16045 sys_catalog.cc:455] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [sys.catalog]: SysCatalogTable state changed. Reason: Peer health change. Latest consensus state: current_term: 1 leader_uuid: "ef7101a74df04a5cbc67d3c9dedba068" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } } }
I20250411 13:57:04.513172 16045 sys_catalog.cc:458] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [sys.catalog]: This master's current role is: LEADER
I20250411 13:57:04.513990 16045 sys_catalog.cc:455] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [sys.catalog]: SysCatalogTable state changed. Reason: Peer health change. Latest consensus state: current_term: 1 leader_uuid: "ef7101a74df04a5cbc67d3c9dedba068" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } } }
I20250411 13:57:04.514649 16045 sys_catalog.cc:458] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [sys.catalog]: This master's current role is: LEADER
I20250411 13:57:04.518632 16018 sys_catalog.cc:455] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [sys.catalog]: SysCatalogTable state changed. Reason: Replicated consensus-only round. Latest consensus state: current_term: 1 leader_uuid: "ef7101a74df04a5cbc67d3c9dedba068" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } } }
I20250411 13:57:04.519305 16018 sys_catalog.cc:458] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:04.524292 16017 sys_catalog.cc:455] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [sys.catalog]: SysCatalogTable state changed. Reason: Replicated consensus-only round. Latest consensus state: current_term: 1 leader_uuid: "ef7101a74df04a5cbc67d3c9dedba068" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } } }
I20250411 13:57:04.524895 16017 sys_catalog.cc:458] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:04.525151 16059 catalog_manager.cc:1349] Generated new cluster ID: 65369a8061d140ae883d3590ec905e7c
I20250411 13:57:04.525410 16059 catalog_manager.cc:1497] Initializing Kudu internal certificate authority...
I20250411 13:57:04.530655 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:04.532022 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:04.533522 15812 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:04.537715 15812 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:04.537967 15812 ts_tablet_manager.cc:525] Time spent loading tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:04.538189 15812 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:04.538333 15812 ts_tablet_manager.cc:589] Time spent registering tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:04.584134 16059 catalog_manager.cc:1372] Generated new certificate authority record
I20250411 13:57:04.586611 16059 catalog_manager.cc:1506] Loading token signing keys...
I20250411 13:57:04.619081 16059 catalog_manager.cc:5954] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068: Generated new TSK 0
I20250411 13:57:04.620546 16059 catalog_manager.cc:1516] Initializing in-progress tserver states...
I20250411 13:57:04.663538 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.1:38103
I20250411 13:57:04.663877 16136 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.1:38103 every 8 connection(s)
I20250411 13:57:04.694931 16137 heartbeater.cc:344] Connected to a master server at 127.15.113.60:44199
I20250411 13:57:04.695451 16137 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:04.696686 16137 heartbeater.cc:507] Master 127.15.113.60:44199 requested a full tablet report, sending...
I20250411 13:57:04.700903 16138 heartbeater.cc:344] Connected to a master server at 127.15.113.61:45255
I20250411 13:57:04.701319 16138 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:04.702267 16138 heartbeater.cc:507] Master 127.15.113.61:45255 requested a full tablet report, sending...
I20250411 13:57:04.704538 15974 ts_manager.cc:194] Registered new tserver with Master: a6dd2df099324e599cee6ac593d2f5c4 (127.15.113.1:38103)
I20250411 13:57:04.705026 15812 file_cache.cc:492] Constructed file cache with capacity 419430
I20250411 13:57:04.705562 15909 ts_manager.cc:194] Registered new tserver with Master: a6dd2df099324e599cee6ac593d2f5c4 (127.15.113.1:38103)
I20250411 13:57:04.712016 16139 heartbeater.cc:344] Connected to a master server at 127.15.113.62:39157
I20250411 13:57:04.712419 16139 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:04.713333 16139 heartbeater.cc:507] Master 127.15.113.62:39157 requested a full tablet report, sending...
W20250411 13:57:04.713531 16144 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:04.715778 15844 ts_manager.cc:194] Registered new tserver with Master: a6dd2df099324e599cee6ac593d2f5c4 (127.15.113.1:38103)
W20250411 13:57:04.717073 16145 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:04.719609 15844 master_service.cc:496] Signed X509 certificate for tserver {username='slave'} at 127.0.0.1:51718
W20250411 13:57:04.726655 16147 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
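The registration sequence above follows a fixed pattern per master: connect, register the tserver, and, because the master has never heard from this tserver, send a full tablet report rather than an incremental one. A condensed sketch of that flow (hypothetical structure, not Kudu's heartbeater.cc):

    #include <iostream>
    #include <string>
    #include <vector>

    // One heartbeater per master. The first report after registration is a
    // full tablet report; later ones are incremental unless a master asks
    // for a full report again (e.g. after a leadership change).
    struct Heartbeater {
      std::string master_addr;
      bool needs_full_report = true;

      void Tick() {
        if (needs_full_report) {
          std::cout << "Master " << master_addr
                    << " requested a full tablet report, sending...\n";
          needs_full_report = false;
        } else {
          std::cout << "sending incremental report to " << master_addr << "\n";
        }
      }
    };

    int main() {
      std::vector<Heartbeater> heartbeaters = {
          {"127.15.113.60:44199"}, {"127.15.113.61:45255"}, {"127.15.113.62:39157"}};
      for (auto& h : heartbeaters) h.Tick();  // first round: full reports
      for (auto& h : heartbeaters) h.Tick();  // later rounds: incremental
      return 0;
    }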
I20250411 13:57:04.727164 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:04.728260 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:04.728511 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:04.728713 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379824728688 us; error 0 us; skew 500 ppm
I20250411 13:57:04.729393 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:04.732159 15812 webserver.cc:466] Webserver started at http://127.15.113.2:43683/ using document root <none> and password file <none>
I20250411 13:57:04.732764 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:04.732980 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:04.733299 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:04.734611 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/ts-1-root/instance:
uuid: "9231028098a14e2c902c68bc82c523db"
format_stamp: "Formatted at 2025-04-11 13:57:04 on dist-test-slave-jcj2"
I20250411 13:57:04.739907 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.005s	user 0.004s	sys 0.000s
I20250411 13:57:04.743737 16152 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:04.744549 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.004s	sys 0.000s
I20250411 13:57:04.744876 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/ts-1-root
uuid: "9231028098a14e2c902c68bc82c523db"
format_stamp: "Formatted at 2025-04-11 13:57:04 on dist-test-slave-jcj2"
I20250411 13:57:04.745209 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/ts-1-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/ts-1-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/ts-1-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:04.762064 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:04.763717 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:04.766098 15812 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:04.769598 15812 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:04.769861 15812 ts_tablet_manager.cc:525] Time spent loading tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:04.770149 15812 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:04.770344 15812 ts_tablet_manager.cc:589] Time spent registering tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:04.847532 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.2:43267
I20250411 13:57:04.847671 16214 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.2:43267 every 8 connection(s)
I20250411 13:57:04.878010 16216 heartbeater.cc:344] Connected to a master server at 127.15.113.61:45255
I20250411 13:57:04.878262 16215 heartbeater.cc:344] Connected to a master server at 127.15.113.60:44199
I20250411 13:57:04.878444 16216 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:04.878569 16215 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:04.879417 16215 heartbeater.cc:507] Master 127.15.113.60:44199 requested a full tablet report, sending...
I20250411 13:57:04.879381 16216 heartbeater.cc:507] Master 127.15.113.61:45255 requested a full tablet report, sending...
I20250411 13:57:04.882359 15974 ts_manager.cc:194] Registered new tserver with Master: 9231028098a14e2c902c68bc82c523db (127.15.113.2:43267)
I20250411 13:57:04.882359 15909 ts_manager.cc:194] Registered new tserver with Master: 9231028098a14e2c902c68bc82c523db (127.15.113.2:43267)
I20250411 13:57:04.884964 15812 file_cache.cc:492] Constructed file cache with capacity 419430
I20250411 13:57:04.888387 16218 heartbeater.cc:344] Connected to a master server at 127.15.113.62:39157
I20250411 13:57:04.888680 16218 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:04.889314 16218 heartbeater.cc:507] Master 127.15.113.62:39157 requested a full tablet report, sending...
I20250411 13:57:04.891957 15844 ts_manager.cc:194] Registered new tserver with Master: 9231028098a14e2c902c68bc82c523db (127.15.113.2:43267)
W20250411 13:57:04.892947 16222 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:04.893615 15844 master_service.cc:496] Signed X509 certificate for tserver {username='slave'} at 127.0.0.1:51720
W20250411 13:57:04.896273 16223 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:04.900672 16225 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:04.900840 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:04.901990 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:04.902269 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:04.902491 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379824902475 us; error 0 us; skew 500 ppm
I20250411 13:57:04.903194 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:04.906069 15812 webserver.cc:466] Webserver started at http://127.15.113.3:33759/ using document root <none> and password file <none>
I20250411 13:57:04.906662 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:04.906942 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:04.907269 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:04.908599 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/ts-2-root/instance:
uuid: "1eddec810d644687b62a5f4756c4e702"
format_stamp: "Formatted at 2025-04-11 13:57:04 on dist-test-slave-jcj2"
I20250411 13:57:04.914436 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.005s	user 0.001s	sys 0.006s
I20250411 13:57:04.918517 16230 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:04.919376 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.003s	sys 0.000s
I20250411 13:57:04.919749 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/ts-2-root
uuid: "1eddec810d644687b62a5f4756c4e702"
format_stamp: "Formatted at 2025-04-11 13:57:04 on dist-test-slave-jcj2"
I20250411 13:57:04.920095 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/ts-2-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/ts-2-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestSysTablesReplication.1744379822517842-15812-0/minicluster-data/ts-2-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:04.953658 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:04.955384 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:04.958088 15812 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:04.960903 15812 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:04.961174 15812 ts_tablet_manager.cc:525] Time spent loading tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:04.961448 15812 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:04.961661 15812 ts_tablet_manager.cc:589] Time spent registering tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:05.046218 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.3:42843
I20250411 13:57:05.046337 16292 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.3:42843 every 8 connection(s)
I20250411 13:57:05.071383 16294 heartbeater.cc:344] Connected to a master server at 127.15.113.61:45255
I20250411 13:57:05.071846 16294 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:05.072863 16294 heartbeater.cc:507] Master 127.15.113.61:45255 requested a full tablet report, sending...
I20250411 13:57:05.075879 15909 ts_manager.cc:194] Registered new tserver with Master: 1eddec810d644687b62a5f4756c4e702 (127.15.113.3:42843)
I20250411 13:57:05.081915 16293 heartbeater.cc:344] Connected to a master server at 127.15.113.60:44199
I20250411 13:57:05.082330 16293 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:05.083271 16293 heartbeater.cc:507] Master 127.15.113.60:44199 requested a full tablet report, sending...
I20250411 13:57:05.086892 16295 heartbeater.cc:344] Connected to a master server at 127.15.113.62:39157
I20250411 13:57:05.086792 15974 ts_manager.cc:194] Registered new tserver with Master: 1eddec810d644687b62a5f4756c4e702 (127.15.113.3:42843)
I20250411 13:57:05.087317 16295 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:05.088158 16295 heartbeater.cc:507] Master 127.15.113.62:39157 requested a full tablet report, sending...
I20250411 13:57:05.089949 15844 ts_manager.cc:194] Registered new tserver with Master: 1eddec810d644687b62a5f4756c4e702 (127.15.113.3:42843)
I20250411 13:57:05.090518 15812 internal_mini_cluster.cc:371] 3 TS(s) registered with all masters after 0.021964843s
I20250411 13:57:05.091272 15844 master_service.cc:496] Signed X509 certificate for tserver {username='slave'} at 127.0.0.1:51728
I20250411 13:57:05.137840 15844 catalog_manager.cc:2232] Servicing CreateTable request from {username='slave'} at 127.0.0.1:51740:
name: "testMasterReplication-1"
schema {
  columns {
    name: "key"
    type: INT32
    is_key: true
    is_nullable: false
    encoding: AUTO_ENCODING
    compression: DEFAULT_COMPRESSION
    cfile_block_size: 0
    immutable: false
  }
  columns {
    name: "int_val"
    type: INT32
    is_key: false
    is_nullable: false
    encoding: AUTO_ENCODING
    compression: DEFAULT_COMPRESSION
    cfile_block_size: 0
    immutable: false
  }
  columns {
    name: "string_val"
    type: STRING
    is_key: false
    is_nullable: false
    encoding: AUTO_ENCODING
    compression: DEFAULT_COMPRESSION
    cfile_block_size: 0
    immutable: false
  }
}
split_rows_range_bounds {
}
partition_schema {
  range_schema {
    columns {
      name: "key"
    }
  }
}
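The request body above is what the public Kudu C++ client generates for a three-column schema range-partitioned on "key". A minimal sketch of issuing the same CreateTable call through the client API (master address taken from this cluster's log; Status checks elided for brevity):

    #include <memory>
    #include <string>
    #include <vector>
    #include "kudu/client/client.h"

    using kudu::client::KuduClient;
    using kudu::client::KuduClientBuilder;
    using kudu::client::KuduColumnSchema;
    using kudu::client::KuduSchema;
    using kudu::client::KuduSchemaBuilder;
    using kudu::client::KuduTableCreator;

    int main() {
      kudu::client::sp::shared_ptr<KuduClient> client;
      KuduClientBuilder()
          .add_master_server_addr("127.15.113.62:39157")  // leader master above
          .Build(&client);  // check the returned Status in real code

      KuduSchemaBuilder b;
      b.AddColumn("key")->Type(KuduColumnSchema::INT32)->NotNull()->PrimaryKey();
      b.AddColumn("int_val")->Type(KuduColumnSchema::INT32)->NotNull();
      b.AddColumn("string_val")->Type(KuduColumnSchema::STRING)->NotNull();
      KuduSchema schema;
      b.Build(&schema);  // check the returned Status in real code

      std::unique_ptr<KuduTableCreator> creator(client->NewTableCreator());
      creator->table_name("testMasterReplication-1")
          .schema(&schema)
          .set_range_partition_columns({"key"})
          .num_replicas(3)
          .Create();  // check the returned Status in real code
      return 0;
    }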
W20250411 13:57:05.156448 15844 catalog_manager.cc:6943] The number of live tablet servers is not enough to re-replicate a tablet replica of the newly created table testMasterReplication-1 in case of a server failure: 4 tablet servers would be needed, 3 are available. Consider bringing up more tablet servers.
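The warning encodes a simple sizing rule: to re-replicate after losing one server, the cluster needs at least one spare beyond the replication factor, i.e. num_replicas + 1 live tablet servers for a 3-replica table. A worked check:

    #include <iostream>

    int main() {
      const int num_replicas = 3;
      const int live_tservers = 3;
      const int needed = num_replicas + 1;  // one spare to host a rebuilt replica
      if (live_tservers < needed) {
        std::cout << needed << " tablet servers would be needed, "
                  << live_tservers << " are available\n";
      }
      return 0;
    }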
I20250411 13:57:05.256841 16102 tablet_service.cc:1467] Processing CreateTablet for tablet 27323d93ce9a4ae1a054154f0fb6b1cf (DEFAULT_TABLE table=testMasterReplication-1 [id=bee46abd8d4e4bfa87d2c72e80145c47]), partition=RANGE (key) PARTITION UNBOUNDED
I20250411 13:57:05.258479 16102 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 27323d93ce9a4ae1a054154f0fb6b1cf. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:57:05.263484 16180 tablet_service.cc:1467] Processing CreateTablet for tablet 27323d93ce9a4ae1a054154f0fb6b1cf (DEFAULT_TABLE table=testMasterReplication-1 [id=bee46abd8d4e4bfa87d2c72e80145c47]), partition=RANGE (key) PARTITION UNBOUNDED
I20250411 13:57:05.261287 16258 tablet_service.cc:1467] Processing CreateTablet for tablet 27323d93ce9a4ae1a054154f0fb6b1cf (DEFAULT_TABLE table=testMasterReplication-1 [id=bee46abd8d4e4bfa87d2c72e80145c47]), partition=RANGE (key) PARTITION UNBOUNDED
I20250411 13:57:05.265246 16258 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 27323d93ce9a4ae1a054154f0fb6b1cf. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:57:05.265300 16180 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 27323d93ce9a4ae1a054154f0fb6b1cf. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:57:05.285773 16323 tablet_bootstrap.cc:492] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db: Bootstrap starting.
I20250411 13:57:05.288265 16322 tablet_bootstrap.cc:492] T 27323d93ce9a4ae1a054154f0fb6b1cf P a6dd2df099324e599cee6ac593d2f5c4: Bootstrap starting.
I20250411 13:57:05.290163 16324 tablet_bootstrap.cc:492] T 27323d93ce9a4ae1a054154f0fb6b1cf P 1eddec810d644687b62a5f4756c4e702: Bootstrap starting.
I20250411 13:57:05.299162 16324 tablet_bootstrap.cc:654] T 27323d93ce9a4ae1a054154f0fb6b1cf P 1eddec810d644687b62a5f4756c4e702: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:05.299284 16323 tablet_bootstrap.cc:654] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:05.300807 16322 tablet_bootstrap.cc:654] T 27323d93ce9a4ae1a054154f0fb6b1cf P a6dd2df099324e599cee6ac593d2f5c4: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:05.308207 16324 tablet_bootstrap.cc:492] T 27323d93ce9a4ae1a054154f0fb6b1cf P 1eddec810d644687b62a5f4756c4e702: No bootstrap required, opened a new log
I20250411 13:57:05.308766 16324 ts_tablet_manager.cc:1397] T 27323d93ce9a4ae1a054154f0fb6b1cf P 1eddec810d644687b62a5f4756c4e702: Time spent bootstrapping tablet: real 0.019s	user 0.004s	sys 0.010s
I20250411 13:57:05.310062 16323 tablet_bootstrap.cc:492] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db: No bootstrap required, opened a new log
I20250411 13:57:05.310634 16323 ts_tablet_manager.cc:1397] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db: Time spent bootstrapping tablet: real 0.025s	user 0.010s	sys 0.004s
I20250411 13:57:05.311926 16324 raft_consensus.cc:357] T 27323d93ce9a4ae1a054154f0fb6b1cf P 1eddec810d644687b62a5f4756c4e702 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "1eddec810d644687b62a5f4756c4e702" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 42843 } } peers { permanent_uuid: "9231028098a14e2c902c68bc82c523db" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 43267 } } peers { permanent_uuid: "a6dd2df099324e599cee6ac593d2f5c4" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 38103 } }
I20250411 13:57:05.312745 16324 raft_consensus.cc:383] T 27323d93ce9a4ae1a054154f0fb6b1cf P 1eddec810d644687b62a5f4756c4e702 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:05.313153 16324 raft_consensus.cc:738] T 27323d93ce9a4ae1a054154f0fb6b1cf P 1eddec810d644687b62a5f4756c4e702 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 1eddec810d644687b62a5f4756c4e702, State: Initialized, Role: FOLLOWER
I20250411 13:57:05.314025 16324 consensus_queue.cc:260] T 27323d93ce9a4ae1a054154f0fb6b1cf P 1eddec810d644687b62a5f4756c4e702 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "1eddec810d644687b62a5f4756c4e702" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 42843 } } peers { permanent_uuid: "9231028098a14e2c902c68bc82c523db" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 43267 } } peers { permanent_uuid: "a6dd2df099324e599cee6ac593d2f5c4" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 38103 } }
I20250411 13:57:05.314232 16323 raft_consensus.cc:357] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "1eddec810d644687b62a5f4756c4e702" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 42843 } } peers { permanent_uuid: "9231028098a14e2c902c68bc82c523db" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 43267 } } peers { permanent_uuid: "a6dd2df099324e599cee6ac593d2f5c4" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 38103 } }
I20250411 13:57:05.315508 16323 raft_consensus.cc:383] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:05.315996 16323 raft_consensus.cc:738] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 9231028098a14e2c902c68bc82c523db, State: Initialized, Role: FOLLOWER
I20250411 13:57:05.317006 16323 consensus_queue.cc:260] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "1eddec810d644687b62a5f4756c4e702" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 42843 } } peers { permanent_uuid: "9231028098a14e2c902c68bc82c523db" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 43267 } } peers { permanent_uuid: "a6dd2df099324e599cee6ac593d2f5c4" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 38103 } }
I20250411 13:57:05.321416 16322 tablet_bootstrap.cc:492] T 27323d93ce9a4ae1a054154f0fb6b1cf P a6dd2df099324e599cee6ac593d2f5c4: No bootstrap required, opened a new log
I20250411 13:57:05.322081 16322 ts_tablet_manager.cc:1397] T 27323d93ce9a4ae1a054154f0fb6b1cf P a6dd2df099324e599cee6ac593d2f5c4: Time spent bootstrapping tablet: real 0.034s	user 0.009s	sys 0.015s
I20250411 13:57:05.324285 16324 ts_tablet_manager.cc:1428] T 27323d93ce9a4ae1a054154f0fb6b1cf P 1eddec810d644687b62a5f4756c4e702: Time spent starting tablet: real 0.015s	user 0.006s	sys 0.008s
I20250411 13:57:05.326481 16322 raft_consensus.cc:357] T 27323d93ce9a4ae1a054154f0fb6b1cf P a6dd2df099324e599cee6ac593d2f5c4 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "1eddec810d644687b62a5f4756c4e702" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 42843 } } peers { permanent_uuid: "9231028098a14e2c902c68bc82c523db" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 43267 } } peers { permanent_uuid: "a6dd2df099324e599cee6ac593d2f5c4" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 38103 } }
I20250411 13:57:05.327596 16322 raft_consensus.cc:383] T 27323d93ce9a4ae1a054154f0fb6b1cf P a6dd2df099324e599cee6ac593d2f5c4 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:05.328016 16322 raft_consensus.cc:738] T 27323d93ce9a4ae1a054154f0fb6b1cf P a6dd2df099324e599cee6ac593d2f5c4 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: a6dd2df099324e599cee6ac593d2f5c4, State: Initialized, Role: FOLLOWER
I20250411 13:57:05.329030 16322 consensus_queue.cc:260] T 27323d93ce9a4ae1a054154f0fb6b1cf P a6dd2df099324e599cee6ac593d2f5c4 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "1eddec810d644687b62a5f4756c4e702" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 42843 } } peers { permanent_uuid: "9231028098a14e2c902c68bc82c523db" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 43267 } } peers { permanent_uuid: "a6dd2df099324e599cee6ac593d2f5c4" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 38103 } }
I20250411 13:57:05.336098 16295 heartbeater.cc:499] Master 127.15.113.62:39157 was elected leader, sending a full tablet report...
I20250411 13:57:05.342247 16218 heartbeater.cc:499] Master 127.15.113.62:39157 was elected leader, sending a full tablet report...
I20250411 13:57:05.343751 16322 ts_tablet_manager.cc:1428] T 27323d93ce9a4ae1a054154f0fb6b1cf P a6dd2df099324e599cee6ac593d2f5c4: Time spent starting tablet: real 0.021s	user 0.008s	sys 0.012s
I20250411 13:57:05.345360 16139 heartbeater.cc:499] Master 127.15.113.62:39157 was elected leader, sending a full tablet report...
I20250411 13:57:05.350927 16323 ts_tablet_manager.cc:1428] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db: Time spent starting tablet: real 0.040s	user 0.010s	sys 0.011s
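All three replicas report "Neither blocks nor log segments found" followed by "No bootstrap required": for a freshly created tablet there is no state to replay, so bootstrap reduces to opening an empty WAL. The decision, roughly (illustrative):

    #include <iostream>

    // Replay the WAL only if there is existing state; a brand-new replica
    // just creates a fresh, empty log.
    bool NeedsReplay(bool has_blocks, bool has_wal_segments) {
      return has_blocks || has_wal_segments;
    }

    int main() {
      // Fresh tablet: no blocks, no WAL segments, so open a new empty log.
      std::cout << (NeedsReplay(false, false) ? "replaying WAL\n"
                                              : "no bootstrap required\n");
      return 0;
    }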
I20250411 13:57:05.418525 16051 catalog_manager.cc:1261] Loaded cluster ID: 65369a8061d140ae883d3590ec905e7c
I20250411 13:57:05.418763 16051 catalog_manager.cc:1554] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6: loading cluster ID for follower catalog manager: success
I20250411 13:57:05.423167 16051 catalog_manager.cc:1576] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6: acquiring CA information for follower catalog manager: success
I20250411 13:57:05.426301 16051 catalog_manager.cc:1604] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6: importing token verification keys for follower catalog manager: success; most recent TSK sequence number 0
I20250411 13:57:05.432313 16055 catalog_manager.cc:1261] Loaded cluster ID: 65369a8061d140ae883d3590ec905e7c
I20250411 13:57:05.432588 16055 catalog_manager.cc:1554] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2: loading cluster ID for follower catalog manager: success
I20250411 13:57:05.437283 16055 catalog_manager.cc:1576] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2: acquiring CA information for follower catalog manager: success
I20250411 13:57:05.440335 16055 catalog_manager.cc:1604] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2: importing token verification keys for follower catalog manager: success; most recent TSK sequence number 0
I20250411 13:57:05.489913 16329 raft_consensus.cc:491] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:57:05.490520 16329 raft_consensus.cc:513] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "1eddec810d644687b62a5f4756c4e702" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 42843 } } peers { permanent_uuid: "9231028098a14e2c902c68bc82c523db" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 43267 } } peers { permanent_uuid: "a6dd2df099324e599cee6ac593d2f5c4" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 38103 } }
I20250411 13:57:05.501017 16329 leader_election.cc:290] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers 1eddec810d644687b62a5f4756c4e702 (127.15.113.3:42843), a6dd2df099324e599cee6ac593d2f5c4 (127.15.113.1:38103)
I20250411 13:57:05.514664 16112 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "27323d93ce9a4ae1a054154f0fb6b1cf" candidate_uuid: "9231028098a14e2c902c68bc82c523db" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "a6dd2df099324e599cee6ac593d2f5c4" is_pre_election: true
I20250411 13:57:05.515586 16112 raft_consensus.cc:2466] T 27323d93ce9a4ae1a054154f0fb6b1cf P a6dd2df099324e599cee6ac593d2f5c4 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate 9231028098a14e2c902c68bc82c523db in term 0.
I20250411 13:57:05.516727 16156 leader_election.cc:304] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 9231028098a14e2c902c68bc82c523db, a6dd2df099324e599cee6ac593d2f5c4; no voters: 
I20250411 13:57:05.517549 16329 raft_consensus.cc:2802] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db [term 0 FOLLOWER]: Leader pre-election won for term 1
I20250411 13:57:05.517977 16329 raft_consensus.cc:491] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db [term 0 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
I20250411 13:57:05.518337 16329 raft_consensus.cc:3058] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:05.544322 16329 raft_consensus.cc:513] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db [term 1 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "1eddec810d644687b62a5f4756c4e702" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 42843 } } peers { permanent_uuid: "9231028098a14e2c902c68bc82c523db" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 43267 } } peers { permanent_uuid: "a6dd2df099324e599cee6ac593d2f5c4" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 38103 } }
I20250411 13:57:06.302496 16329 leader_election.cc:290] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db [CANDIDATE]: Term 1 election: Requested vote from peers 1eddec810d644687b62a5f4756c4e702 (127.15.113.3:42843), a6dd2df099324e599cee6ac593d2f5c4 (127.15.113.1:38103)
I20250411 13:57:06.304811 16112 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "27323d93ce9a4ae1a054154f0fb6b1cf" candidate_uuid: "9231028098a14e2c902c68bc82c523db" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "a6dd2df099324e599cee6ac593d2f5c4"
I20250411 13:57:06.305490 16112 raft_consensus.cc:3058] T 27323d93ce9a4ae1a054154f0fb6b1cf P a6dd2df099324e599cee6ac593d2f5c4 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:06.313288 16112 raft_consensus.cc:2466] T 27323d93ce9a4ae1a054154f0fb6b1cf P a6dd2df099324e599cee6ac593d2f5c4 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate 9231028098a14e2c902c68bc82c523db in term 1.
I20250411 13:57:06.314464 16156 leader_election.cc:304] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db [CANDIDATE]: Term 1 election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 9231028098a14e2c902c68bc82c523db, a6dd2df099324e599cee6ac593d2f5c4; no voters: 
I20250411 13:57:06.315335 16329 raft_consensus.cc:2802] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db [term 1 FOLLOWER]: Leader election won for term 1
I20250411 13:57:06.323855 16329 raft_consensus.cc:695] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db [term 1 LEADER]: Becoming Leader. State: Replica: 9231028098a14e2c902c68bc82c523db, State: Running, Role: LEADER
I20250411 13:57:06.324937 16329 consensus_queue.cc:237] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db [LEADER]: Queue going to LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 1, Majority size: 2, State: 0, Mode: LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "1eddec810d644687b62a5f4756c4e702" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 42843 } } peers { permanent_uuid: "9231028098a14e2c902c68bc82c523db" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 43267 } } peers { permanent_uuid: "a6dd2df099324e599cee6ac593d2f5c4" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 38103 } }
I20250411 13:57:06.355844 16337 raft_consensus.cc:491] T 27323d93ce9a4ae1a054154f0fb6b1cf P 1eddec810d644687b62a5f4756c4e702 [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:57:06.356388 16337 raft_consensus.cc:513] T 27323d93ce9a4ae1a054154f0fb6b1cf P 1eddec810d644687b62a5f4756c4e702 [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "1eddec810d644687b62a5f4756c4e702" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 42843 } } peers { permanent_uuid: "9231028098a14e2c902c68bc82c523db" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 43267 } } peers { permanent_uuid: "a6dd2df099324e599cee6ac593d2f5c4" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 38103 } }
I20250411 13:57:06.358975 16337 leader_election.cc:290] T 27323d93ce9a4ae1a054154f0fb6b1cf P 1eddec810d644687b62a5f4756c4e702 [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers 9231028098a14e2c902c68bc82c523db (127.15.113.2:43267), a6dd2df099324e599cee6ac593d2f5c4 (127.15.113.1:38103)
I20250411 13:57:06.386471 16112 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "27323d93ce9a4ae1a054154f0fb6b1cf" candidate_uuid: "1eddec810d644687b62a5f4756c4e702" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "a6dd2df099324e599cee6ac593d2f5c4" is_pre_election: true
I20250411 13:57:06.387413 16112 raft_consensus.cc:2391] T 27323d93ce9a4ae1a054154f0fb6b1cf P a6dd2df099324e599cee6ac593d2f5c4 [term 1 FOLLOWER]: Leader pre-election vote request: Denying vote to candidate 1eddec810d644687b62a5f4756c4e702 in current term 1: Already voted for candidate 9231028098a14e2c902c68bc82c523db in this term.
I20250411 13:57:06.440913 16268 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "27323d93ce9a4ae1a054154f0fb6b1cf" candidate_uuid: "9231028098a14e2c902c68bc82c523db" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "1eddec810d644687b62a5f4756c4e702" is_pre_election: true
I20250411 13:57:06.441610 16268 raft_consensus.cc:2466] T 27323d93ce9a4ae1a054154f0fb6b1cf P 1eddec810d644687b62a5f4756c4e702 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate 9231028098a14e2c902c68bc82c523db in term 0.
I20250411 13:57:06.442188 16267 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "27323d93ce9a4ae1a054154f0fb6b1cf" candidate_uuid: "9231028098a14e2c902c68bc82c523db" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "1eddec810d644687b62a5f4756c4e702"
I20250411 13:57:06.442763 16267 raft_consensus.cc:3058] T 27323d93ce9a4ae1a054154f0fb6b1cf P 1eddec810d644687b62a5f4756c4e702 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:06.444592 15841 catalog_manager.cc:5581] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db reported cstate change: term changed from 0 to 1, leader changed from <none> to 9231028098a14e2c902c68bc82c523db (127.15.113.2). New cstate: current_term: 1 leader_uuid: "9231028098a14e2c902c68bc82c523db" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "1eddec810d644687b62a5f4756c4e702" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 42843 } health_report { overall_health: UNKNOWN } } peers { permanent_uuid: "9231028098a14e2c902c68bc82c523db" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 43267 } health_report { overall_health: HEALTHY } } peers { permanent_uuid: "a6dd2df099324e599cee6ac593d2f5c4" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 38103 } health_report { overall_health: UNKNOWN } } }
I20250411 13:57:06.447134 16267 raft_consensus.cc:2466] T 27323d93ce9a4ae1a054154f0fb6b1cf P 1eddec810d644687b62a5f4756c4e702 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate 9231028098a14e2c902c68bc82c523db in term 1.
I20250411 13:57:06.501360 16190 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "27323d93ce9a4ae1a054154f0fb6b1cf" candidate_uuid: "1eddec810d644687b62a5f4756c4e702" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "9231028098a14e2c902c68bc82c523db" is_pre_election: true
I20250411 13:57:06.504104 16232 leader_election.cc:304] T 27323d93ce9a4ae1a054154f0fb6b1cf P 1eddec810d644687b62a5f4756c4e702 [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate lost. Election summary: received 3 responses out of 3 voters: 1 yes votes; 2 no votes. yes voters: 1eddec810d644687b62a5f4756c4e702; no voters: 9231028098a14e2c902c68bc82c523db, a6dd2df099324e599cee6ac593d2f5c4
I20250411 13:57:06.505611 16337 raft_consensus.cc:2747] T 27323d93ce9a4ae1a054154f0fb6b1cf P 1eddec810d644687b62a5f4756c4e702 [term 1 FOLLOWER]: Leader pre-election lost for term 1. Reason: could not achieve majority
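The two election summaries above come down to straight majority counting over the voter set: 9231… wins its term-1 election, while 1eddec…'s pre-election loses with 1 yes and 2 no out of 3 voters. A minimal sketch of that decision rule, using hypothetical names rather than Kudu's actual LeaderElection class:

```cpp
#include <iostream>

// Hypothetical vote tally; Kudu's real LeaderElection tracks per-UUID
// responses, but the outcome reduces to simple majority counting.
struct VoteTally {
  int voters;  // total voters in the Raft config (3 in this log)
  int yes;     // yes votes received, including the candidate's own
  int no;      // explicit no votes received
};

enum class Decision { kPending, kWon, kLost };

// Decided as soon as either side reaches a strict majority.
Decision Decide(const VoteTally& t) {
  const int majority = t.voters / 2 + 1;  // 2 of 3
  if (t.yes >= majority) return Decision::kWon;
  if (t.no >= majority) return Decision::kLost;
  return Decision::kPending;
}

int main() {
  // "1 yes votes; 2 no votes" out of 3 voters, as in the log above.
  std::cout << (Decide({3, 1, 2}) == Decision::kLost) << "\n";  // prints 1
  // The winning election: 2 yes votes already decide it.
  std::cout << (Decide({3, 2, 0}) == Decision::kWon) << "\n";   // prints 1
}
```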
I20250411 13:57:06.512588 15842 catalog_manager.cc:2232] Servicing CreateTable request from {username='slave'} at 127.0.0.1:51740:
name: "testMasterReplication-2"
schema {
  columns {
    name: "key"
    type: INT32
    is_key: true
    is_nullable: false
    encoding: AUTO_ENCODING
    compression: DEFAULT_COMPRESSION
    cfile_block_size: 0
    immutable: false
  }
  columns {
    name: "int_val"
    type: INT32
    is_key: false
    is_nullable: false
    encoding: AUTO_ENCODING
    compression: DEFAULT_COMPRESSION
    cfile_block_size: 0
    immutable: false
  }
  columns {
    name: "string_val"
    type: STRING
    is_key: false
    is_nullable: false
    encoding: AUTO_ENCODING
    compression: DEFAULT_COMPRESSION
    cfile_block_size: 0
    immutable: false
  }
}
split_rows_range_bounds {
}
partition_schema {
  range_schema {
    columns {
      name: "key"
    }
  }
}
W20250411 13:57:06.514873 15842 catalog_manager.cc:6943] The number of live tablet servers is not enough to re-replicate a tablet replica of the newly created table testMasterReplication-2 in case of a server failure: 4 tablet servers would be needed, 3 are available. Consider bringing up more tablet servers.
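The CreateTable request above describes a three-column table (INT32 key, INT32 int_val, STRING string_val, all non-nullable) range-partitioned on `key`; the warning reflects that re-replicating a replica of a replication-factor-3 table after a server failure needs a fourth tablet server. For reference, an equivalent table could be created through Kudu's public C++ client API. This is a sketch against the documented KuduSchemaBuilder/KuduTableCreator interfaces; the master address is a placeholder and Status return values are ignored for brevity:

```cpp
#include <memory>
#include <string>
#include <vector>
#include "kudu/client/client.h"

using kudu::client::KuduClient;
using kudu::client::KuduClientBuilder;
using kudu::client::KuduColumnSchema;
using kudu::client::KuduSchema;
using kudu::client::KuduSchemaBuilder;
using kudu::client::KuduTableCreator;

int main() {
  kudu::client::sp::shared_ptr<KuduClient> client;
  KuduClientBuilder()
      .add_master_server_addr("127.0.0.1:7051")  // placeholder address
      .Build(&client);

  // Mirrors the schema in the logged request.
  KuduSchema schema;
  KuduSchemaBuilder b;
  b.AddColumn("key")->Type(KuduColumnSchema::INT32)->NotNull()->PrimaryKey();
  b.AddColumn("int_val")->Type(KuduColumnSchema::INT32)->NotNull();
  b.AddColumn("string_val")->Type(KuduColumnSchema::STRING)->NotNull();
  b.Build(&schema);

  // RANGE (key) with no explicit splits => a single unbounded range
  // partition, as in "PARTITION UNBOUNDED" below.
  std::unique_ptr<KuduTableCreator> creator(client->NewTableCreator());
  creator->table_name("testMasterReplication-2")
      .schema(&schema)
      .set_range_partition_columns({"key"})
      .num_replicas(3)
      .Create();
  return 0;
}
```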
I20250411 13:57:06.549894 16102 tablet_service.cc:1467] Processing CreateTablet for tablet 21a33548ff234470a1cc0587eec0c7ad (DEFAULT_TABLE table=testMasterReplication-2 [id=663a3e8e52364ea0a9eae1d53908162c]), partition=RANGE (key) PARTITION UNBOUNDED
I20250411 13:57:06.551086 16102 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 21a33548ff234470a1cc0587eec0c7ad. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:57:06.551688 16180 tablet_service.cc:1467] Processing CreateTablet for tablet 21a33548ff234470a1cc0587eec0c7ad (DEFAULT_TABLE table=testMasterReplication-2 [id=663a3e8e52364ea0a9eae1d53908162c]), partition=RANGE (key) PARTITION UNBOUNDED
I20250411 13:57:06.552773 16180 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 21a33548ff234470a1cc0587eec0c7ad. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:57:06.553617 16258 tablet_service.cc:1467] Processing CreateTablet for tablet 21a33548ff234470a1cc0587eec0c7ad (DEFAULT_TABLE table=testMasterReplication-2 [id=663a3e8e52364ea0a9eae1d53908162c]), partition=RANGE (key) PARTITION UNBOUNDED
I20250411 13:57:06.554698 16258 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 21a33548ff234470a1cc0587eec0c7ad. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:57:06.575816 16351 tablet_bootstrap.cc:492] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702: Bootstrap starting.
I20250411 13:57:06.577293 16352 tablet_bootstrap.cc:492] T 21a33548ff234470a1cc0587eec0c7ad P 9231028098a14e2c902c68bc82c523db: Bootstrap starting.
I20250411 13:57:06.583155 16351 tablet_bootstrap.cc:654] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:06.583945 16352 tablet_bootstrap.cc:654] T 21a33548ff234470a1cc0587eec0c7ad P 9231028098a14e2c902c68bc82c523db: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:06.588244 16353 tablet_bootstrap.cc:492] T 21a33548ff234470a1cc0587eec0c7ad P a6dd2df099324e599cee6ac593d2f5c4: Bootstrap starting.
I20250411 13:57:06.595121 16351 tablet_bootstrap.cc:492] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702: No bootstrap required, opened a new log
I20250411 13:57:06.595733 16351 ts_tablet_manager.cc:1397] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702: Time spent bootstrapping tablet: real 0.020s	user 0.011s	sys 0.006s
I20250411 13:57:06.596405 16353 tablet_bootstrap.cc:654] T 21a33548ff234470a1cc0587eec0c7ad P a6dd2df099324e599cee6ac593d2f5c4: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:06.598436 16351 raft_consensus.cc:357] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a6dd2df099324e599cee6ac593d2f5c4" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 38103 } } peers { permanent_uuid: "9231028098a14e2c902c68bc82c523db" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 43267 } } peers { permanent_uuid: "1eddec810d644687b62a5f4756c4e702" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 42843 } }
I20250411 13:57:06.599422 16351 raft_consensus.cc:383] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:06.599838 16351 raft_consensus.cc:738] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 1eddec810d644687b62a5f4756c4e702, State: Initialized, Role: FOLLOWER
I20250411 13:57:06.600975 16351 consensus_queue.cc:260] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a6dd2df099324e599cee6ac593d2f5c4" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 38103 } } peers { permanent_uuid: "9231028098a14e2c902c68bc82c523db" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 43267 } } peers { permanent_uuid: "1eddec810d644687b62a5f4756c4e702" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 42843 } }
I20250411 13:57:06.604466 16351 ts_tablet_manager.cc:1428] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702: Time spent starting tablet: real 0.008s	user 0.005s	sys 0.000s
I20250411 13:57:06.606489 16352 tablet_bootstrap.cc:492] T 21a33548ff234470a1cc0587eec0c7ad P 9231028098a14e2c902c68bc82c523db: No bootstrap required, opened a new log
I20250411 13:57:06.607239 16352 ts_tablet_manager.cc:1397] T 21a33548ff234470a1cc0587eec0c7ad P 9231028098a14e2c902c68bc82c523db: Time spent bootstrapping tablet: real 0.031s	user 0.005s	sys 0.015s
I20250411 13:57:06.609870 16352 raft_consensus.cc:357] T 21a33548ff234470a1cc0587eec0c7ad P 9231028098a14e2c902c68bc82c523db [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a6dd2df099324e599cee6ac593d2f5c4" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 38103 } } peers { permanent_uuid: "9231028098a14e2c902c68bc82c523db" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 43267 } } peers { permanent_uuid: "1eddec810d644687b62a5f4756c4e702" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 42843 } }
I20250411 13:57:06.610806 16352 raft_consensus.cc:383] T 21a33548ff234470a1cc0587eec0c7ad P 9231028098a14e2c902c68bc82c523db [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:06.611253 16352 raft_consensus.cc:738] T 21a33548ff234470a1cc0587eec0c7ad P 9231028098a14e2c902c68bc82c523db [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 9231028098a14e2c902c68bc82c523db, State: Initialized, Role: FOLLOWER
I20250411 13:57:06.611954 16352 consensus_queue.cc:260] T 21a33548ff234470a1cc0587eec0c7ad P 9231028098a14e2c902c68bc82c523db [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a6dd2df099324e599cee6ac593d2f5c4" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 38103 } } peers { permanent_uuid: "9231028098a14e2c902c68bc82c523db" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 43267 } } peers { permanent_uuid: "1eddec810d644687b62a5f4756c4e702" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 42843 } }
I20250411 13:57:06.613692 16353 tablet_bootstrap.cc:492] T 21a33548ff234470a1cc0587eec0c7ad P a6dd2df099324e599cee6ac593d2f5c4: No bootstrap required, opened a new log
I20250411 13:57:06.614013 16352 ts_tablet_manager.cc:1428] T 21a33548ff234470a1cc0587eec0c7ad P 9231028098a14e2c902c68bc82c523db: Time spent starting tablet: real 0.006s	user 0.004s	sys 0.000s
I20250411 13:57:06.614168 16353 ts_tablet_manager.cc:1397] T 21a33548ff234470a1cc0587eec0c7ad P a6dd2df099324e599cee6ac593d2f5c4: Time spent bootstrapping tablet: real 0.026s	user 0.011s	sys 0.004s
I20250411 13:57:06.622502 16353 raft_consensus.cc:357] T 21a33548ff234470a1cc0587eec0c7ad P a6dd2df099324e599cee6ac593d2f5c4 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a6dd2df099324e599cee6ac593d2f5c4" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 38103 } } peers { permanent_uuid: "9231028098a14e2c902c68bc82c523db" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 43267 } } peers { permanent_uuid: "1eddec810d644687b62a5f4756c4e702" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 42843 } }
I20250411 13:57:06.623919 16353 raft_consensus.cc:383] T 21a33548ff234470a1cc0587eec0c7ad P a6dd2df099324e599cee6ac593d2f5c4 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:06.624178 16353 raft_consensus.cc:738] T 21a33548ff234470a1cc0587eec0c7ad P a6dd2df099324e599cee6ac593d2f5c4 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: a6dd2df099324e599cee6ac593d2f5c4, State: Initialized, Role: FOLLOWER
I20250411 13:57:06.624971 16353 consensus_queue.cc:260] T 21a33548ff234470a1cc0587eec0c7ad P a6dd2df099324e599cee6ac593d2f5c4 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a6dd2df099324e599cee6ac593d2f5c4" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 38103 } } peers { permanent_uuid: "9231028098a14e2c902c68bc82c523db" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 43267 } } peers { permanent_uuid: "1eddec810d644687b62a5f4756c4e702" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 42843 } }
I20250411 13:57:06.636566 16353 ts_tablet_manager.cc:1428] T 21a33548ff234470a1cc0587eec0c7ad P a6dd2df099324e599cee6ac593d2f5c4: Time spent starting tablet: real 0.022s	user 0.006s	sys 0.009s
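Each replica above takes the fast path through bootstrap: with neither data blocks nor WAL segments on disk there is nothing to replay, so a fresh log is created and the tablet starts immediately. Roughly, and with hypothetical names summarizing the branch the log messages trace:

```cpp
#include <iostream>

// Hypothetical summary of the bootstrap fast path seen in the log:
// WAL replay only happens when there is existing state to recover.
struct OnDiskState {
  bool has_blocks;        // any data blocks for the tablet?
  bool has_log_segments;  // any WAL segments to replay?
};

const char* Bootstrap(const OnDiskState& s) {
  if (!s.has_blocks && !s.has_log_segments) {
    return "No bootstrap required, opened a new log";
  }
  return "Replaying WAL against on-disk data";
}

int main() {
  std::cout << Bootstrap({false, false}) << "\n";  // new tablet: fast path
}
```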
I20250411 13:57:06.751334 16335 consensus_queue.cc:1035] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db [LEADER]: Connected to new peer: Peer: permanent_uuid: "a6dd2df099324e599cee6ac593d2f5c4" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 38103 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:57:06.773842 16335 consensus_queue.cc:1035] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db [LEADER]: Connected to new peer: Peer: permanent_uuid: "1eddec810d644687b62a5f4756c4e702" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 42843 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
W20250411 13:57:06.881551 15875 debug-util.cc:398] Leaking SignalData structure 0x7b0800061060 after lost signal to thread 15815
W20250411 13:57:06.882512 15875 debug-util.cc:398] Leaking SignalData structure 0x7b0800061740 after lost signal to thread 15878
W20250411 13:57:06.883261 15875 debug-util.cc:398] Leaking SignalData structure 0x7b0800061e20 after lost signal to thread 15943
W20250411 13:57:06.884068 15875 debug-util.cc:398] Leaking SignalData structure 0x7b08001f1ee0 after lost signal to thread 16008
I20250411 13:57:06.884689 16337 raft_consensus.cc:491] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702 [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:57:06.885181 16337 raft_consensus.cc:513] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702 [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a6dd2df099324e599cee6ac593d2f5c4" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 38103 } } peers { permanent_uuid: "9231028098a14e2c902c68bc82c523db" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 43267 } } peers { permanent_uuid: "1eddec810d644687b62a5f4756c4e702" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 42843 } }
W20250411 13:57:06.886179 15875 debug-util.cc:398] Leaking SignalData structure 0x7b0800219300 after lost signal to thread 16292
I20250411 13:57:06.887107 16337 leader_election.cc:290] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702 [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers a6dd2df099324e599cee6ac593d2f5c4 (127.15.113.1:38103), 9231028098a14e2c902c68bc82c523db (127.15.113.2:43267)
W20250411 13:57:06.886483 15875 debug-util.cc:398] Leaking SignalData structure 0x7b08002193e0 after lost signal to thread 16299
I20250411 13:57:06.888321 16190 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "21a33548ff234470a1cc0587eec0c7ad" candidate_uuid: "1eddec810d644687b62a5f4756c4e702" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "9231028098a14e2c902c68bc82c523db" is_pre_election: true
I20250411 13:57:06.888373 16112 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "21a33548ff234470a1cc0587eec0c7ad" candidate_uuid: "1eddec810d644687b62a5f4756c4e702" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "a6dd2df099324e599cee6ac593d2f5c4" is_pre_election: true
I20250411 13:57:06.889093 16190 raft_consensus.cc:2466] T 21a33548ff234470a1cc0587eec0c7ad P 9231028098a14e2c902c68bc82c523db [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate 1eddec810d644687b62a5f4756c4e702 in term 0.
I20250411 13:57:06.889283 16112 raft_consensus.cc:2466] T 21a33548ff234470a1cc0587eec0c7ad P a6dd2df099324e599cee6ac593d2f5c4 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate 1eddec810d644687b62a5f4756c4e702 in term 0.
I20250411 13:57:06.890726 16234 leader_election.cc:304] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702 [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 1eddec810d644687b62a5f4756c4e702, a6dd2df099324e599cee6ac593d2f5c4; no voters: 
I20250411 13:57:06.891753 16337 raft_consensus.cc:2802] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702 [term 0 FOLLOWER]: Leader pre-election won for term 1
I20250411 13:57:06.892247 16337 raft_consensus.cc:491] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702 [term 0 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
I20250411 13:57:06.893218 16337 raft_consensus.cc:3058] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:06.900493 16337 raft_consensus.cc:513] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702 [term 1 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a6dd2df099324e599cee6ac593d2f5c4" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 38103 } } peers { permanent_uuid: "9231028098a14e2c902c68bc82c523db" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 43267 } } peers { permanent_uuid: "1eddec810d644687b62a5f4756c4e702" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 42843 } }
I20250411 13:57:06.902817 16337 leader_election.cc:290] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702 [CANDIDATE]: Term 1 election: Requested vote from peers a6dd2df099324e599cee6ac593d2f5c4 (127.15.113.1:38103), 9231028098a14e2c902c68bc82c523db (127.15.113.2:43267)
I20250411 13:57:06.903503 16112 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "21a33548ff234470a1cc0587eec0c7ad" candidate_uuid: "1eddec810d644687b62a5f4756c4e702" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "a6dd2df099324e599cee6ac593d2f5c4"
I20250411 13:57:06.903867 16190 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "21a33548ff234470a1cc0587eec0c7ad" candidate_uuid: "1eddec810d644687b62a5f4756c4e702" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "9231028098a14e2c902c68bc82c523db"
I20250411 13:57:06.904115 16112 raft_consensus.cc:3058] T 21a33548ff234470a1cc0587eec0c7ad P a6dd2df099324e599cee6ac593d2f5c4 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:06.904412 16190 raft_consensus.cc:3058] T 21a33548ff234470a1cc0587eec0c7ad P 9231028098a14e2c902c68bc82c523db [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:06.910758 16112 raft_consensus.cc:2466] T 21a33548ff234470a1cc0587eec0c7ad P a6dd2df099324e599cee6ac593d2f5c4 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate 1eddec810d644687b62a5f4756c4e702 in term 1.
I20250411 13:57:06.910732 16190 raft_consensus.cc:2466] T 21a33548ff234470a1cc0587eec0c7ad P 9231028098a14e2c902c68bc82c523db [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate 1eddec810d644687b62a5f4756c4e702 in term 1.
I20250411 13:57:06.912256 16234 leader_election.cc:304] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702 [CANDIDATE]: Term 1 election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 1eddec810d644687b62a5f4756c4e702, a6dd2df099324e599cee6ac593d2f5c4; no voters: 
I20250411 13:57:06.913158 16337 raft_consensus.cc:2802] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702 [term 1 FOLLOWER]: Leader election won for term 1
I20250411 13:57:06.914336 16337 raft_consensus.cc:695] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702 [term 1 LEADER]: Becoming Leader. State: Replica: 1eddec810d644687b62a5f4756c4e702, State: Running, Role: LEADER
I20250411 13:57:06.915256 16337 consensus_queue.cc:237] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702 [LEADER]: Queue going to LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 1, Majority size: 2, State: 0, Mode: LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a6dd2df099324e599cee6ac593d2f5c4" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 38103 } } peers { permanent_uuid: "9231028098a14e2c902c68bc82c523db" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 43267 } } peers { permanent_uuid: "1eddec810d644687b62a5f4756c4e702" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 42843 } }
I20250411 13:57:06.929216 15842 catalog_manager.cc:5581] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702 reported cstate change: term changed from 0 to 1, leader changed from <none> to 1eddec810d644687b62a5f4756c4e702 (127.15.113.3). New cstate: current_term: 1 leader_uuid: "1eddec810d644687b62a5f4756c4e702" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a6dd2df099324e599cee6ac593d2f5c4" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 38103 } health_report { overall_health: UNKNOWN } } peers { permanent_uuid: "9231028098a14e2c902c68bc82c523db" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 43267 } health_report { overall_health: UNKNOWN } } peers { permanent_uuid: "1eddec810d644687b62a5f4756c4e702" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 42843 } health_report { overall_health: HEALTHY } } }
I20250411 13:57:06.960353 15812 tablet_server.cc:178] TabletServer@127.15.113.1:0 shutting down...
I20250411 13:57:07.008572 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:07.010215 15812 tablet_replica.cc:331] T 21a33548ff234470a1cc0587eec0c7ad P a6dd2df099324e599cee6ac593d2f5c4: stopping tablet replica
I20250411 13:57:07.011125 15812 raft_consensus.cc:2241] T 21a33548ff234470a1cc0587eec0c7ad P a6dd2df099324e599cee6ac593d2f5c4 [term 1 FOLLOWER]: Raft consensus shutting down.
I20250411 13:57:07.011687 15812 raft_consensus.cc:2270] T 21a33548ff234470a1cc0587eec0c7ad P a6dd2df099324e599cee6ac593d2f5c4 [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:07.014778 15812 tablet_replica.cc:331] T 27323d93ce9a4ae1a054154f0fb6b1cf P a6dd2df099324e599cee6ac593d2f5c4: stopping tablet replica
I20250411 13:57:07.015408 15812 raft_consensus.cc:2241] T 27323d93ce9a4ae1a054154f0fb6b1cf P a6dd2df099324e599cee6ac593d2f5c4 [term 1 FOLLOWER]: Raft consensus shutting down.
I20250411 13:57:07.015884 15812 raft_consensus.cc:2270] T 27323d93ce9a4ae1a054154f0fb6b1cf P a6dd2df099324e599cee6ac593d2f5c4 [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:07.234615 15812 tablet_server.cc:195] TabletServer@127.15.113.1:0 shutdown complete.
I20250411 13:57:07.247290 15812 tablet_server.cc:178] TabletServer@127.15.113.2:0 shutting down...
W20250411 13:57:07.253428 16156 consensus_peers.cc:487] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db -> Peer a6dd2df099324e599cee6ac593d2f5c4 (127.15.113.1:38103): Couldn't send request to peer a6dd2df099324e599cee6ac593d2f5c4. Status: Network error: Client connection negotiation failed: client connection to 127.15.113.1:38103: connect: Connection refused (error 111). This is attempt 1: this message will repeat every 5th retry.
I20250411 13:57:07.283598 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:07.284885 15812 tablet_replica.cc:331] T 21a33548ff234470a1cc0587eec0c7ad P 9231028098a14e2c902c68bc82c523db: stopping tablet replica
I20250411 13:57:07.285665 15812 raft_consensus.cc:2241] T 21a33548ff234470a1cc0587eec0c7ad P 9231028098a14e2c902c68bc82c523db [term 1 FOLLOWER]: Raft consensus shutting down.
I20250411 13:57:07.286208 15812 raft_consensus.cc:2270] T 21a33548ff234470a1cc0587eec0c7ad P 9231028098a14e2c902c68bc82c523db [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:07.288463 15812 tablet_replica.cc:331] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db: stopping tablet replica
I20250411 13:57:07.289148 15812 raft_consensus.cc:2241] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db [term 1 LEADER]: Raft consensus shutting down.
I20250411 13:57:07.290292 15812 raft_consensus.cc:2270] T 27323d93ce9a4ae1a054154f0fb6b1cf P 9231028098a14e2c902c68bc82c523db [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:07.314224 15812 tablet_server.cc:195] TabletServer@127.15.113.2:0 shutdown complete.
I20250411 13:57:07.326277 15812 tablet_server.cc:178] TabletServer@127.15.113.3:0 shutting down...
I20250411 13:57:07.366377 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:07.367189 15812 tablet_replica.cc:331] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702: stopping tablet replica
I20250411 13:57:07.367955 15812 raft_consensus.cc:2241] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702 [term 1 LEADER]: Raft consensus shutting down.
I20250411 13:57:07.368768 15812 pending_rounds.cc:70] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702: Trying to abort 1 pending ops.
I20250411 13:57:07.369148 15812 pending_rounds.cc:77] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702: Aborting op as it isn't in flight: id { term: 1 index: 1 } timestamp: 7144979771054063616 op_type: NO_OP noop_request { }
I20250411 13:57:07.369707 15812 raft_consensus.cc:2887] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702 [term 1 LEADER]: NO_OP replication failed: Aborted: Op aborted
I20250411 13:57:07.370139 15812 raft_consensus.cc:2270] T 21a33548ff234470a1cc0587eec0c7ad P 1eddec810d644687b62a5f4756c4e702 [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:07.373185 15812 tablet_replica.cc:331] T 27323d93ce9a4ae1a054154f0fb6b1cf P 1eddec810d644687b62a5f4756c4e702: stopping tablet replica
I20250411 13:57:07.373977 15812 raft_consensus.cc:2241] T 27323d93ce9a4ae1a054154f0fb6b1cf P 1eddec810d644687b62a5f4756c4e702 [term 1 FOLLOWER]: Raft consensus shutting down.
I20250411 13:57:07.374533 15812 raft_consensus.cc:2270] T 27323d93ce9a4ae1a054154f0fb6b1cf P 1eddec810d644687b62a5f4756c4e702 [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:07.397779 15812 tablet_server.cc:195] TabletServer@127.15.113.3:0 shutdown complete.
I20250411 13:57:07.410845 15812 master.cc:561] Master@127.15.113.62:39157 shutting down...
I20250411 13:57:07.552273 16040 catalog_manager.cc:797] Waiting for catalog manager background task thread to start: Service unavailable: Catalog manager is not initialized. State: Closing
I20250411 13:57:07.710287 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [term 1 LEADER]: Raft consensus shutting down.
I20250411 13:57:07.711076 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068 [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:07.711488 15812 tablet_replica.cc:331] T 00000000000000000000000000000000 P ef7101a74df04a5cbc67d3c9dedba068: stopping tablet replica
W20250411 13:57:08.730147 15812 thread.cc:535] Waited for 1000ms trying to join with diag-logger (tid 15875)
I20250411 13:57:09.209928 16370 raft_consensus.cc:491] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [term 1 FOLLOWER]: Starting pre-election (detected failure of leader ef7101a74df04a5cbc67d3c9dedba068)
I20250411 13:57:09.210345 16370 raft_consensus.cc:513] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [term 1 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } }
I20250411 13:57:09.211974 16370 leader_election.cc:290] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [CANDIDATE]: Term 2 pre-election: Requested pre-vote from peers ef7101a74df04a5cbc67d3c9dedba068 (127.15.113.62:39157), 7eebe9d12be74e3881ebe0a09e9ce6e6 (127.15.113.60:44199)
I20250411 13:57:09.213197 15983 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "0ce6dab661e5452f9de8e26a520986a2" candidate_term: 2 candidate_status { last_received { term: 1 index: 10 } } ignore_live_leader: false dest_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" is_pre_election: true
I20250411 13:57:09.213282 16371 raft_consensus.cc:491] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 1 FOLLOWER]: Starting pre-election (detected failure of leader ef7101a74df04a5cbc67d3c9dedba068)
I20250411 13:57:09.213999 16371 raft_consensus.cc:513] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 1 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } }
I20250411 13:57:09.215111 15983 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 1 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate 0ce6dab661e5452f9de8e26a520986a2 in term 1.
W20250411 13:57:09.218211 15958 proxy.cc:239] Call had error, refreshing address and retrying: Network error: Client connection negotiation failed: client connection to 127.15.113.62:39157: connect: Connection refused (error 111) [suppressed 6 similar messages]
I20250411 13:57:09.218323 15918 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" candidate_term: 2 candidate_status { last_received { term: 1 index: 10 } } ignore_live_leader: false dest_uuid: "0ce6dab661e5452f9de8e26a520986a2" is_pre_election: true
I20250411 13:57:09.218322 15895 leader_election.cc:304] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [CANDIDATE]: Term 2 pre-election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 0ce6dab661e5452f9de8e26a520986a2, 7eebe9d12be74e3881ebe0a09e9ce6e6; no voters: 
I20250411 13:57:09.219262 15918 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [term 1 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate 7eebe9d12be74e3881ebe0a09e9ce6e6 in term 1.
I20250411 13:57:09.219796 16370 raft_consensus.cc:2802] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [term 1 FOLLOWER]: Leader pre-election won for term 2
I20250411 13:57:09.220075 16371 leader_election.cc:290] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [CANDIDATE]: Term 2 pre-election: Requested pre-vote from peers ef7101a74df04a5cbc67d3c9dedba068 (127.15.113.62:39157), 0ce6dab661e5452f9de8e26a520986a2 (127.15.113.61:45255)
I20250411 13:57:09.220248 16370 raft_consensus.cc:491] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [term 1 FOLLOWER]: Starting leader election (detected failure of leader ef7101a74df04a5cbc67d3c9dedba068)
I20250411 13:57:09.220744 16370 raft_consensus.cc:3058] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [term 1 FOLLOWER]: Advancing to term 2
I20250411 13:57:09.221973 15957 leader_election.cc:304] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [CANDIDATE]: Term 2 pre-election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 0ce6dab661e5452f9de8e26a520986a2, 7eebe9d12be74e3881ebe0a09e9ce6e6; no voters: 
I20250411 13:57:09.222868 16371 raft_consensus.cc:2802] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 1 FOLLOWER]: Leader pre-election won for term 2
I20250411 13:57:09.223201 16371 raft_consensus.cc:491] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 1 FOLLOWER]: Starting leader election (detected failure of leader ef7101a74df04a5cbc67d3c9dedba068)
I20250411 13:57:09.223495 16371 raft_consensus.cc:3058] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 1 FOLLOWER]: Advancing to term 2
W20250411 13:57:09.223789 15958 leader_election.cc:336] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [CANDIDATE]: Term 2 pre-election: RPC error from VoteRequest() call to peer ef7101a74df04a5cbc67d3c9dedba068 (127.15.113.62:39157): Network error: Client connection negotiation failed: client connection to 127.15.113.62:39157: connect: Connection refused (error 111)
I20250411 13:57:09.227900 16370 raft_consensus.cc:513] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [term 2 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } }
I20250411 13:57:09.229991 16371 raft_consensus.cc:513] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 2 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } }
I20250411 13:57:09.231057 15983 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "0ce6dab661e5452f9de8e26a520986a2" candidate_term: 2 candidate_status { last_received { term: 1 index: 10 } } ignore_live_leader: false dest_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6"
I20250411 13:57:09.231829 15983 raft_consensus.cc:2391] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 2 FOLLOWER]: Leader election vote request: Denying vote to candidate 0ce6dab661e5452f9de8e26a520986a2 in current term 2: Already voted for candidate 7eebe9d12be74e3881ebe0a09e9ce6e6 in this term.
I20250411 13:57:09.232431 16370 leader_election.cc:290] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [CANDIDATE]: Term 2 election: Requested vote from peers ef7101a74df04a5cbc67d3c9dedba068 (127.15.113.62:39157), 7eebe9d12be74e3881ebe0a09e9ce6e6 (127.15.113.60:44199)
I20250411 13:57:09.234489 16371 leader_election.cc:290] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [CANDIDATE]: Term 2 election: Requested vote from peers ef7101a74df04a5cbc67d3c9dedba068 (127.15.113.62:39157), 0ce6dab661e5452f9de8e26a520986a2 (127.15.113.61:45255)
I20250411 13:57:09.235461 15918 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" candidate_term: 2 candidate_status { last_received { term: 1 index: 10 } } ignore_live_leader: false dest_uuid: "0ce6dab661e5452f9de8e26a520986a2"
W20250411 13:57:09.235817 15893 leader_election.cc:336] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [CANDIDATE]: Term 2 pre-election: RPC error from VoteRequest() call to peer ef7101a74df04a5cbc67d3c9dedba068 (127.15.113.62:39157): Network error: Client connection negotiation failed: client connection to 127.15.113.62:39157: connect: Connection refused (error 111)
I20250411 13:57:09.236173 15918 raft_consensus.cc:2391] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [term 2 FOLLOWER]: Leader election vote request: Denying vote to candidate 7eebe9d12be74e3881ebe0a09e9ce6e6 in current term 2: Already voted for candidate 0ce6dab661e5452f9de8e26a520986a2 in this term.
W20250411 13:57:09.237494 15958 leader_election.cc:336] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [CANDIDATE]: Term 2 election: RPC error from VoteRequest() call to peer ef7101a74df04a5cbc67d3c9dedba068 (127.15.113.62:39157): Network error: Client connection negotiation failed: client connection to 127.15.113.62:39157: connect: Connection refused (error 111)
I20250411 13:57:09.237968 15958 leader_election.cc:304] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [CANDIDATE]: Term 2 election: Election decided. Result: candidate lost. Election summary: received 3 responses out of 3 voters: 1 yes votes; 2 no votes. yes voters: 7eebe9d12be74e3881ebe0a09e9ce6e6; no voters: 0ce6dab661e5452f9de8e26a520986a2, ef7101a74df04a5cbc67d3c9dedba068
I20250411 13:57:09.238585 16371 raft_consensus.cc:2747] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 2 FOLLOWER]: Leader election lost for term 2. Reason: could not achieve majority
W20250411 13:57:09.239002 15893 leader_election.cc:336] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [CANDIDATE]: Term 2 election: RPC error from VoteRequest() call to peer ef7101a74df04a5cbc67d3c9dedba068 (127.15.113.62:39157): Network error: Client connection negotiation failed: client connection to 127.15.113.62:39157: connect: Connection refused (error 111)
I20250411 13:57:09.239348 15893 leader_election.cc:304] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [CANDIDATE]: Term 2 election: Election decided. Result: candidate lost. Election summary: received 3 responses out of 3 voters: 1 yes votes; 2 no votes. yes voters: 0ce6dab661e5452f9de8e26a520986a2; no voters: 7eebe9d12be74e3881ebe0a09e9ce6e6, ef7101a74df04a5cbc67d3c9dedba068
I20250411 13:57:09.239889 16370 raft_consensus.cc:2747] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [term 2 FOLLOWER]: Leader election lost for term 2. Reason: could not achieve majority
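The term-2 elections above split: with the third master already shut down, each surviving candidate voted for itself and denied the other, so neither reached a majority of 3 and both lost. Raft resolves such split votes by having each node wait a randomized timeout before retrying, which is why only one candidate (7eebe9…) kicks off the term-3 round about 1.7 s later. A sketch of jittered election backoff, with made-up constants rather than Kudu's actual flag values:

```cpp
#include <chrono>
#include <iostream>
#include <random>

// Randomized election timeout: a base period plus uniform jitter.
// The constants here are illustrative, not Kudu's real defaults.
std::chrono::milliseconds NextElectionTimeout(std::mt19937& rng) {
  constexpr int kBaseMs = 1500;
  std::uniform_int_distribution<int> jitter(0, kBaseMs);  // up to +100%
  return std::chrono::milliseconds(kBaseMs + jitter(rng));
}

int main() {
  std::mt19937 rng(std::random_device{}());
  // Two candidates drawing independent timeouts rarely fire together
  // again, so one of them usually completes a full round first.
  std::cout << NextElectionTimeout(rng).count() << " ms vs "
            << NextElectionTimeout(rng).count() << " ms\n";
}
```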
W20250411 13:57:09.730631 15812 thread.cc:535] Waited for 2000ms trying to join with diag-logger (tid 15875)
W20250411 13:57:10.731073 15812 thread.cc:535] Waited for 3000ms trying to join with diag-logger (tid 15875)
I20250411 13:57:10.936072 16376 raft_consensus.cc:491] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 2 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:57:10.936465 16376 raft_consensus.cc:513] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 2 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } }
I20250411 13:57:10.937973 16376 leader_election.cc:290] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [CANDIDATE]: Term 3 pre-election: Requested pre-vote from peers ef7101a74df04a5cbc67d3c9dedba068 (127.15.113.62:39157), 0ce6dab661e5452f9de8e26a520986a2 (127.15.113.61:45255)
I20250411 13:57:10.939199 15918 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" candidate_term: 3 candidate_status { last_received { term: 1 index: 10 } } ignore_live_leader: false dest_uuid: "0ce6dab661e5452f9de8e26a520986a2" is_pre_election: true
I20250411 13:57:10.939882 15918 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [term 2 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate 7eebe9d12be74e3881ebe0a09e9ce6e6 in term 2.
I20250411 13:57:10.941313 15957 leader_election.cc:304] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [CANDIDATE]: Term 3 pre-election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 0ce6dab661e5452f9de8e26a520986a2, 7eebe9d12be74e3881ebe0a09e9ce6e6; no voters: 
I20250411 13:57:10.942193 16376 raft_consensus.cc:2802] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 2 FOLLOWER]: Leader pre-election won for term 3
I20250411 13:57:10.942549 16376 raft_consensus.cc:491] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 2 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
W20250411 13:57:10.942687 15958 leader_election.cc:336] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [CANDIDATE]: Term 3 pre-election: RPC error from VoteRequest() call to peer ef7101a74df04a5cbc67d3c9dedba068 (127.15.113.62:39157): Network error: Client connection negotiation failed: client connection to 127.15.113.62:39157: connect: Connection refused (error 111)
I20250411 13:57:10.942967 16376 raft_consensus.cc:3058] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 2 FOLLOWER]: Advancing to term 3
I20250411 13:57:10.950309 16376 raft_consensus.cc:513] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 3 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } }
I20250411 13:57:10.952266 16376 leader_election.cc:290] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [CANDIDATE]: Term 3 election: Requested vote from peers ef7101a74df04a5cbc67d3c9dedba068 (127.15.113.62:39157), 0ce6dab661e5452f9de8e26a520986a2 (127.15.113.61:45255)
I20250411 13:57:10.953358 15918 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" candidate_term: 3 candidate_status { last_received { term: 1 index: 10 } } ignore_live_leader: false dest_uuid: "0ce6dab661e5452f9de8e26a520986a2"
I20250411 13:57:10.953972 15918 raft_consensus.cc:3058] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [term 2 FOLLOWER]: Advancing to term 3
W20250411 13:57:10.955320 15958 leader_election.cc:336] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [CANDIDATE]: Term 3 election: RPC error from VoteRequest() call to peer ef7101a74df04a5cbc67d3c9dedba068 (127.15.113.62:39157): Network error: Client connection negotiation failed: client connection to 127.15.113.62:39157: connect: Connection refused (error 111)
I20250411 13:57:10.961127 15918 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [term 3 FOLLOWER]: Leader election vote request: Granting yes vote for candidate 7eebe9d12be74e3881ebe0a09e9ce6e6 in term 3.
I20250411 13:57:10.962287 15957 leader_election.cc:304] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [CANDIDATE]: Term 3 election: Election decided. Result: candidate won. Election summary: received 3 responses out of 3 voters: 2 yes votes; 1 no votes. yes voters: 0ce6dab661e5452f9de8e26a520986a2, 7eebe9d12be74e3881ebe0a09e9ce6e6; no voters: ef7101a74df04a5cbc67d3c9dedba068
I20250411 13:57:10.963088 16376 raft_consensus.cc:2802] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 3 FOLLOWER]: Leader election won for term 3
I20250411 13:57:10.964042 16376 raft_consensus.cc:695] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 3 LEADER]: Becoming Leader. State: Replica: 7eebe9d12be74e3881ebe0a09e9ce6e6, State: Running, Role: LEADER
I20250411 13:57:10.964958 16376 consensus_queue.cc:237] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [LEADER]: Queue going to LEADER mode. State: All replicated index: 0, Majority replicated index: 10, Committed index: 10, Last appended: 1.10, Last appended by leader: 10, Current term: 3, Majority size: 2, State: 0, Mode: LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } }
I20250411 13:57:10.969985 16379 sys_catalog.cc:455] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [sys.catalog]: SysCatalogTable state changed. Reason: New leader 7eebe9d12be74e3881ebe0a09e9ce6e6. Latest consensus state: current_term: 3 leader_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } } }
I20250411 13:57:10.970505 16379 sys_catalog.cc:458] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [sys.catalog]: This master's current role is: LEADER
I20250411 13:57:10.972208 16381 catalog_manager.cc:1477] Loading table and tablet metadata into memory...
I20250411 13:57:10.979315 16381 catalog_manager.cc:671] Loaded metadata for table testMasterReplication-2 [id=663a3e8e52364ea0a9eae1d53908162c]
I20250411 13:57:10.980994 16381 catalog_manager.cc:671] Loaded metadata for table testMasterReplication-1 [id=bee46abd8d4e4bfa87d2c72e80145c47]
I20250411 13:57:10.988175 16381 tablet_loader.cc:96] loaded metadata for tablet 21a33548ff234470a1cc0587eec0c7ad (table testMasterReplication-2 [id=663a3e8e52364ea0a9eae1d53908162c])
I20250411 13:57:10.989578 16381 tablet_loader.cc:96] loaded metadata for tablet 27323d93ce9a4ae1a054154f0fb6b1cf (table testMasterReplication-1 [id=bee46abd8d4e4bfa87d2c72e80145c47])
I20250411 13:57:10.990989 16381 catalog_manager.cc:1486] Initializing Kudu cluster ID...
I20250411 13:57:10.995604 16381 catalog_manager.cc:1261] Loaded cluster ID: 65369a8061d140ae883d3590ec905e7c
I20250411 13:57:10.995914 16381 catalog_manager.cc:1497] Initializing Kudu internal certificate authority...
I20250411 13:57:11.000936 16381 catalog_manager.cc:1506] Loading token signing keys...
I20250411 13:57:11.005273 16381 catalog_manager.cc:5965] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6: Loaded TSK: 0
I20250411 13:57:11.006409 16381 catalog_manager.cc:1516] Initializing in-progress tserver states...
W20250411 13:57:11.447220 15958 consensus_peers.cc:487] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 -> Peer ef7101a74df04a5cbc67d3c9dedba068 (127.15.113.62:39157): Couldn't send request to peer ef7101a74df04a5cbc67d3c9dedba068. Status: Network error: Client connection negotiation failed: client connection to 127.15.113.62:39157: connect: Connection refused (error 111). This is attempt 1: this message will repeat every 5th retry.
I20250411 13:57:11.461081 15918 raft_consensus.cc:1273] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [term 3 FOLLOWER]: Refusing update from remote peer 7eebe9d12be74e3881ebe0a09e9ce6e6: Log matching property violated. Preceding OpId in replica: term: 1 index: 10. Preceding OpId from leader: term: 3 index: 11. (index mismatch)
I20250411 13:57:11.462569 16379 consensus_queue.cc:1035] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [LEADER]: Connected to new peer: Peer: permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 11, Last known committed idx: 10, Time since last communication: 0.000s
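The refusal at 13:57:11.461 is Raft's log-matching check: the follower compares the preceding OpId claimed by the new leader (3.11) against the last entry in its own log (1.10), rejects the append on mismatch, and the leader backs off its next index for that peer (the LMP_MISMATCH status above) until the logs line up. A minimal sketch of the follower-side check, with hypothetical types standing in for Kudu's consensus code:

```cpp
#include <iostream>

// A Raft OpId: (term, index). Hypothetical minimal type; the real logic
// lives in raft_consensus.cc and consensus_queue.cc.
struct OpId { long term; long index; };

bool Matches(const OpId& a, const OpId& b) {
  return a.term == b.term && a.index == b.index;
}

// Follower side of the log-matching property: accept a batch only if the
// leader's claimed preceding entry is exactly our last-received entry.
bool AcceptAppend(const OpId& preceding_from_leader,
                  const OpId& last_received) {
  return Matches(preceding_from_leader, last_received);
}

int main() {
  OpId last_received{1, 10};          // replica's log, per the message above
  OpId preceding_from_leader{3, 11};  // the new leader's first claim
  std::cout << AcceptAppend(preceding_from_leader, last_received) << "\n";
  // Prints 0: rejected (LMP_MISMATCH). The leader then retries from an
  // earlier preceding OpId that does match, and replication proceeds.
}
```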
I20250411 13:57:11.467931 16382 sys_catalog.cc:455] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [sys.catalog]: SysCatalogTable state changed. Reason: New leader 7eebe9d12be74e3881ebe0a09e9ce6e6. Latest consensus state: current_term: 3 leader_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } } }
I20250411 13:57:11.468565 16382 sys_catalog.cc:458] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:11.473047 16379 sys_catalog.cc:455] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [sys.catalog]: SysCatalogTable state changed. Reason: Peer health change. Latest consensus state: current_term: 3 leader_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } } }
I20250411 13:57:11.473848 16379 sys_catalog.cc:458] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [sys.catalog]: This master's current role is: LEADER
I20250411 13:57:11.474005 16382 sys_catalog.cc:455] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [sys.catalog]: SysCatalogTable state changed. Reason: Replicated consensus-only round. Latest consensus state: current_term: 3 leader_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "ef7101a74df04a5cbc67d3c9dedba068" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39157 } } peers { permanent_uuid: "0ce6dab661e5452f9de8e26a520986a2" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 45255 } } peers { permanent_uuid: "7eebe9d12be74e3881ebe0a09e9ce6e6" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 44199 } } }
I20250411 13:57:11.474750 16382 sys_catalog.cc:458] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:11.549665 15812 master.cc:583] Master@127.15.113.62:39157 shutdown complete.
I20250411 13:57:11.561594 15812 master.cc:561] Master@127.15.113.61:45255 shutting down...
I20250411 13:57:11.576145 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [term 3 FOLLOWER]: Raft consensus shutting down.
I20250411 13:57:11.576599 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2 [term 3 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:11.576856 15812 tablet_replica.cc:331] T 00000000000000000000000000000000 P 0ce6dab661e5452f9de8e26a520986a2: stopping tablet replica
I20250411 13:57:11.629091 15812 master.cc:583] Master@127.15.113.61:45255 shutdown complete.
I20250411 13:57:11.638219 15812 master.cc:561] Master@127.15.113.60:44199 shutting down...
I20250411 13:57:11.651773 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 3 LEADER]: Raft consensus shutting down.
I20250411 13:57:11.652565 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6 [term 3 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:11.652992 15812 tablet_replica.cc:331] T 00000000000000000000000000000000 P 7eebe9d12be74e3881ebe0a09e9ce6e6: stopping tablet replica
I20250411 13:57:11.670444 15812 master.cc:583] Master@127.15.113.60:44199 shutdown complete.
[       OK ] MasterReplicationTest.TestSysTablesReplication (9027 ms)
[ RUN      ] MasterReplicationTest.TestTimeoutWhenAllMastersAreDown
I20250411 13:57:11.693893 15812 internal_mini_cluster.cc:156] Creating distributed mini masters. Addrs: 127.15.113.62:38041,127.15.113.61:37757,127.15.113.60:41447
I20250411 13:57:11.695065 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:11.699537 16387 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:11.700350 16388 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:11.701120 15812 server_base.cc:1034] running on GCE node
W20250411 13:57:11.701440 16390 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:11.702158 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:11.702349 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:11.702492 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379831702476 us; error 0 us; skew 500 ppm
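The clock line above reports an initial error of 0 µs and a maximum drift (skew) of 500 ppm; under the 'system_unsync' time source the error bound simply grows at that drift rate, i.e. 500 µs per elapsed second, which is what the preceding warning about inaccurate bounds refers to. A quick illustration of that arithmetic, as an assumed model rather than Kudu's exact code:

```cpp
#include <cstdint>
#include <iostream>

// Error bound under an unsynchronized clock: initial error plus maximum
// drift (in ppm) applied to elapsed wall time. Assumed model.
int64_t ErrorBoundUs(int64_t initial_error_us, int64_t elapsed_us,
                     int64_t skew_ppm) {
  return initial_error_us + elapsed_us * skew_ppm / 1000000;
}

int main() {
  // From the log: error 0 us, skew 500 ppm. After 10 s of wall time the
  // bound has grown to 5000 us (5 ms).
  std::cout << ErrorBoundUs(0, 10 * 1000000, 500) << " us\n";
}
```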
I20250411 13:57:11.702998 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:11.709874 15812 webserver.cc:466] Webserver started at http://127.15.113.62:39409/ using document root <none> and password file <none>
I20250411 13:57:11.710294 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:11.710450 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:11.710722 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:11.711731 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/master-0-root/instance:
uuid: "46c40eb1f5f44dc599627f086abb6427"
format_stamp: "Formatted at 2025-04-11 13:57:11 on dist-test-slave-jcj2"
I20250411 13:57:11.715795 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.004s	sys 0.000s
I20250411 13:57:11.718539 16395 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:11.719228 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.002s	sys 0.000s
I20250411 13:57:11.719472 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/master-0-root
uuid: "46c40eb1f5f44dc599627f086abb6427"
format_stamp: "Formatted at 2025-04-11 13:57:11 on dist-test-slave-jcj2"
I20250411 13:57:11.719732 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/master-0-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/master-0-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/master-0-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
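[Annotation: the FS layout report above (repeated below for each freshly formatted node root) shows an empty log block manager; all counters are zero because this is a new deployment. The three masters and three tablet servers come from Kudu's internal minicluster test harness. A minimal sketch of bringing up such a topology, assuming the option names from kudu/mini-cluster/internal_mini_cluster.h; treat the exact signatures as an assumption, not a verbatim excerpt:

    #include <memory>
    #include <utility>
    #include "kudu/mini-cluster/internal_mini_cluster.h"

    using kudu::cluster::InternalMiniCluster;
    using kudu::cluster::InternalMiniClusterOptions;

    kudu::Status StartThreeByThreeCluster(kudu::Env* env,
                                          std::unique_ptr<InternalMiniCluster>* cluster) {
      InternalMiniClusterOptions opts;
      opts.num_masters = 3;         // matches the three master addrs listed above
      opts.num_tablet_servers = 3;  // ts-0/ts-1/ts-2 roots appear later in this log
      cluster->reset(new InternalMiniCluster(env, std::move(opts)));
      return (*cluster)->Start();   // formats FS layouts and boots every daemon
    }
]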
I20250411 13:57:11.739957 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:11.740885 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:11.772923 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.62:38041
I20250411 13:57:11.773016 16446 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.62:38041 every 8 connection(s)
I20250411 13:57:11.776350 16447 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 00000000000000000000000000000000. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:57:11.776535 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:11.781723 16449 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:11.782552 16450 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:11.782840 16447 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38041 } has no permanent_uuid. Determining permanent_uuid...
W20250411 13:57:11.785240 16452 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:11.786230 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:11.787329 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:11.787509 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:11.787642 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379831787628 us; error 0 us; skew 500 ppm
I20250411 13:57:11.788079 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:11.790633 15812 webserver.cc:466] Webserver started at http://127.15.113.61:40391/ using document root <none> and password file <none>
I20250411 13:57:11.791110 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:11.791283 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:11.791515 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:11.792639 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/master-1-root/instance:
uuid: "33b9b13e274d41b48e403836a32ff769"
format_stamp: "Formatted at 2025-04-11 13:57:11 on dist-test-slave-jcj2"
I20250411 13:57:11.794972 16447 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:11.797587 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.005s	sys 0.000s
W20250411 13:57:11.799585 16447 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:37757: Network error: Client connection negotiation failed: client connection to 127.15.113.61:37757: connect: Connection refused (error 111)
I20250411 13:57:11.801393 16460 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:11.802080 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.002s	sys 0.000s
I20250411 13:57:11.802340 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/master-1-root
uuid: "33b9b13e274d41b48e403836a32ff769"
format_stamp: "Formatted at 2025-04-11 13:57:11 on dist-test-slave-jcj2"
I20250411 13:57:11.802590 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/master-1-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/master-1-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/master-1-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:11.819015 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:11.819967 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:11.827984 16447 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 } attempt: 1
W20250411 13:57:11.832551 16447 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:37757: Network error: Client connection negotiation failed: client connection to 127.15.113.61:37757: connect: Connection refused (error 111)
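[Annotation: the 'Error getting permanent uuid' warnings are expected during a cold start. The masters boot sequentially, so master-0's consensus peer probe hits 'Connection refused' until its siblings bind their RPC ports, and consensus_peers.cc retries until they do (note the attempt counters). The pattern is plain retry with backoff; a minimal, self-contained sketch, where TryGetPermanentUuid is a hypothetical stand-in for the RPC rather than Kudu's real API:

    #include <algorithm>
    #include <chrono>
    #include <optional>
    #include <string>
    #include <thread>

    // Hypothetical stand-in for the UUID-fetch RPC issued by consensus_peers.cc:
    // returns the peer's UUID once its RPC server is listening, nullopt before.
    std::optional<std::string> TryGetPermanentUuid(const std::string& peer_addr);

    std::optional<std::string> ResolvePeerUuidWithRetry(const std::string& peer_addr) {
      int backoff_ms = 20;
      for (int attempt = 0; attempt <= 10; ++attempt) {
        if (auto uuid = TryGetPermanentUuid(peer_addr)) return uuid;
        // "Connection refused" just means the sibling master has not bound
        // its RPC port yet; wait a bit longer each time and probe again.
        std::this_thread::sleep_for(std::chrono::milliseconds(backoff_ms));
        backoff_ms = std::min(backoff_ms * 2, 1000);
      }
      return std::nullopt;  // give up; caller surfaces the network error
    }
]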
I20250411 13:57:11.852774 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.61:37757
I20250411 13:57:11.852871 16511 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.61:37757 every 8 connection(s)
I20250411 13:57:11.856222 16512 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 00000000000000000000000000000000. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:57:11.856411 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:11.861157 16514 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:11.862123 16515 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:11.862478 16512 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38041 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:11.864435 15812 server_base.cc:1034] running on GCE node
W20250411 13:57:11.864490 16517 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:11.865764 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:11.866031 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:11.866235 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379831866218 us; error 0 us; skew 500 ppm
I20250411 13:57:11.866797 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:11.869398 15812 webserver.cc:466] Webserver started at http://127.15.113.60:33805/ using document root <none> and password file <none>
I20250411 13:57:11.869832 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:11.869983 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:11.870232 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:11.871379 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/master-2-root/instance:
uuid: "b16f07c96f9741e0838fbda2a8f464b1"
format_stamp: "Formatted at 2025-04-11 13:57:11 on dist-test-slave-jcj2"
I20250411 13:57:11.873615 16512 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:11.876312 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.005s	sys 0.000s
I20250411 13:57:11.880054 16524 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:11.880848 15812 fs_manager.cc:730] Time spent opening block manager: real 0.003s	user 0.003s	sys 0.000s
I20250411 13:57:11.881103 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/master-2-root
uuid: "b16f07c96f9741e0838fbda2a8f464b1"
format_stamp: "Formatted at 2025-04-11 13:57:11 on dist-test-slave-jcj2"
I20250411 13:57:11.881372 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/master-2-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/master-2-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/master-2-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:11.887712 16512 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:11.887948 16447 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 } attempt: 2
W20250411 13:57:11.895490 16512 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:41447: Network error: Client connection negotiation failed: client connection to 127.15.113.60:41447: connect: Connection refused (error 111)
I20250411 13:57:11.897156 16447 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } has no permanent_uuid. Determining permanent_uuid...
W20250411 13:57:11.900758 16447 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:41447: Network error: Client connection negotiation failed: client connection to 127.15.113.60:41447: connect: Connection refused (error 111)
I20250411 13:57:11.908185 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:11.909170 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:11.928953 16512 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } attempt: 1
W20250411 13:57:11.932883 16512 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:41447: Network error: Client connection negotiation failed: client connection to 127.15.113.60:41447: connect: Connection refused (error 111)
I20250411 13:57:11.944299 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.60:41447
I20250411 13:57:11.944380 16576 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.60:41447 every 8 connection(s)
I20250411 13:57:11.947204 15812 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 0
I20250411 13:57:11.947994 16577 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 00000000000000000000000000000000. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:57:11.952179 16447 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } attempt: 1
I20250411 13:57:11.953275 16577 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38041 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:11.965183 16577 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:11.970072 16447 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 46c40eb1f5f44dc599627f086abb6427: Bootstrap starting.
I20250411 13:57:11.974184 16577 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:11.975209 16447 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P 46c40eb1f5f44dc599627f086abb6427: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:11.979176 16447 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 46c40eb1f5f44dc599627f086abb6427: No bootstrap required, opened a new log
I20250411 13:57:11.981137 16447 raft_consensus.cc:357] T 00000000000000000000000000000000 P 46c40eb1f5f44dc599627f086abb6427 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "46c40eb1f5f44dc599627f086abb6427" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38041 } } peers { permanent_uuid: "33b9b13e274d41b48e403836a32ff769" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 } } peers { permanent_uuid: "b16f07c96f9741e0838fbda2a8f464b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } }
I20250411 13:57:11.981637 16447 raft_consensus.cc:383] T 00000000000000000000000000000000 P 46c40eb1f5f44dc599627f086abb6427 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:11.981958 16447 raft_consensus.cc:738] T 00000000000000000000000000000000 P 46c40eb1f5f44dc599627f086abb6427 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 46c40eb1f5f44dc599627f086abb6427, State: Initialized, Role: FOLLOWER
I20250411 13:57:11.982620 16447 consensus_queue.cc:260] T 00000000000000000000000000000000 P 46c40eb1f5f44dc599627f086abb6427 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "46c40eb1f5f44dc599627f086abb6427" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38041 } } peers { permanent_uuid: "33b9b13e274d41b48e403836a32ff769" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 } } peers { permanent_uuid: "b16f07c96f9741e0838fbda2a8f464b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } }
I20250411 13:57:11.984556 16582 sys_catalog.cc:455] T 00000000000000000000000000000000 P 46c40eb1f5f44dc599627f086abb6427 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "46c40eb1f5f44dc599627f086abb6427" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38041 } } peers { permanent_uuid: "33b9b13e274d41b48e403836a32ff769" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 } } peers { permanent_uuid: "b16f07c96f9741e0838fbda2a8f464b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } } }
I20250411 13:57:11.985334 16582 sys_catalog.cc:458] T 00000000000000000000000000000000 P 46c40eb1f5f44dc599627f086abb6427 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:11.986074 16447 sys_catalog.cc:564] T 00000000000000000000000000000000 P 46c40eb1f5f44dc599627f086abb6427 [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:11.987548 16577 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1: Bootstrap starting.
I20250411 13:57:11.991461 16577 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:11.994318 16512 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } attempt: 2
I20250411 13:57:11.995947 16577 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1: No bootstrap required, opened a new log
I20250411 13:57:11.998484 16577 raft_consensus.cc:357] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "46c40eb1f5f44dc599627f086abb6427" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38041 } } peers { permanent_uuid: "33b9b13e274d41b48e403836a32ff769" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 } } peers { permanent_uuid: "b16f07c96f9741e0838fbda2a8f464b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } }
I20250411 13:57:11.999233 16577 raft_consensus.cc:383] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:11.999442 16577 raft_consensus.cc:738] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: b16f07c96f9741e0838fbda2a8f464b1, State: Initialized, Role: FOLLOWER
I20250411 13:57:11.999943 15812 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 1
I20250411 13:57:12.000319 16577 consensus_queue.cc:260] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "46c40eb1f5f44dc599627f086abb6427" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38041 } } peers { permanent_uuid: "33b9b13e274d41b48e403836a32ff769" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 } } peers { permanent_uuid: "b16f07c96f9741e0838fbda2a8f464b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } }
I20250411 13:57:12.003365 16595 sys_catalog.cc:455] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "46c40eb1f5f44dc599627f086abb6427" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38041 } } peers { permanent_uuid: "33b9b13e274d41b48e403836a32ff769" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 } } peers { permanent_uuid: "b16f07c96f9741e0838fbda2a8f464b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } } }
I20250411 13:57:12.004142 16595 sys_catalog.cc:458] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [sys.catalog]: This master's current role is: FOLLOWER
W20250411 13:57:12.004716 16594 catalog_manager.cc:1560] T 00000000000000000000000000000000 P 46c40eb1f5f44dc599627f086abb6427: loading cluster ID for follower catalog manager: Not found: cluster ID entry not found
W20250411 13:57:12.005018 16594 catalog_manager.cc:875] Not found: cluster ID entry not found: failed to prepare follower catalog manager, will retry
I20250411 13:57:12.005613 16577 sys_catalog.cc:564] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:12.010133 16512 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 33b9b13e274d41b48e403836a32ff769: Bootstrap starting.
I20250411 13:57:12.014534 16512 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P 33b9b13e274d41b48e403836a32ff769: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:12.018718 16512 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 33b9b13e274d41b48e403836a32ff769: No bootstrap required, opened a new log
W20250411 13:57:12.020709 16607 catalog_manager.cc:1560] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1: loading cluster ID for follower catalog manager: Not found: cluster ID entry not found
I20250411 13:57:12.020529 16512 raft_consensus.cc:357] T 00000000000000000000000000000000 P 33b9b13e274d41b48e403836a32ff769 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "46c40eb1f5f44dc599627f086abb6427" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38041 } } peers { permanent_uuid: "33b9b13e274d41b48e403836a32ff769" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 } } peers { permanent_uuid: "b16f07c96f9741e0838fbda2a8f464b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } }
W20250411 13:57:12.021031 16607 catalog_manager.cc:875] Not found: cluster ID entry not found: failed to prepare follower catalog manager, will retry
I20250411 13:57:12.021129 16512 raft_consensus.cc:383] T 00000000000000000000000000000000 P 33b9b13e274d41b48e403836a32ff769 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:12.021450 16512 raft_consensus.cc:738] T 00000000000000000000000000000000 P 33b9b13e274d41b48e403836a32ff769 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 33b9b13e274d41b48e403836a32ff769, State: Initialized, Role: FOLLOWER
I20250411 13:57:12.021982 16512 consensus_queue.cc:260] T 00000000000000000000000000000000 P 33b9b13e274d41b48e403836a32ff769 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "46c40eb1f5f44dc599627f086abb6427" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38041 } } peers { permanent_uuid: "33b9b13e274d41b48e403836a32ff769" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 } } peers { permanent_uuid: "b16f07c96f9741e0838fbda2a8f464b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } }
I20250411 13:57:12.023434 16608 sys_catalog.cc:455] T 00000000000000000000000000000000 P 33b9b13e274d41b48e403836a32ff769 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "46c40eb1f5f44dc599627f086abb6427" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38041 } } peers { permanent_uuid: "33b9b13e274d41b48e403836a32ff769" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 } } peers { permanent_uuid: "b16f07c96f9741e0838fbda2a8f464b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } } }
I20250411 13:57:12.023936 16608 sys_catalog.cc:458] T 00000000000000000000000000000000 P 33b9b13e274d41b48e403836a32ff769 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:12.024530 16512 sys_catalog.cc:564] T 00000000000000000000000000000000 P 33b9b13e274d41b48e403836a32ff769 [sys.catalog]: configured and running, proceeding with master startup.
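[Annotation: at this point all three masters have bootstrapped the sys catalog tablet (the all-zero tablet ID), found neither blocks nor WAL segments, opened fresh logs, and started Raft as FOLLOWERs with the failure detector pre-expired to encourage a prompt election. The interleaved 'Waiting to initialize catalog manager on master N' lines come from the test driver polling each master in turn; in outline, with the wait method's exact name being an assumption:

    for (int i = 0; i < cluster->num_masters(); ++i) {
      LOG(INFO) << "Waiting to initialize catalog manager on master " << i;
      RETURN_NOT_OK_PREPEND(
          cluster->mini_master(i)->master()->WaitForCatalogManagerInit(),
          "catalog manager failed to initialize");
    }
]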
I20250411 13:57:12.033624 15812 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 2
I20250411 13:57:12.034965 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:12.036291 16619 catalog_manager.cc:1560] T 00000000000000000000000000000000 P 33b9b13e274d41b48e403836a32ff769: loading cluster ID for follower catalog manager: Not found: cluster ID entry not found
W20250411 13:57:12.036553 16619 catalog_manager.cc:875] Not found: cluster ID entry not found: failed to prepare follower catalog manager, will retry
W20250411 13:57:12.040297 16620 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:12.041746 16621 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:12.042245 16623 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:12.042660 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:12.043419 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:12.043622 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:12.043762 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379832043747 us; error 0 us; skew 500 ppm
I20250411 13:57:12.044272 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:12.046460 15812 webserver.cc:466] Webserver started at http://127.15.113.1:42797/ using document root <none> and password file <none>
I20250411 13:57:12.046911 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:12.047087 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:12.047369 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:12.048322 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/ts-0-root/instance:
uuid: "afd86bbc9f314a498cd7740c1ee02266"
format_stamp: "Formatted at 2025-04-11 13:57:12 on dist-test-slave-jcj2"
I20250411 13:57:12.052281 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.003s	user 0.005s	sys 0.000s
I20250411 13:57:12.055191 16628 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:12.055845 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.002s	sys 0.001s
I20250411 13:57:12.056102 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/ts-0-root
uuid: "afd86bbc9f314a498cd7740c1ee02266"
format_stamp: "Formatted at 2025-04-11 13:57:12 on dist-test-slave-jcj2"
I20250411 13:57:12.056347 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/ts-0-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/ts-0-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/ts-0-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:12.068006 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:12.068997 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:12.070231 15812 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:12.072425 15812 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:12.072628 15812 ts_tablet_manager.cc:525] Time spent load tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:12.072894 15812 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:12.073088 15812 ts_tablet_manager.cc:589] Time spent register tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:12.079082 16595 raft_consensus.cc:491] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:57:12.079595 16595 raft_consensus.cc:513] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "46c40eb1f5f44dc599627f086abb6427" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38041 } } peers { permanent_uuid: "33b9b13e274d41b48e403836a32ff769" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 } } peers { permanent_uuid: "b16f07c96f9741e0838fbda2a8f464b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } }
I20250411 13:57:12.082758 16595 leader_election.cc:290] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers 46c40eb1f5f44dc599627f086abb6427 (127.15.113.62:38041), 33b9b13e274d41b48e403836a32ff769 (127.15.113.61:37757)
I20250411 13:57:12.083381 16422 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "b16f07c96f9741e0838fbda2a8f464b1" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "46c40eb1f5f44dc599627f086abb6427" is_pre_election: true
I20250411 13:57:12.084060 16422 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 46c40eb1f5f44dc599627f086abb6427 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate b16f07c96f9741e0838fbda2a8f464b1 in term 0.
I20250411 13:57:12.084223 16487 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "b16f07c96f9741e0838fbda2a8f464b1" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "33b9b13e274d41b48e403836a32ff769" is_pre_election: true
I20250411 13:57:12.084851 16487 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 33b9b13e274d41b48e403836a32ff769 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate b16f07c96f9741e0838fbda2a8f464b1 in term 0.
I20250411 13:57:12.085587 16527 leader_election.cc:304] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 46c40eb1f5f44dc599627f086abb6427, b16f07c96f9741e0838fbda2a8f464b1; no voters: 
I20250411 13:57:12.086513 16595 raft_consensus.cc:2802] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [term 0 FOLLOWER]: Leader pre-election won for term 1
I20250411 13:57:12.086958 16595 raft_consensus.cc:491] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [term 0 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
I20250411 13:57:12.087284 16595 raft_consensus.cc:3058] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:12.091589 16595 raft_consensus.cc:513] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [term 1 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "46c40eb1f5f44dc599627f086abb6427" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38041 } } peers { permanent_uuid: "33b9b13e274d41b48e403836a32ff769" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 } } peers { permanent_uuid: "b16f07c96f9741e0838fbda2a8f464b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } }
I20250411 13:57:12.093287 16595 leader_election.cc:290] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [CANDIDATE]: Term 1 election: Requested vote from peers 46c40eb1f5f44dc599627f086abb6427 (127.15.113.62:38041), 33b9b13e274d41b48e403836a32ff769 (127.15.113.61:37757)
I20250411 13:57:12.094425 16487 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "b16f07c96f9741e0838fbda2a8f464b1" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "33b9b13e274d41b48e403836a32ff769"
I20250411 13:57:12.094466 16422 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "b16f07c96f9741e0838fbda2a8f464b1" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "46c40eb1f5f44dc599627f086abb6427"
I20250411 13:57:12.095145 16487 raft_consensus.cc:3058] T 00000000000000000000000000000000 P 33b9b13e274d41b48e403836a32ff769 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:12.095245 16422 raft_consensus.cc:3058] T 00000000000000000000000000000000 P 46c40eb1f5f44dc599627f086abb6427 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:12.101459 16487 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 33b9b13e274d41b48e403836a32ff769 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate b16f07c96f9741e0838fbda2a8f464b1 in term 1.
I20250411 13:57:12.102528 16527 leader_election.cc:304] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [CANDIDATE]: Term 1 election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 33b9b13e274d41b48e403836a32ff769, b16f07c96f9741e0838fbda2a8f464b1; no voters: 
I20250411 13:57:12.103159 16422 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 46c40eb1f5f44dc599627f086abb6427 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate b16f07c96f9741e0838fbda2a8f464b1 in term 1.
I20250411 13:57:12.103365 16595 raft_consensus.cc:2802] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [term 1 FOLLOWER]: Leader election won for term 1
I20250411 13:57:12.104876 16595 raft_consensus.cc:695] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [term 1 LEADER]: Becoming Leader. State: Replica: b16f07c96f9741e0838fbda2a8f464b1, State: Running, Role: LEADER
I20250411 13:57:12.106002 16595 consensus_queue.cc:237] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [LEADER]: Queue going to LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 1, Majority size: 2, State: 0, Mode: LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "46c40eb1f5f44dc599627f086abb6427" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38041 } } peers { permanent_uuid: "33b9b13e274d41b48e403836a32ff769" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 } } peers { permanent_uuid: "b16f07c96f9741e0838fbda2a8f464b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } }
I20250411 13:57:12.111112 16670 sys_catalog.cc:455] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [sys.catalog]: SysCatalogTable state changed. Reason: New leader b16f07c96f9741e0838fbda2a8f464b1. Latest consensus state: current_term: 1 leader_uuid: "b16f07c96f9741e0838fbda2a8f464b1" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "46c40eb1f5f44dc599627f086abb6427" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38041 } } peers { permanent_uuid: "33b9b13e274d41b48e403836a32ff769" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 } } peers { permanent_uuid: "b16f07c96f9741e0838fbda2a8f464b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } } }
I20250411 13:57:12.111804 16670 sys_catalog.cc:458] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [sys.catalog]: This master's current role is: LEADER
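[Annotation: the transcript above is Raft's two-phase election as Kudu runs it. b16f07c96f9741e0838fbda2a8f464b1 first holds a pre-election in term 0 (votes are requested without advancing anyone's term), and only after winning it does it advance to term 1 and run the real election. With three voters a majority is two, and the candidate votes for itself, so a single peer's yes settles each round; that is exactly what the 'received 2 responses out of 3 voters' summaries record. The decision arithmetic, as a toy sketch rather than Kudu's LeaderElection class:

    // Matches the "Election summary" lines: majority of 3 voters is 2.
    struct VoteTally {
      int yes = 0;
      int no = 0;
      int voters = 0;
    };

    bool ElectionDecided(const VoteTally& t) {
      const int majority = t.voters / 2 + 1;         // 2 when voters == 3
      return t.yes >= majority || t.no >= majority;  // either side reaching it decides
    }

    bool ElectionWon(const VoteTally& t) {
      return t.yes >= t.voters / 2 + 1;  // self-vote plus one peer suffices here
    }
]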
I20250411 13:57:12.113879 16680 catalog_manager.cc:1477] Loading table and tablet metadata into memory...
I20250411 13:57:12.121028 16680 catalog_manager.cc:1486] Initializing Kudu cluster ID...
I20250411 13:57:12.121652 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.1:42691
I20250411 13:57:12.121726 16693 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.1:42691 every 8 connection(s)
I20250411 13:57:12.138267 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
I20250411 13:57:12.141633 16487 raft_consensus.cc:1273] T 00000000000000000000000000000000 P 33b9b13e274d41b48e403836a32ff769 [term 1 FOLLOWER]: Refusing update from remote peer b16f07c96f9741e0838fbda2a8f464b1: Log matching property violated. Preceding OpId in replica: term: 0 index: 0. Preceding OpId from leader: term: 1 index: 2. (index mismatch)
I20250411 13:57:12.144114 16670 consensus_queue.cc:1035] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [LEADER]: Connected to new peer: Peer: permanent_uuid: "33b9b13e274d41b48e403836a32ff769" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
W20250411 13:57:12.152647 16702 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:12.153805 16694 heartbeater.cc:344] Connected to a master server at 127.15.113.60:41447
I20250411 13:57:12.154368 16694 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:12.155339 16694 heartbeater.cc:507] Master 127.15.113.60:41447 requested a full tablet report, sending...
I20250411 13:57:12.157541 16542 ts_manager.cc:194] Registered new tserver with Master: afd86bbc9f314a498cd7740c1ee02266 (127.15.113.1:42691)
W20250411 13:57:12.158418 16704 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:12.159185 16422 raft_consensus.cc:1273] T 00000000000000000000000000000000 P 46c40eb1f5f44dc599627f086abb6427 [term 1 FOLLOWER]: Refusing update from remote peer b16f07c96f9741e0838fbda2a8f464b1: Log matching property violated. Preceding OpId in replica: term: 0 index: 0. Preceding OpId from leader: term: 1 index: 2. (index mismatch)
I20250411 13:57:12.160728 16695 heartbeater.cc:344] Connected to a master server at 127.15.113.61:37757
I20250411 13:57:12.161058 16695 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:12.161911 16695 heartbeater.cc:507] Master 127.15.113.61:37757 requested a full tablet report, sending...
I20250411 13:57:12.166833 16698 heartbeater.cc:344] Connected to a master server at 127.15.113.62:38041
I20250411 13:57:12.167356 16698 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:12.167760 16670 consensus_queue.cc:1035] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [LEADER]: Connected to new peer: Peer: permanent_uuid: "46c40eb1f5f44dc599627f086abb6427" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38041 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:57:12.168385 16698 heartbeater.cc:507] Master 127.15.113.62:38041 requested a full tablet report, sending...
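[Annotation: each tablet server runs one heartbeat connection per master, not just to the leader, which is why every registration appears three times, once in each master's ts_manager.cc log. A rough shape of that fan-out, with Heartbeater as an illustrative stand-in for the real class behind heartbeater.cc:

    #include <string>
    #include <thread>
    #include <utility>
    #include <vector>

    struct Heartbeater {
      explicit Heartbeater(std::string master_addr) : master(std::move(master_addr)) {}
      void ConnectAndRegister();  // "Connected to a master server at ..." /
                                  // "Registering TS with master..."
      void RunLoop();             // periodic heartbeats; sends a full tablet
                                  // report whenever the master requests one
      std::string master;
    };

    void StartHeartbeaters(const std::vector<std::string>& master_addrs,
                           std::vector<std::thread>* threads) {
      for (const auto& addr : master_addrs) {
        threads->emplace_back([addr] {
          Heartbeater hb(addr);
          hb.ConnectAndRegister();
          hb.RunLoop();
        });
      }
    }
]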
I20250411 13:57:12.168649 15812 server_base.cc:1034] running on GCE node
W20250411 13:57:12.171869 16706 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:12.173100 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:12.173413 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:12.173682 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379832173663 us; error 0 us; skew 500 ppm
I20250411 13:57:12.174188 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:12.175760 16477 ts_manager.cc:194] Registered new tserver with Master: afd86bbc9f314a498cd7740c1ee02266 (127.15.113.1:42691)
I20250411 13:57:12.177093 16608 sys_catalog.cc:455] T 00000000000000000000000000000000 P 33b9b13e274d41b48e403836a32ff769 [sys.catalog]: SysCatalogTable state changed. Reason: New leader b16f07c96f9741e0838fbda2a8f464b1. Latest consensus state: current_term: 1 leader_uuid: "b16f07c96f9741e0838fbda2a8f464b1" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "46c40eb1f5f44dc599627f086abb6427" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38041 } } peers { permanent_uuid: "33b9b13e274d41b48e403836a32ff769" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 } } peers { permanent_uuid: "b16f07c96f9741e0838fbda2a8f464b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } } }
I20250411 13:57:12.179051 16608 sys_catalog.cc:458] T 00000000000000000000000000000000 P 33b9b13e274d41b48e403836a32ff769 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:12.182216 16582 sys_catalog.cc:455] T 00000000000000000000000000000000 P 46c40eb1f5f44dc599627f086abb6427 [sys.catalog]: SysCatalogTable state changed. Reason: New leader b16f07c96f9741e0838fbda2a8f464b1. Latest consensus state: current_term: 1 leader_uuid: "b16f07c96f9741e0838fbda2a8f464b1" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "46c40eb1f5f44dc599627f086abb6427" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38041 } } peers { permanent_uuid: "33b9b13e274d41b48e403836a32ff769" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 } } peers { permanent_uuid: "b16f07c96f9741e0838fbda2a8f464b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } } }
I20250411 13:57:12.182971 16582 sys_catalog.cc:458] T 00000000000000000000000000000000 P 46c40eb1f5f44dc599627f086abb6427 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:12.183250 16696 mvcc.cc:204] Tried to move back new op lower bound from 7144979792435339264 to 7144979792318545920. Current Snapshot: MvccSnapshot[applied={T|T < 7144979792435339264}]
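[Annotation: the mvcc.cc line above is benign. An op was assigned a timestamp below the current new-op lower bound, and MVCC keeps the bound where it is rather than rewinding it, so the applied snapshot stays monotonic. A minimal runnable sketch of that forward-only clamp, with invented names:

    #include <cstdint>
    #include <iostream>

    // The bound may only move forward; a smaller candidate is logged and ignored.
    void AdjustNewOpLowerBound(uint64_t* bound, uint64_t candidate) {
      if (candidate < *bound) {
        std::cout << "Tried to move back new op lower bound from " << *bound
                  << " to " << candidate << "; keeping the higher value\n";
        return;
      }
      *bound = candidate;
    }
]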
I20250411 13:57:12.184598 16412 ts_manager.cc:194] Registered new tserver with Master: afd86bbc9f314a498cd7740c1ee02266 (127.15.113.1:42691)
I20250411 13:57:12.185878 15812 webserver.cc:466] Webserver started at http://127.15.113.2:32957/ using document root <none> and password file <none>
I20250411 13:57:12.186379 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:12.186682 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:12.187012 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:12.188539 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/ts-1-root/instance:
uuid: "535fd0b522e44155b6c23f96b97b8c77"
format_stamp: "Formatted at 2025-04-11 13:57:12 on dist-test-slave-jcj2"
I20250411 13:57:12.190512 16608 sys_catalog.cc:455] T 00000000000000000000000000000000 P 33b9b13e274d41b48e403836a32ff769 [sys.catalog]: SysCatalogTable state changed. Reason: Replicated consensus-only round. Latest consensus state: current_term: 1 leader_uuid: "b16f07c96f9741e0838fbda2a8f464b1" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "46c40eb1f5f44dc599627f086abb6427" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38041 } } peers { permanent_uuid: "33b9b13e274d41b48e403836a32ff769" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 } } peers { permanent_uuid: "b16f07c96f9741e0838fbda2a8f464b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } } }
I20250411 13:57:12.191257 16608 sys_catalog.cc:458] T 00000000000000000000000000000000 P 33b9b13e274d41b48e403836a32ff769 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:12.194538 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.005s	user 0.005s	sys 0.000s
I20250411 13:57:12.194316 16582 sys_catalog.cc:455] T 00000000000000000000000000000000 P 46c40eb1f5f44dc599627f086abb6427 [sys.catalog]: SysCatalogTable state changed. Reason: Replicated consensus-only round. Latest consensus state: current_term: 1 leader_uuid: "b16f07c96f9741e0838fbda2a8f464b1" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "46c40eb1f5f44dc599627f086abb6427" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38041 } } peers { permanent_uuid: "33b9b13e274d41b48e403836a32ff769" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 } } peers { permanent_uuid: "b16f07c96f9741e0838fbda2a8f464b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } } }
I20250411 13:57:12.195030 16582 sys_catalog.cc:458] T 00000000000000000000000000000000 P 46c40eb1f5f44dc599627f086abb6427 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:12.198734 16718 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:12.199366 16680 catalog_manager.cc:1349] Generated new cluster ID: 7594de2729c041b28941be157a3a9a20
I20250411 13:57:12.198913 16595 sys_catalog.cc:455] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [sys.catalog]: SysCatalogTable state changed. Reason: Peer health change. Latest consensus state: current_term: 1 leader_uuid: "b16f07c96f9741e0838fbda2a8f464b1" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "46c40eb1f5f44dc599627f086abb6427" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38041 } } peers { permanent_uuid: "33b9b13e274d41b48e403836a32ff769" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 } } peers { permanent_uuid: "b16f07c96f9741e0838fbda2a8f464b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } } }
I20250411 13:57:12.198807 16703 sys_catalog.cc:455] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [sys.catalog]: SysCatalogTable state changed. Reason: Peer health change. Latest consensus state: current_term: 1 leader_uuid: "b16f07c96f9741e0838fbda2a8f464b1" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "46c40eb1f5f44dc599627f086abb6427" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38041 } } peers { permanent_uuid: "33b9b13e274d41b48e403836a32ff769" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37757 } } peers { permanent_uuid: "b16f07c96f9741e0838fbda2a8f464b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41447 } } }
I20250411 13:57:12.199708 16680 catalog_manager.cc:1497] Initializing Kudu internal certificate authority...
I20250411 13:57:12.199769 16595 sys_catalog.cc:458] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [sys.catalog]: This master's current role is: LEADER
I20250411 13:57:12.199877 16703 sys_catalog.cc:458] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [sys.catalog]: This master's current role is: LEADER
I20250411 13:57:12.200027 15812 fs_manager.cc:730] Time spent opening block manager: real 0.003s	user 0.003s	sys 0.000s
I20250411 13:57:12.200524 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/ts-1-root
uuid: "535fd0b522e44155b6c23f96b97b8c77"
format_stamp: "Formatted at 2025-04-11 13:57:12 on dist-test-slave-jcj2"
I20250411 13:57:12.200785 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/ts-1-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/ts-1-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/ts-1-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:12.211966 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:12.213106 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:12.214725 15812 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:12.217124 15812 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:12.217307 15812 ts_tablet_manager.cc:525] Time spent load tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:12.217502 15812 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:12.217638 15812 ts_tablet_manager.cc:589] Time spent register tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:12.227955 16680 catalog_manager.cc:1372] Generated new certificate authority record
I20250411 13:57:12.229637 16680 catalog_manager.cc:1506] Loading token signing keys...
I20250411 13:57:12.253077 16680 catalog_manager.cc:5954] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1: Generated new TSK 0
I20250411 13:57:12.253798 16680 catalog_manager.cc:1516] Initializing in-progress tserver states...
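[Annotation: with the election settled, the leader's catalog manager performs its one-time bring-up in a fixed order, visible in the catalog_manager.cc lines above: load table and tablet metadata, generate the cluster ID, create the internal certificate authority record, generate token signing key 0, and initialize in-progress tserver states. As a structural sketch, with illustrative method names rather than the real ones:

    Status InitCatalogManagerAsLeader() {
      RETURN_NOT_OK(LoadTableAndTabletMetadata());  // "Loading table and tablet metadata..."
      RETURN_NOT_OK(InitClusterId());               // "Generated new cluster ID: 7594de27..."
      RETURN_NOT_OK(InitCertAuthority());           // internal CA record
      RETURN_NOT_OK(LoadTokenSigningKeys());        // "Generated new TSK 0"
      RETURN_NOT_OK(InitTServerStates());           // in-progress tserver states
      return Status::OK();
    }
]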
I20250411 13:57:12.280758 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.2:39049
I20250411 13:57:12.280831 16780 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.2:39049 every 8 connection(s)
I20250411 13:57:12.296329 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
I20250411 13:57:12.298267 16781 heartbeater.cc:344] Connected to a master server at 127.15.113.60:41447
I20250411 13:57:12.298691 16781 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:12.299427 16782 heartbeater.cc:344] Connected to a master server at 127.15.113.61:37757
I20250411 13:57:12.299592 16781 heartbeater.cc:507] Master 127.15.113.60:41447 requested a full tablet report, sending...
I20250411 13:57:12.299809 16782 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:12.300705 16782 heartbeater.cc:507] Master 127.15.113.61:37757 requested a full tablet report, sending...
I20250411 13:57:12.302177 16542 ts_manager.cc:194] Registered new tserver with Master: 535fd0b522e44155b6c23f96b97b8c77 (127.15.113.2:39049)
I20250411 13:57:12.303328 16477 ts_manager.cc:194] Registered new tserver with Master: 535fd0b522e44155b6c23f96b97b8c77 (127.15.113.2:39049)
I20250411 13:57:12.305198 16542 master_service.cc:496] Signed X509 certificate for tserver {username='slave'} at 127.0.0.1:55762
W20250411 13:57:12.307068 16788 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:12.307749 16783 heartbeater.cc:344] Connected to a master server at 127.15.113.62:38041
I20250411 13:57:12.308221 16783 heartbeater.cc:461] Registering TS with master...
W20250411 13:57:12.309458 16789 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:12.309475 16783 heartbeater.cc:507] Master 127.15.113.62:38041 requested a full tablet report, sending...
W20250411 13:57:12.312453 16791 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:12.312444 16412 ts_manager.cc:194] Registered new tserver with Master: 535fd0b522e44155b6c23f96b97b8c77 (127.15.113.2:39049)
I20250411 13:57:12.312711 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:12.313827 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:12.314096 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:12.314258 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379832314244 us; error 0 us; skew 500 ppm
I20250411 13:57:12.314713 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:12.316994 15812 webserver.cc:466] Webserver started at http://127.15.113.3:38281/ using document root <none> and password file <none>
I20250411 13:57:12.317390 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:12.317533 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:12.317724 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:12.318655 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/ts-2-root/instance:
uuid: "e4c4874b3de04c0bb0e9256b6cb05b5f"
format_stamp: "Formatted at 2025-04-11 13:57:12 on dist-test-slave-jcj2"
I20250411 13:57:12.322721 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.003s	sys 0.000s
I20250411 13:57:12.325642 16796 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:12.326362 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.002s	sys 0.000s
I20250411 13:57:12.326625 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/ts-2-root
uuid: "e4c4874b3de04c0bb0e9256b6cb05b5f"
format_stamp: "Formatted at 2025-04-11 13:57:12 on dist-test-slave-jcj2"
I20250411 13:57:12.326927 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/ts-2-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/ts-2-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestTimeoutWhenAllMastersAreDown.1744379822517842-15812-0/minicluster-data/ts-2-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:12.354530 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:12.355629 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:12.356972 15812 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:12.359117 15812 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:12.359282 15812 ts_tablet_manager.cc:525] Time spent load tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:12.359490 15812 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:12.359620 15812 ts_tablet_manager.cc:589] Time spent register tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:12.395185 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.3:43241
I20250411 13:57:12.395244 16858 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.3:43241 every 8 connection(s)
I20250411 13:57:12.412001 16859 heartbeater.cc:344] Connected to a master server at 127.15.113.60:41447
I20250411 13:57:12.412474 16859 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:12.412971 16860 heartbeater.cc:344] Connected to a master server at 127.15.113.61:37757
I20250411 13:57:12.413331 16859 heartbeater.cc:507] Master 127.15.113.60:41447 requested a full tablet report, sending...
I20250411 13:57:12.413365 16860 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:12.414400 16860 heartbeater.cc:507] Master 127.15.113.61:37757 requested a full tablet report, sending...
I20250411 13:57:12.415482 16542 ts_manager.cc:194] Registered new tserver with Master: e4c4874b3de04c0bb0e9256b6cb05b5f (127.15.113.3:43241)
I20250411 13:57:12.417141 16542 master_service.cc:496] Signed X509 certificate for tserver {username='slave'} at 127.0.0.1:55768
I20250411 13:57:12.417564 16477 ts_manager.cc:194] Registered new tserver with Master: e4c4874b3de04c0bb0e9256b6cb05b5f (127.15.113.3:43241)
I20250411 13:57:12.417667 16861 heartbeater.cc:344] Connected to a master server at 127.15.113.62:38041
I20250411 13:57:12.418212 16861 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:12.419176 16861 heartbeater.cc:507] Master 127.15.113.62:38041 requested a full tablet report, sending...
I20250411 13:57:12.421166 16412 ts_manager.cc:194] Registered new tserver with Master: e4c4874b3de04c0bb0e9256b6cb05b5f (127.15.113.3:43241)
I20250411 13:57:12.422171 15812 internal_mini_cluster.cc:371] 3 TS(s) registered with all masters after 0.020001573s
I20250411 13:57:12.424539 15812 tablet_server.cc:178] TabletServer@127.15.113.1:0 shutting down...
I20250411 13:57:12.440503 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:12.455931 15812 tablet_server.cc:195] TabletServer@127.15.113.1:0 shutdown complete.
I20250411 13:57:12.462579 15812 tablet_server.cc:178] TabletServer@127.15.113.2:0 shutting down...
I20250411 13:57:12.476147 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:12.491623 15812 tablet_server.cc:195] TabletServer@127.15.113.2:0 shutdown complete.
I20250411 13:57:12.498417 15812 tablet_server.cc:178] TabletServer@127.15.113.3:0 shutting down...
I20250411 13:57:12.513641 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:12.529017 15812 tablet_server.cc:195] TabletServer@127.15.113.3:0 shutdown complete.
I20250411 13:57:12.535822 15812 master.cc:561] Master@127.15.113.62:38041 shutting down...
I20250411 13:57:12.547641 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P 46c40eb1f5f44dc599627f086abb6427 [term 1 FOLLOWER]: Raft consensus shutting down.
I20250411 13:57:12.548164 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P 46c40eb1f5f44dc599627f086abb6427 [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:12.548462 15812 tablet_replica.cc:331] T 00000000000000000000000000000000 P 46c40eb1f5f44dc599627f086abb6427: stopping tablet replica
I20250411 13:57:12.565960 15812 master.cc:583] Master@127.15.113.62:38041 shutdown complete.
I20250411 13:57:12.575726 15812 master.cc:561] Master@127.15.113.61:37757 shutting down...
I20250411 13:57:12.587863 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P 33b9b13e274d41b48e403836a32ff769 [term 1 FOLLOWER]: Raft consensus shutting down.
I20250411 13:57:12.588385 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P 33b9b13e274d41b48e403836a32ff769 [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:12.588689 15812 tablet_replica.cc:331] T 00000000000000000000000000000000 P 33b9b13e274d41b48e403836a32ff769: stopping tablet replica
I20250411 13:57:12.605798 15812 master.cc:583] Master@127.15.113.61:37757 shutdown complete.
I20250411 13:57:12.615036 15812 master.cc:561] Master@127.15.113.60:41447 shutting down...
I20250411 13:57:12.626351 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [term 1 LEADER]: Raft consensus shutting down.
I20250411 13:57:12.627063 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1 [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:12.627419 15812 tablet_replica.cc:331] T 00000000000000000000000000000000 P b16f07c96f9741e0838fbda2a8f464b1: stopping tablet replica
I20250411 13:57:12.644992 15812 master.cc:583] Master@127.15.113.60:41447 shutdown complete.
[       OK ] MasterReplicationTest.TestTimeoutWhenAllMastersAreDown (31161 ms)
[ RUN      ] MasterReplicationTest.TestCycleThroughAllMasters
I20250411 13:57:42.856227 15812 internal_mini_cluster.cc:156] Creating distributed mini masters. Addrs: 127.15.113.62:46181,127.15.113.61:32907,127.15.113.60:39983
I20250411 13:57:42.857509 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:42.862730 16878 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:42.863344 16879 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:42.864642 16881 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:42.865654 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:42.866475 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:42.866649 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:42.866776 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379862866766 us; error 0 us; skew 500 ppm
I20250411 13:57:42.867288 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:42.874732 15812 webserver.cc:466] Webserver started at http://127.15.113.62:41469/ using document root <none> and password file <none>
I20250411 13:57:42.875257 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:42.875425 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:42.875634 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:42.876806 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-0-root/instance:
uuid: "a9054441181f4a8abb031d6843529936"
format_stamp: "Formatted at 2025-04-11 13:57:42 on dist-test-slave-jcj2"
I20250411 13:57:42.881243 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.006s	sys 0.000s
I20250411 13:57:42.884481 16886 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:42.885200 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.001s	sys 0.000s
I20250411 13:57:42.885504 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-0-root
uuid: "a9054441181f4a8abb031d6843529936"
format_stamp: "Formatted at 2025-04-11 13:57:42 on dist-test-slave-jcj2"
I20250411 13:57:42.885776 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-0-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-0-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-0-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:42.896364 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:42.897595 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:42.933996 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.62:46181
I20250411 13:57:42.934096 16937 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.62:46181 every 8 connection(s)
I20250411 13:57:42.938071 16938 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 00000000000000000000000000000000. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:57:42.938144 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:42.943780 16940 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:42.945708 16941 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:42.947229 15812 server_base.cc:1034] running on GCE node
W20250411 13:57:42.947561 16943 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:42.947188 16938 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:42.951905 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:42.952268 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:42.952517 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379862952499 us; error 0 us; skew 500 ppm
I20250411 13:57:42.953281 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:42.956497 15812 webserver.cc:466] Webserver started at http://127.15.113.61:45065/ using document root <none> and password file <none>
I20250411 13:57:42.957129 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:42.957356 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:42.957680 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:42.959120 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-1-root/instance:
uuid: "0cee234f25ab4c62a65050963ad4c5a7"
format_stamp: "Formatted at 2025-04-11 13:57:42 on dist-test-slave-jcj2"
I20250411 13:57:42.963065 16938 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:42.964135 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.002s	sys 0.003s
W20250411 13:57:42.965636 16887 proxy.cc:239] Call had error, refreshing address and retrying: Network error: Client connection negotiation failed: client connection to 127.15.113.61:32907: connect: Connection refused (error 111) [suppressed 11 similar messages]
I20250411 13:57:42.968504 16951 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
W20250411 13:57:42.968851 16938 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:32907: Network error: Client connection negotiation failed: client connection to 127.15.113.61:32907: connect: Connection refused (error 111)
I20250411 13:57:42.969321 15812 fs_manager.cc:730] Time spent opening block manager: real 0.003s	user 0.003s	sys 0.001s
I20250411 13:57:42.969705 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-1-root
uuid: "0cee234f25ab4c62a65050963ad4c5a7"
format_stamp: "Formatted at 2025-04-11 13:57:42 on dist-test-slave-jcj2"
I20250411 13:57:42.970080 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-1-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-1-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-1-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:42.994143 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:42.995481 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:43.003345 16938 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } attempt: 1
W20250411 13:57:43.008008 16938 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:32907: Network error: Client connection negotiation failed: client connection to 127.15.113.61:32907: connect: Connection refused (error 111)
I20250411 13:57:43.034082 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.61:32907
I20250411 13:57:43.034171 17002 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.61:32907 every 8 connection(s)
I20250411 13:57:43.038021 17003 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 00000000000000000000000000000000. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:57:43.038110 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:43.043503 17005 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:43.043957 17006 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:43.047304 17003 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } has no permanent_uuid. Determining permanent_uuid...
W20250411 13:57:43.048293 17008 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:43.049268 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:43.051800 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:43.052004 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:43.052162 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379863052149 us; error 0 us; skew 500 ppm
I20250411 13:57:43.052682 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:43.055320 15812 webserver.cc:466] Webserver started at http://127.15.113.60:46183/ using document root <none> and password file <none>
I20250411 13:57:43.055814 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:43.055996 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:43.056223 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:43.057440 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-2-root/instance:
uuid: "ae43c70baafa4136a45e1aa583fda7b1"
format_stamp: "Formatted at 2025-04-11 13:57:43 on dist-test-slave-jcj2"
I20250411 13:57:43.060177 17003 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:43.061534 16938 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } attempt: 2
I20250411 13:57:43.064353 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.006s	user 0.007s	sys 0.000s
I20250411 13:57:43.069862 17016 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:43.071506 15812 fs_manager.cc:730] Time spent opening block manager: real 0.004s	user 0.002s	sys 0.000s
I20250411 13:57:43.071890 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-2-root
uuid: "ae43c70baafa4136a45e1aa583fda7b1"
format_stamp: "Formatted at 2025-04-11 13:57:43 on dist-test-slave-jcj2"
I20250411 13:57:43.072188 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-2-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-2-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-2-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:43.075659 17003 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:43.075850 16938 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } has no permanent_uuid. Determining permanent_uuid...
W20250411 13:57:43.081146 16938 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:39983: Network error: Client connection negotiation failed: client connection to 127.15.113.60:39983: connect: Connection refused (error 111)
W20250411 13:57:43.082158 17003 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:39983: Network error: Client connection negotiation failed: client connection to 127.15.113.60:39983: connect: Connection refused (error 111)
I20250411 13:57:43.092797 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:43.094108 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:43.120606 16938 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } attempt: 1
W20250411 13:57:43.125191 16938 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:39983: Network error: Client connection negotiation failed: client connection to 127.15.113.60:39983: connect: Connection refused (error 111)
I20250411 13:57:43.129560 17003 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } attempt: 1
I20250411 13:57:43.133086 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.60:39983
I20250411 13:57:43.133184 17068 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.60:39983 every 8 connection(s)
I20250411 13:57:43.136677 15812 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 0
I20250411 13:57:43.137439 17070 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 00000000000000000000000000000000. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:57:43.143769 17070 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:43.149307 17003 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7: Bootstrap starting.
I20250411 13:57:43.154379 17070 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:43.154698 17003 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:43.159528 17003 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7: No bootstrap required, opened a new log
I20250411 13:57:43.161955 17003 raft_consensus.cc:357] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:43.162801 17003 raft_consensus.cc:383] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:43.163070 17003 raft_consensus.cc:738] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 0cee234f25ab4c62a65050963ad4c5a7, State: Initialized, Role: FOLLOWER
I20250411 13:57:43.163784 17070 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:43.163687 17003 consensus_queue.cc:260] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:43.166179 17074 sys_catalog.cc:455] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } } }
I20250411 13:57:43.166901 17074 sys_catalog.cc:458] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:43.168309 17003 sys_catalog.cc:564] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:43.179172 17070 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1: Bootstrap starting.
I20250411 13:57:43.183645 17070 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1: Neither blocks nor log segments found. Creating new log.
W20250411 13:57:43.184196 17085 catalog_manager.cc:1560] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7: loading cluster ID for follower catalog manager: Not found: cluster ID entry not found
W20250411 13:57:43.184455 17085 catalog_manager.cc:875] Not found: cluster ID entry not found: failed to prepare follower catalog manager, will retry
I20250411 13:57:43.187647 17070 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1: No bootstrap required, opened a new log
I20250411 13:57:43.188645 16938 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } attempt: 2
I20250411 13:57:43.189924 17070 raft_consensus.cc:357] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:43.190419 17070 raft_consensus.cc:383] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:43.190624 17070 raft_consensus.cc:738] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: ae43c70baafa4136a45e1aa583fda7b1, State: Initialized, Role: FOLLOWER
I20250411 13:57:43.191202 17070 consensus_queue.cc:260] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:43.193099 17087 sys_catalog.cc:455] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } } }
I20250411 13:57:43.193893 17087 sys_catalog.cc:458] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:43.194650 17070 sys_catalog.cc:564] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:43.203761 16938 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936: Bootstrap starting.
I20250411 13:57:43.203549 17087 raft_consensus.cc:491] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:57:43.204393 17087 raft_consensus.cc:513] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:43.208475 16978 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "ae43c70baafa4136a45e1aa583fda7b1" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "0cee234f25ab4c62a65050963ad4c5a7" is_pre_election: true
I20250411 13:57:43.209249 16978 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate ae43c70baafa4136a45e1aa583fda7b1 in term 0.
I20250411 13:57:43.210192 16938 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:43.210454 17017 leader_election.cc:304] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 0cee234f25ab4c62a65050963ad4c5a7, ae43c70baafa4136a45e1aa583fda7b1; no voters: 
W20250411 13:57:43.210690 17098 catalog_manager.cc:1560] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1: loading cluster ID for follower catalog manager: Not found: cluster ID entry not found
W20250411 13:57:43.211162 17098 catalog_manager.cc:875] Not found: cluster ID entry not found: failed to prepare follower catalog manager, will retry
I20250411 13:57:43.207808 17087 leader_election.cc:290] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers a9054441181f4a8abb031d6843529936 (127.15.113.62:46181), 0cee234f25ab4c62a65050963ad4c5a7 (127.15.113.61:32907)
I20250411 13:57:43.211815 17087 raft_consensus.cc:2802] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 0 FOLLOWER]: Leader pre-election won for term 1
I20250411 13:57:43.212168 17087 raft_consensus.cc:491] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 0 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
I20250411 13:57:43.212459 17087 raft_consensus.cc:3058] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:43.217299 16938 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936: No bootstrap required, opened a new log
I20250411 13:57:43.219331 17087 raft_consensus.cc:513] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 1 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:43.221307 17087 leader_election.cc:290] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [CANDIDATE]: Term 1 election: Requested vote from peers a9054441181f4a8abb031d6843529936 (127.15.113.62:46181), 0cee234f25ab4c62a65050963ad4c5a7 (127.15.113.61:32907)
I20250411 13:57:43.221450 16938 raft_consensus.cc:357] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:43.222030 16938 raft_consensus.cc:383] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:43.222308 16938 raft_consensus.cc:738] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: a9054441181f4a8abb031d6843529936, State: Initialized, Role: FOLLOWER
I20250411 13:57:43.222368 16978 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "ae43c70baafa4136a45e1aa583fda7b1" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "0cee234f25ab4c62a65050963ad4c5a7"
I20250411 13:57:43.222955 16978 raft_consensus.cc:3058] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:43.223058 16938 consensus_queue.cc:260] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:43.225384 17101 sys_catalog.cc:455] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } } }
I20250411 13:57:43.226106 17101 sys_catalog.cc:458] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:43.227034 16938 sys_catalog.cc:564] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:43.227268 16913 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "ae43c70baafa4136a45e1aa583fda7b1" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "a9054441181f4a8abb031d6843529936" is_pre_election: true
I20250411 13:57:43.227268 16912 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "ae43c70baafa4136a45e1aa583fda7b1" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "a9054441181f4a8abb031d6843529936"
I20250411 13:57:43.228029 16913 raft_consensus.cc:2466] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate ae43c70baafa4136a45e1aa583fda7b1 in term 0.
I20250411 13:57:43.230124 16978 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate ae43c70baafa4136a45e1aa583fda7b1 in term 1.
I20250411 13:57:43.231336 17017 leader_election.cc:304] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [CANDIDATE]: Term 1 election: Election decided. Result: candidate won. Election summary: received 3 responses out of 3 voters: 2 yes votes; 1 no votes. yes voters: 0cee234f25ab4c62a65050963ad4c5a7, ae43c70baafa4136a45e1aa583fda7b1; no voters: a9054441181f4a8abb031d6843529936
I20250411 13:57:43.231973 17087 raft_consensus.cc:2802] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 1 FOLLOWER]: Leader election won for term 1
I20250411 13:57:43.232549 17087 raft_consensus.cc:695] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 1 LEADER]: Becoming Leader. State: Replica: ae43c70baafa4136a45e1aa583fda7b1, State: Running, Role: LEADER
I20250411 13:57:43.233589 17087 consensus_queue.cc:237] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [LEADER]: Queue going to LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 1, Majority size: 2, State: 0, Mode: LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:43.238526 17099 sys_catalog.cc:455] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [sys.catalog]: SysCatalogTable state changed. Reason: New leader ae43c70baafa4136a45e1aa583fda7b1. Latest consensus state: current_term: 1 leader_uuid: "ae43c70baafa4136a45e1aa583fda7b1" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } } }
I20250411 13:57:43.239524 17099 sys_catalog.cc:458] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [sys.catalog]: This master's current role is: LEADER
I20250411 13:57:43.241286 17113 catalog_manager.cc:1477] Loading table and tablet metadata into memory...
I20250411 13:57:43.242529 15812 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 1
I20250411 13:57:43.242838 15812 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 2
I20250411 13:57:43.244511 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:43.245419 17114 catalog_manager.cc:1560] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936: loading cluster ID for follower catalog manager: Not found: cluster ID entry not found
W20250411 13:57:43.245699 17114 catalog_manager.cc:875] Not found: cluster ID entry not found: failed to prepare follower catalog manager, will retry
I20250411 13:57:43.248229 17113 catalog_manager.cc:1486] Initializing Kudu cluster ID...
W20250411 13:57:43.248323 16957 tablet.cc:2367] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7: Can't schedule compaction. Clean time has not been advanced past its initial value.
W20250411 13:57:43.251703 17115 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:43.252388 17116 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:43.255089 17118 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:43.255554 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:43.256614 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:43.256876 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:43.257066 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379863257048 us; error 0 us; skew 500 ppm
I20250411 13:57:43.257742 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:43.261080 16978 raft_consensus.cc:1273] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [term 1 FOLLOWER]: Refusing update from remote peer ae43c70baafa4136a45e1aa583fda7b1: Log matching property violated. Preceding OpId in replica: term: 0 index: 0. Preceding OpId from leader: term: 1 index: 2. (index mismatch)
I20250411 13:57:43.261121 16913 raft_consensus.cc:3058] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:43.262733 17099 consensus_queue.cc:1035] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [LEADER]: Connected to new peer: Peer: permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:57:43.264153 15812 webserver.cc:466] Webserver started at http://127.15.113.1:43851/ using document root <none> and password file <none>
I20250411 13:57:43.264981 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:43.265249 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:43.265568 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:43.267097 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-0-root/instance:
uuid: "ace3562fd6d14838b31ec3b70d1bc687"
format_stamp: "Formatted at 2025-04-11 13:57:43 on dist-test-slave-jcj2"
I20250411 13:57:43.269671 16913 raft_consensus.cc:1273] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 1 FOLLOWER]: Refusing update from remote peer ae43c70baafa4136a45e1aa583fda7b1: Log matching property violated. Preceding OpId in replica: term: 0 index: 0. Preceding OpId from leader: term: 1 index: 2. (index mismatch)
I20250411 13:57:43.271238 17099 consensus_queue.cc:1035] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [LEADER]: Connected to new peer: Peer: permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:57:43.272315 17074 sys_catalog.cc:455] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [sys.catalog]: SysCatalogTable state changed. Reason: New leader ae43c70baafa4136a45e1aa583fda7b1. Latest consensus state: current_term: 1 leader_uuid: "ae43c70baafa4136a45e1aa583fda7b1" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } } }
I20250411 13:57:43.272967 17074 sys_catalog.cc:458] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:43.276374 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.009s	user 0.008s	sys 0.000s
I20250411 13:57:43.283998 17099 sys_catalog.cc:455] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [sys.catalog]: SysCatalogTable state changed. Reason: Peer health change. Latest consensus state: current_term: 1 leader_uuid: "ae43c70baafa4136a45e1aa583fda7b1" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } } }
I20250411 13:57:43.285107 17099 sys_catalog.cc:458] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [sys.catalog]: This master's current role is: LEADER
I20250411 13:57:43.287151 17113 catalog_manager.cc:1349] Generated new cluster ID: 4cf7324170fb47c9b1bba0891a8eebdf
I20250411 13:57:43.287526 17113 catalog_manager.cc:1497] Initializing Kudu internal certificate authority...
I20250411 13:57:43.291050 17129 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:43.291236 17074 sys_catalog.cc:455] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [sys.catalog]: SysCatalogTable state changed. Reason: Replicated consensus-only round. Latest consensus state: current_term: 1 leader_uuid: "ae43c70baafa4136a45e1aa583fda7b1" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } } }
I20250411 13:57:43.292060 17074 sys_catalog.cc:458] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:43.293524 15812 fs_manager.cc:730] Time spent opening block manager: real 0.014s	user 0.001s	sys 0.008s
I20250411 13:57:43.293969 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-0-root
uuid: "ace3562fd6d14838b31ec3b70d1bc687"
format_stamp: "Formatted at 2025-04-11 13:57:43 on dist-test-slave-jcj2"
I20250411 13:57:43.294354 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-0-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-0-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-0-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:43.294907 17101 sys_catalog.cc:455] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [sys.catalog]: SysCatalogTable state changed. Reason: New leader ae43c70baafa4136a45e1aa583fda7b1. Latest consensus state: current_term: 1 leader_uuid: "ae43c70baafa4136a45e1aa583fda7b1" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } } }
I20250411 13:57:43.295619 17101 sys_catalog.cc:458] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:43.298488 17099 sys_catalog.cc:455] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [sys.catalog]: SysCatalogTable state changed. Reason: Peer health change. Latest consensus state: current_term: 1 leader_uuid: "ae43c70baafa4136a45e1aa583fda7b1" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } } }
I20250411 13:57:43.299180 17099 sys_catalog.cc:458] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [sys.catalog]: This master's current role is: LEADER
I20250411 13:57:43.303881 17101 sys_catalog.cc:455] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [sys.catalog]: SysCatalogTable state changed. Reason: Replicated consensus-only round. Latest consensus state: current_term: 1 leader_uuid: "ae43c70baafa4136a45e1aa583fda7b1" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } } }
I20250411 13:57:43.304857 17101 sys_catalog.cc:458] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:43.312315 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:43.313558 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:43.315078 15812 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:43.317315 15812 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:43.317538 15812 ts_tablet_manager.cc:525] Time spent load tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:43.317746 15812 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:43.317910 15812 ts_tablet_manager.cc:589] Time spent register tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:43.336087 17113 catalog_manager.cc:1372] Generated new certificate authority record
I20250411 13:57:43.338199 17113 catalog_manager.cc:1506] Loading token signing keys...
I20250411 13:57:43.364106 17113 catalog_manager.cc:5954] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1: Generated new TSK 0
I20250411 13:57:43.365005 17113 catalog_manager.cc:1516] Initializing in-progress tserver states...
I20250411 13:57:43.381068 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.1:34563
I20250411 13:57:43.381158 17194 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.1:34563 every 8 connection(s)
I20250411 13:57:43.392941 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
I20250411 13:57:43.402709 17197 heartbeater.cc:344] Connected to a master server at 127.15.113.62:46181
I20250411 13:57:43.403239 17197 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:43.404413 17195 heartbeater.cc:344] Connected to a master server at 127.15.113.60:39983
I20250411 13:57:43.404398 17197 heartbeater.cc:507] Master 127.15.113.62:46181 requested a full tablet report, sending...
I20250411 13:57:43.405038 17195 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:43.406401 17195 heartbeater.cc:507] Master 127.15.113.60:39983 requested a full tablet report, sending...
I20250411 13:57:43.408506 16903 ts_manager.cc:194] Registered new tserver with Master: ace3562fd6d14838b31ec3b70d1bc687 (127.15.113.1:34563)
I20250411 13:57:43.409248 17034 ts_manager.cc:194] Registered new tserver with Master: ace3562fd6d14838b31ec3b70d1bc687 (127.15.113.1:34563)
I20250411 13:57:43.412362 17196 heartbeater.cc:344] Connected to a master server at 127.15.113.61:32907
I20250411 13:57:43.412873 17196 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:43.413710 17034 master_service.cc:496] Signed X509 certificate for tserver {username='slave'} at 127.0.0.1:37734
I20250411 13:57:43.413975 17196 heartbeater.cc:507] Master 127.15.113.61:32907 requested a full tablet report, sending...
W20250411 13:57:43.417838 17202 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:43.419745 16968 ts_manager.cc:194] Registered new tserver with Master: ace3562fd6d14838b31ec3b70d1bc687 (127.15.113.1:34563)
W20250411 13:57:43.419922 17203 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:43.423106 17205 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:43.423234 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:43.424114 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:43.424319 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:43.424510 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379863424492 us; error 0 us; skew 500 ppm
I20250411 13:57:43.425124 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:43.427459 15812 webserver.cc:466] Webserver started at http://127.15.113.2:40625/ using document root <none> and password file <none>
I20250411 13:57:43.427910 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:43.428084 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:43.428306 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:43.429389 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-1-root/instance:
uuid: "79f6c6bf17ed495d9177207734910400"
format_stamp: "Formatted at 2025-04-11 13:57:43 on dist-test-slave-jcj2"
I20250411 13:57:43.433470 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.005s	sys 0.000s
I20250411 13:57:43.436507 17210 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:43.437201 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.000s	sys 0.001s
I20250411 13:57:43.437443 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-1-root
uuid: "79f6c6bf17ed495d9177207734910400"
format_stamp: "Formatted at 2025-04-11 13:57:43 on dist-test-slave-jcj2"
I20250411 13:57:43.437672 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-1-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-1-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-1-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:43.459160 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:43.460368 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:43.461771 15812 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:43.463935 15812 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:43.464097 15812 ts_tablet_manager.cc:525] Time spent load tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:43.464342 15812 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:43.464510 15812 ts_tablet_manager.cc:589] Time spent register tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:43.502328 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.2:46269
I20250411 13:57:43.502420 17272 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.2:46269 every 8 connection(s)
I20250411 13:57:43.509513 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:43.528568 17280 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:43.530854 17281 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:43.531745 17273 heartbeater.cc:344] Connected to a master server at 127.15.113.60:39983
I20250411 13:57:43.532171 17273 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:43.533007 17273 heartbeater.cc:507] Master 127.15.113.60:39983 requested a full tablet report, sending...
I20250411 13:57:43.535642 17034 ts_manager.cc:194] Registered new tserver with Master: 79f6c6bf17ed495d9177207734910400 (127.15.113.2:46269)
W20250411 13:57:43.536698 17284 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:43.537117 17034 master_service.cc:496] Signed X509 certificate for tserver {username='slave'} at 127.0.0.1:37740
I20250411 13:57:43.537401 17275 heartbeater.cc:344] Connected to a master server at 127.15.113.62:46181
I20250411 13:57:43.537886 17275 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:43.538933 17275 heartbeater.cc:507] Master 127.15.113.62:46181 requested a full tablet report, sending...
I20250411 13:57:43.541368 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:43.541684 16903 ts_manager.cc:194] Registered new tserver with Master: 79f6c6bf17ed495d9177207734910400 (127.15.113.2:46269)
I20250411 13:57:43.542635 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:43.542938 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:43.542999 17274 heartbeater.cc:344] Connected to a master server at 127.15.113.61:32907
I20250411 13:57:43.543119 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379863543104 us; error 0 us; skew 500 ppm
I20250411 13:57:43.543385 17274 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:43.543712 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:43.543921 17274 heartbeater.cc:507] Master 127.15.113.61:32907 requested a full tablet report, sending...
I20250411 13:57:43.546452 15812 webserver.cc:466] Webserver started at http://127.15.113.3:41785/ using document root <none> and password file <none>
I20250411 13:57:43.546494 16968 ts_manager.cc:194] Registered new tserver with Master: 79f6c6bf17ed495d9177207734910400 (127.15.113.2:46269)
I20250411 13:57:43.547231 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:43.547459 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:43.547756 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:43.548647 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-2-root/instance:
uuid: "43d31dfb5f814ee1970e1e91348e55a8"
format_stamp: "Formatted at 2025-04-11 13:57:43 on dist-test-slave-jcj2"
I20250411 13:57:43.552613 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.005s	sys 0.000s
I20250411 13:57:43.555747 17288 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:43.556388 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.003s	sys 0.000s
I20250411 13:57:43.556651 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-2-root
uuid: "43d31dfb5f814ee1970e1e91348e55a8"
format_stamp: "Formatted at 2025-04-11 13:57:43 on dist-test-slave-jcj2"
I20250411 13:57:43.556879 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-2-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-2-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-2-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:43.588846 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:43.589978 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:43.591229 15812 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:43.593071 15812 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:43.593235 15812 ts_tablet_manager.cc:525] Time spent load tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:43.593495 15812 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:43.593706 15812 ts_tablet_manager.cc:589] Time spent register tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:43.631887 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.3:35775
I20250411 13:57:43.631963 17350 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.3:35775 every 8 connection(s)
I20250411 13:57:43.649420 17351 heartbeater.cc:344] Connected to a master server at 127.15.113.60:39983
I20250411 13:57:43.649832 17351 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:43.650724 17351 heartbeater.cc:507] Master 127.15.113.60:39983 requested a full tablet report, sending...
I20250411 13:57:43.651648 17353 heartbeater.cc:344] Connected to a master server at 127.15.113.62:46181
I20250411 13:57:43.652053 17353 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:43.652774 17034 ts_manager.cc:194] Registered new tserver with Master: 43d31dfb5f814ee1970e1e91348e55a8 (127.15.113.3:35775)
I20250411 13:57:43.652899 17353 heartbeater.cc:507] Master 127.15.113.62:46181 requested a full tablet report, sending...
I20250411 13:57:43.654475 17034 master_service.cc:496] Signed X509 certificate for tserver {username='slave'} at 127.0.0.1:37746
I20250411 13:57:43.655378 16903 ts_manager.cc:194] Registered new tserver with Master: 43d31dfb5f814ee1970e1e91348e55a8 (127.15.113.3:35775)
I20250411 13:57:43.656702 17352 heartbeater.cc:344] Connected to a master server at 127.15.113.61:32907
I20250411 13:57:43.657035 17352 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:43.657472 17352 heartbeater.cc:507] Master 127.15.113.61:32907 requested a full tablet report, sending...
I20250411 13:57:43.659579 16968 ts_manager.cc:194] Registered new tserver with Master: 43d31dfb5f814ee1970e1e91348e55a8 (127.15.113.3:35775)
I20250411 13:57:43.660013 15812 internal_mini_cluster.cc:371] 3 TS(s) registered with all masters after 0.020563848s
I20250411 13:57:43.662179 15812 tablet_server.cc:178] TabletServer@127.15.113.1:0 shutting down...
I20250411 13:57:43.677392 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:43.693508 15812 tablet_server.cc:195] TabletServer@127.15.113.1:0 shutdown complete.
I20250411 13:57:43.700914 15812 tablet_server.cc:178] TabletServer@127.15.113.2:0 shutting down...
I20250411 13:57:43.715808 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:43.731572 15812 tablet_server.cc:195] TabletServer@127.15.113.2:0 shutdown complete.
I20250411 13:57:43.738384 15812 tablet_server.cc:178] TabletServer@127.15.113.3:0 shutting down...
I20250411 13:57:43.753142 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:43.769062 15812 tablet_server.cc:195] TabletServer@127.15.113.3:0 shutdown complete.
I20250411 13:57:43.775900 15812 master.cc:561] Master@127.15.113.62:46181 shutting down...
I20250411 13:57:43.787611 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 1 FOLLOWER]: Raft consensus shutting down.
I20250411 13:57:43.788116 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:43.788421 15812 tablet_replica.cc:331] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936: stopping tablet replica
I20250411 13:57:43.806493 15812 master.cc:583] Master@127.15.113.62:46181 shutdown complete.
I20250411 13:57:43.816890 15812 master.cc:561] Master@127.15.113.61:32907 shutting down...
I20250411 13:57:43.828603 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [term 1 FOLLOWER]: Raft consensus shutting down.
I20250411 13:57:43.829164 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:43.829561 15812 tablet_replica.cc:331] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7: stopping tablet replica
W20250411 13:57:43.829641 17018 consensus_peers.cc:487] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 -> Peer a9054441181f4a8abb031d6843529936 (127.15.113.62:46181): Couldn't send request to peer a9054441181f4a8abb031d6843529936. Status: Network error: Client connection negotiation failed: client connection to 127.15.113.62:46181: connect: Connection refused (error 111). This is attempt 1: this message will repeat every 5th retry.
I20250411 13:57:43.839437 15812 master.cc:583] Master@127.15.113.61:32907 shutdown complete.
W20250411 13:57:43.844595 17017 consensus_peers.cc:487] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 -> Peer 0cee234f25ab4c62a65050963ad4c5a7 (127.15.113.61:32907): Couldn't send request to peer 0cee234f25ab4c62a65050963ad4c5a7. Status: Network error: Client connection negotiation failed: client connection to 127.15.113.61:32907: connect: Connection refused (error 111). This is attempt 1: this message will repeat every 5th retry.
I20250411 13:57:43.848913 15812 master.cc:561] Master@127.15.113.60:39983 shutting down...
I20250411 13:57:43.861645 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 1 LEADER]: Raft consensus shutting down.
I20250411 13:57:43.862412 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:43.862834 15812 tablet_replica.cc:331] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1: stopping tablet replica
I20250411 13:57:43.880569 15812 master.cc:583] Master@127.15.113.60:39983 shutdown complete.
I20250411 13:57:43.889489 17361 master_replication-itest.cc:118] Sleeping for 1000 ms...
I20250411 13:57:44.890044 17361 master_replication-itest.cc:120] Attempting to start the cluster...
I20250411 13:57:44.891431 17361 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:44.896560 17369 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:44.897521 17370 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:44.898937 17372 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:44.899505 17361 server_base.cc:1034] running on GCE node
I20250411 13:57:44.900295 17361 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:44.900475 17361 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:44.900641 17361 hybrid_clock.cc:648] HybridClock initialized: now 1744379864900623 us; error 0 us; skew 500 ppm
I20250411 13:57:44.901211 17361 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:44.903848 17361 webserver.cc:466] Webserver started at http://127.15.113.62:41505/ using document root <none> and password file <none>
I20250411 13:57:44.904353 17361 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:44.904551 17361 fs_manager.cc:365] Using existing metadata directory in first data directory
I20250411 13:57:44.908103 17361 fs_manager.cc:714] Time spent opening directory manager: real 0.003s	user 0.004s	sys 0.000s
I20250411 13:57:44.911115 17377 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:44.911849 17361 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.003s	sys 0.000s
I20250411 13:57:44.912125 17361 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-0-root
uuid: "a9054441181f4a8abb031d6843529936"
format_stamp: "Formatted at 2025-04-11 13:57:42 on dist-test-slave-jcj2"
I20250411 13:57:44.912406 17361 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-0-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-0-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-0-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:44.928519 17361 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:44.929605 17361 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:44.966487 17361 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.62:46181
I20250411 13:57:44.966552 17428 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.62:46181 every 8 connection(s)
I20250411 13:57:44.970631 17361 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:44.976047 17431 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:44.976634 17429 sys_catalog.cc:263] Verifying existing consensus state
W20250411 13:57:44.977092 17432 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:44.980108 17434 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:44.980410 17361 server_base.cc:1034] running on GCE node
I20250411 13:57:44.981251 17361 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:44.981469 17361 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:44.981630 17361 hybrid_clock.cc:648] HybridClock initialized: now 1744379864981615 us; error 0 us; skew 500 ppm
I20250411 13:57:44.982175 17361 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:44.982929 17429 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936: Bootstrap starting.
I20250411 13:57:44.984903 17361 webserver.cc:466] Webserver started at http://127.15.113.61:37415/ using document root <none> and password file <none>
I20250411 13:57:44.985451 17361 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:44.985656 17361 fs_manager.cc:365] Using existing metadata directory in first data directory
I20250411 13:57:44.989465 17361 fs_manager.cc:714] Time spent opening directory manager: real 0.003s	user 0.004s	sys 0.000s
I20250411 13:57:44.992983 17440 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:44.993868 17361 fs_manager.cc:730] Time spent opening block manager: real 0.003s	user 0.002s	sys 0.000s
I20250411 13:57:44.994150 17361 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-1-root
uuid: "0cee234f25ab4c62a65050963ad4c5a7"
format_stamp: "Formatted at 2025-04-11 13:57:42 on dist-test-slave-jcj2"
I20250411 13:57:44.994428 17361 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-1-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-1-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-1-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:45.004861 17429 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936: Bootstrap replayed 1/1 log segments. Stats: ops{read=4 overwritten=0 applied=4 ignored=0} inserts{seen=3 ignored=0} mutations{seen=0 ignored=0} orphaned_commits=0. Pending: 0 replicates
I20250411 13:57:45.005686 17429 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936: Bootstrap complete.
I20250411 13:57:45.007916 17429 raft_consensus.cc:357] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 1 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:45.008410 17429 raft_consensus.cc:738] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 1 FOLLOWER]: Becoming Follower/Learner. State: Replica: a9054441181f4a8abb031d6843529936, State: Initialized, Role: FOLLOWER
I20250411 13:57:45.009011 17429 consensus_queue.cc:260] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 4, Last appended: 1.4, Last appended by leader: 4, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:45.010747 17445 sys_catalog.cc:455] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 1 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } } }
I20250411 13:57:45.011464 17445 sys_catalog.cc:458] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:45.012591 17429 sys_catalog.cc:564] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:45.028724 17361 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:45.030359 17361 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:45.033190 17457 catalog_manager.cc:1261] Loaded cluster ID: 4cf7324170fb47c9b1bba0891a8eebdf
I20250411 13:57:45.033526 17457 catalog_manager.cc:1554] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936: loading cluster ID for follower catalog manager: success
I20250411 13:57:45.037621 17457 catalog_manager.cc:1576] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936: acquiring CA information for follower catalog manager: success
I20250411 13:57:45.040985 17457 catalog_manager.cc:1604] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936: importing token verification keys for follower catalog manager: success; most recent TSK sequence number 0
I20250411 13:57:45.066426 17361 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.61:32907
I20250411 13:57:45.066484 17504 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.61:32907 every 8 connection(s)
I20250411 13:57:45.070700 17361 file_cache.cc:492] Constructed file cache file cache with capacity 419430
I20250411 13:57:45.074740 17505 sys_catalog.cc:263] Verifying existing consensus state
W20250411 13:57:45.076334 17507 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:45.077999 17508 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:45.082630 17505 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7: Bootstrap starting.
W20250411 13:57:45.083942 17510 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:45.084553 17361 server_base.cc:1034] running on GCE node
I20250411 13:57:45.085595 17361 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:45.085883 17361 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:45.086090 17361 hybrid_clock.cc:648] HybridClock initialized: now 1744379865086071 us; error 0 us; skew 500 ppm
I20250411 13:57:45.086949 17361 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:45.090144 17361 webserver.cc:466] Webserver started at http://127.15.113.60:45763/ using document root <none> and password file <none>
I20250411 13:57:45.090787 17361 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:45.091032 17361 fs_manager.cc:365] Using existing metadata directory in first data directory
I20250411 13:57:45.095793 17361 fs_manager.cc:714] Time spent opening directory manager: real 0.004s	user 0.001s	sys 0.003s
I20250411 13:57:45.099818 17517 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:45.100682 17361 fs_manager.cc:730] Time spent opening block manager: real 0.003s	user 0.004s	sys 0.000s
I20250411 13:57:45.101012 17361 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-2-root
uuid: "ae43c70baafa4136a45e1aa583fda7b1"
format_stamp: "Formatted at 2025-04-11 13:57:43 on dist-test-slave-jcj2"
I20250411 13:57:45.101361 17361 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-2-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-2-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-2-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:45.108779 17505 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7: Bootstrap replayed 1/1 log segments. Stats: ops{read=4 overwritten=0 applied=4 ignored=0} inserts{seen=3 ignored=0} mutations{seen=0 ignored=0} orphaned_commits=0. Pending: 0 replicates
I20250411 13:57:45.109630 17505 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7: Bootstrap complete.
I20250411 13:57:45.112329 17505 raft_consensus.cc:357] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [term 1 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:45.112998 17505 raft_consensus.cc:738] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [term 1 FOLLOWER]: Becoming Follower/Learner. State: Replica: 0cee234f25ab4c62a65050963ad4c5a7, State: Initialized, Role: FOLLOWER
I20250411 13:57:45.113679 17505 consensus_queue.cc:260] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 4, Last appended: 1.4, Last appended by leader: 4, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:45.115500 17522 sys_catalog.cc:455] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 1 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } } }
I20250411 13:57:45.116048 17522 sys_catalog.cc:458] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:45.116504 17361 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:45.117179 17505 sys_catalog.cc:564] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:45.118005 17361 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:45.132774 17535 catalog_manager.cc:1261] Loaded cluster ID: 4cf7324170fb47c9b1bba0891a8eebdf
I20250411 13:57:45.133037 17535 catalog_manager.cc:1554] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7: loading cluster ID for follower catalog manager: success
I20250411 13:57:45.137599 17535 catalog_manager.cc:1576] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7: acquiring CA information for follower catalog manager: success
I20250411 13:57:45.141475 17535 catalog_manager.cc:1604] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7: importing token verification keys for follower catalog manager: success; most recent TSK sequence number 0
I20250411 13:57:45.161787 17361 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.60:39983
I20250411 13:57:45.161864 17580 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.60:39983 every 8 connection(s)
I20250411 13:57:45.164597 17361 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 0
I20250411 13:57:45.164858 17361 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 1
I20250411 13:57:45.165036 17361 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 2
I20250411 13:57:45.169812 17581 sys_catalog.cc:263] Verifying existing consensus state
I20250411 13:57:45.173995 17581 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1: Bootstrap starting.
I20250411 13:57:45.193883 17581 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1: Bootstrap replayed 1/1 log segments. Stats: ops{read=4 overwritten=0 applied=4 ignored=0} inserts{seen=3 ignored=0} mutations{seen=0 ignored=0} orphaned_commits=0. Pending: 0 replicates
I20250411 13:57:45.194666 17581 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1: Bootstrap complete.
I20250411 13:57:45.197069 17581 raft_consensus.cc:357] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 1 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:45.197641 17581 raft_consensus.cc:738] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 1 FOLLOWER]: Becoming Follower/Learner. State: Replica: ae43c70baafa4136a45e1aa583fda7b1, State: Initialized, Role: FOLLOWER
I20250411 13:57:45.198323 17581 consensus_queue.cc:260] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 4, Last appended: 1.4, Last appended by leader: 4, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:45.200148 17585 sys_catalog.cc:455] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 1 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } } }
I20250411 13:57:45.200922 17585 sys_catalog.cc:458] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:45.201601 17581 sys_catalog.cc:564] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:45.211982 17361 file_cache.cc:492] Constructed file cache file cache with capacity 419430
I20250411 13:57:45.214149 17596 catalog_manager.cc:1261] Loaded cluster ID: 4cf7324170fb47c9b1bba0891a8eebdf
I20250411 13:57:45.214412 17596 catalog_manager.cc:1554] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1: loading cluster ID for follower catalog manager: success
W20250411 13:57:45.218889 17597 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:45.220093 17598 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:45.220525 17596 catalog_manager.cc:1576] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1: acquiring CA information for follower catalog manager: success
W20250411 13:57:45.222569 17600 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:45.222949 17361 server_base.cc:1034] running on GCE node
I20250411 13:57:45.223762 17361 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:45.223937 17361 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:45.224100 17361 hybrid_clock.cc:648] HybridClock initialized: now 1744379865224081 us; error 0 us; skew 500 ppm
I20250411 13:57:45.224663 17361 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:45.225096 17596 catalog_manager.cc:1604] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1: importing token verification keys for follower catalog manager: success; most recent TSK sequence number 0
I20250411 13:57:45.227121 17361 webserver.cc:466] Webserver started at http://127.15.113.1:39143/ using document root <none> and password file <none>
I20250411 13:57:45.227596 17361 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:45.227763 17361 fs_manager.cc:365] Using existing metadata directory in first data directory
I20250411 13:57:45.230922 17361 fs_manager.cc:714] Time spent opening directory manager: real 0.002s	user 0.000s	sys 0.003s
I20250411 13:57:45.233278 17605 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:45.233994 17361 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.002s	sys 0.000s
I20250411 13:57:45.234273 17361 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-0-root
uuid: "ace3562fd6d14838b31ec3b70d1bc687"
format_stamp: "Formatted at 2025-04-11 13:57:43 on dist-test-slave-jcj2"
I20250411 13:57:45.234555 17361 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-0-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-0-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-0-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:45.261155 17361 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:45.262586 17361 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:45.264400 17361 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:45.266793 17361 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:45.267027 17361 ts_tablet_manager.cc:525] Time spent load tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:45.267242 17361 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:45.267391 17361 ts_tablet_manager.cc:589] Time spent register tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:45.303771 17361 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.1:40311
I20250411 13:57:45.303895 17667 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.1:40311 every 8 connection(s)
I20250411 13:57:45.311297 17361 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:45.326121 17676 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:45.325532 17675 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:45.329843 17668 heartbeater.cc:344] Connected to a master server at 127.15.113.60:39983
I20250411 13:57:45.330242 17668 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:45.330991 17670 heartbeater.cc:344] Connected to a master server at 127.15.113.62:46181
I20250411 13:57:45.331080 17668 heartbeater.cc:507] Master 127.15.113.60:39983 requested a full tablet report, sending...
I20250411 13:57:45.331621 17670 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:45.332693 17670 heartbeater.cc:507] Master 127.15.113.62:46181 requested a full tablet report, sending...
I20250411 13:57:45.333897 17546 ts_manager.cc:194] Registered new tserver with Master: ace3562fd6d14838b31ec3b70d1bc687 (127.15.113.1:40311)
I20250411 13:57:45.336097 17394 ts_manager.cc:194] Registered new tserver with Master: ace3562fd6d14838b31ec3b70d1bc687 (127.15.113.1:40311)
W20250411 13:57:45.340263 17678 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:45.340618 17361 server_base.cc:1034] running on GCE node
I20250411 13:57:45.341034 17669 heartbeater.cc:344] Connected to a master server at 127.15.113.61:32907
I20250411 13:57:45.341470 17669 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:45.341825 17361 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:45.342090 17361 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:45.342307 17361 hybrid_clock.cc:648] HybridClock initialized: now 1744379865342285 us; error 0 us; skew 500 ppm
I20250411 13:57:45.342316 17669 heartbeater.cc:507] Master 127.15.113.61:32907 requested a full tablet report, sending...
I20250411 13:57:45.343222 17361 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:45.344679 17470 ts_manager.cc:194] Registered new tserver with Master: ace3562fd6d14838b31ec3b70d1bc687 (127.15.113.1:40311)
I20250411 13:57:45.347103 17361 webserver.cc:466] Webserver started at http://127.15.113.2:38291/ using document root <none> and password file <none>
I20250411 13:57:45.347576 17361 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:45.347738 17361 fs_manager.cc:365] Using existing metadata directory in first data directory
I20250411 13:57:45.350986 17361 fs_manager.cc:714] Time spent opening directory manager: real 0.003s	user 0.003s	sys 0.000s
I20250411 13:57:45.353431 17683 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:45.354130 17361 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.002s	sys 0.000s
I20250411 13:57:45.354403 17361 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-1-root
uuid: "79f6c6bf17ed495d9177207734910400"
format_stamp: "Formatted at 2025-04-11 13:57:43 on dist-test-slave-jcj2"
I20250411 13:57:45.354672 17361 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-1-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-1-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-1-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:45.365638 17361 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:45.366735 17361 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:45.368120 17361 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:45.370291 17361 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:45.370481 17361 ts_tablet_manager.cc:525] Time spent load tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:45.370692 17361 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:45.370849 17361 ts_tablet_manager.cc:589] Time spent registering tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:45.406296 17361 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.2:33073
I20250411 13:57:45.406389 17745 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.2:33073 every 8 connection(s)
I20250411 13:57:45.412938 17361 file_cache.cc:492] Constructed file cache with capacity 419430
W20250411 13:57:45.423658 17753 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:45.430778 17746 heartbeater.cc:344] Connected to a master server at 127.15.113.60:39983
I20250411 13:57:45.431289 17746 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:45.432034 17748 heartbeater.cc:344] Connected to a master server at 127.15.113.62:46181
I20250411 13:57:45.432169 17746 heartbeater.cc:507] Master 127.15.113.60:39983 requested a full tablet report, sending...
I20250411 13:57:45.432585 17748 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:45.433688 17748 heartbeater.cc:507] Master 127.15.113.62:46181 requested a full tablet report, sending...
I20250411 13:57:45.435142 17546 ts_manager.cc:194] Registered new tserver with Master: 79f6c6bf17ed495d9177207734910400 (127.15.113.2:33073)
I20250411 13:57:45.436586 17747 heartbeater.cc:344] Connected to a master server at 127.15.113.61:32907
I20250411 13:57:45.437038 17747 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:45.437054 17394 ts_manager.cc:194] Registered new tserver with Master: 79f6c6bf17ed495d9177207734910400 (127.15.113.2:33073)
I20250411 13:57:45.438184 17747 heartbeater.cc:507] Master 127.15.113.61:32907 requested a full tablet report, sending...
I20250411 13:57:45.441511 17470 ts_manager.cc:194] Registered new tserver with Master: 79f6c6bf17ed495d9177207734910400 (127.15.113.2:33073)
W20250411 13:57:45.441991 17754 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:45.443434 17756 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:45.444236 17361 server_base.cc:1034] running on GCE node
I20250411 13:57:45.445025 17361 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:45.445220 17361 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:45.445394 17361 hybrid_clock.cc:648] HybridClock initialized: now 1744379865445375 us; error 0 us; skew 500 ppm
I20250411 13:57:45.445894 17361 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:45.448276 17361 webserver.cc:466] Webserver started at http://127.15.113.3:46349/ using document root <none> and password file <none>
I20250411 13:57:45.448693 17361 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:45.448855 17361 fs_manager.cc:365] Using existing metadata directory in first data directory
I20250411 13:57:45.451974 17361 fs_manager.cc:714] Time spent opening directory manager: real 0.002s	user 0.002s	sys 0.001s
I20250411 13:57:45.454594 17761 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:45.455350 17361 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.002s	sys 0.001s
I20250411 13:57:45.455632 17361 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-2-root
uuid: "43d31dfb5f814ee1970e1e91348e55a8"
format_stamp: "Formatted at 2025-04-11 13:57:43 on dist-test-slave-jcj2"
I20250411 13:57:45.455920 17361 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-2-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-2-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-2-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:45.468979 17361 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:45.470039 17361 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:45.471386 17361 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:45.473709 17361 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:45.473995 17361 ts_tablet_manager.cc:525] Time spent loading tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:45.474237 17361 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:45.474433 17361 ts_tablet_manager.cc:589] Time spent registering tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:45.512565 17361 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.3:42833
I20250411 13:57:45.512660 17823 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.3:42833 every 8 connection(s)
I20250411 13:57:45.531224 17826 heartbeater.cc:344] Connected to a master server at 127.15.113.62:46181
I20250411 13:57:45.531351 17824 heartbeater.cc:344] Connected to a master server at 127.15.113.60:39983
I20250411 13:57:45.531612 17826 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:45.531788 17824 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:45.532507 17826 heartbeater.cc:507] Master 127.15.113.62:46181 requested a full tablet report, sending...
I20250411 13:57:45.532642 17825 heartbeater.cc:344] Connected to a master server at 127.15.113.61:32907
I20250411 13:57:45.532693 17824 heartbeater.cc:507] Master 127.15.113.60:39983 requested a full tablet report, sending...
I20250411 13:57:45.533093 17825 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:45.533998 17825 heartbeater.cc:507] Master 127.15.113.61:32907 requested a full tablet report, sending...
I20250411 13:57:45.535626 17545 ts_manager.cc:194] Registered new tserver with Master: 43d31dfb5f814ee1970e1e91348e55a8 (127.15.113.3:42833)
I20250411 13:57:45.537974 17470 ts_manager.cc:194] Registered new tserver with Master: 43d31dfb5f814ee1970e1e91348e55a8 (127.15.113.3:42833)
I20250411 13:57:45.538990 17394 ts_manager.cc:194] Registered new tserver with Master: 43d31dfb5f814ee1970e1e91348e55a8 (127.15.113.3:42833)
I20250411 13:57:45.539096 17361 internal_mini_cluster.cc:371] 3 TS(s) registered with all masters after 0.021000147s
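The "3 TS(s) registered with all masters after 0.021000147s" line above is the end of a registration wait. A hedged sketch of such a poll-until-registered loop (the callback and helper names are invented for illustration; this is not the InternalMiniCluster code):

    #include <chrono>
    #include <functional>
    #include <thread>

    // Poll `count_registered` until it reports at least `expected` tablet
    // servers or `timeout` elapses. Returns true on success.
    bool WaitForTabletServers(const std::function<int()>& count_registered,
                              int expected, std::chrono::milliseconds timeout) {
      const auto deadline = std::chrono::steady_clock::now() + timeout;
      while (std::chrono::steady_clock::now() < deadline) {
        if (count_registered() >= expected) {
          return true;  // all tablet servers have registered with all masters
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
      }
      return false;  // timed out
    }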
I20250411 13:57:46.357409 17832 raft_consensus.cc:491] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 1 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:57:46.357858 17832 raft_consensus.cc:513] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 1 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:46.359851 17832 leader_election.cc:290] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [CANDIDATE]: Term 2 pre-election: Requested pre-vote from peers 0cee234f25ab4c62a65050963ad4c5a7 (127.15.113.61:32907), ae43c70baafa4136a45e1aa583fda7b1 (127.15.113.60:39983)
I20250411 13:57:46.370255 17480 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "a9054441181f4a8abb031d6843529936" candidate_term: 2 candidate_status { last_received { term: 1 index: 4 } } ignore_live_leader: false dest_uuid: "0cee234f25ab4c62a65050963ad4c5a7" is_pre_election: true
I20250411 13:57:46.370256 17556 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "a9054441181f4a8abb031d6843529936" candidate_term: 2 candidate_status { last_received { term: 1 index: 4 } } ignore_live_leader: false dest_uuid: "ae43c70baafa4136a45e1aa583fda7b1" is_pre_election: true
I20250411 13:57:46.370963 17480 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [term 1 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate a9054441181f4a8abb031d6843529936 in term 1.
I20250411 13:57:46.371006 17556 raft_consensus.cc:2466] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 1 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate a9054441181f4a8abb031d6843529936 in term 1.
I20250411 13:57:46.371865 17378 leader_election.cc:304] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [CANDIDATE]: Term 2 pre-election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 0cee234f25ab4c62a65050963ad4c5a7, a9054441181f4a8abb031d6843529936; no voters: 
I20250411 13:57:46.372447 17832 raft_consensus.cc:2802] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 1 FOLLOWER]: Leader pre-election won for term 2
I20250411 13:57:46.372718 17832 raft_consensus.cc:491] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 1 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
I20250411 13:57:46.372948 17832 raft_consensus.cc:3058] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 1 FOLLOWER]: Advancing to term 2
I20250411 13:57:46.377450 17832 raft_consensus.cc:513] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 2 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:46.378787 17832 leader_election.cc:290] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [CANDIDATE]: Term 2 election: Requested vote from peers 0cee234f25ab4c62a65050963ad4c5a7 (127.15.113.61:32907), ae43c70baafa4136a45e1aa583fda7b1 (127.15.113.60:39983)
I20250411 13:57:46.379477 17480 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "a9054441181f4a8abb031d6843529936" candidate_term: 2 candidate_status { last_received { term: 1 index: 4 } } ignore_live_leader: false dest_uuid: "0cee234f25ab4c62a65050963ad4c5a7"
I20250411 13:57:46.379673 17556 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "a9054441181f4a8abb031d6843529936" candidate_term: 2 candidate_status { last_received { term: 1 index: 4 } } ignore_live_leader: false dest_uuid: "ae43c70baafa4136a45e1aa583fda7b1"
I20250411 13:57:46.379918 17480 raft_consensus.cc:3058] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [term 1 FOLLOWER]: Advancing to term 2
I20250411 13:57:46.380173 17556 raft_consensus.cc:3058] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 1 FOLLOWER]: Advancing to term 2
I20250411 13:57:46.384490 17480 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [term 2 FOLLOWER]: Leader election vote request: Granting yes vote for candidate a9054441181f4a8abb031d6843529936 in term 2.
I20250411 13:57:46.384490 17556 raft_consensus.cc:2466] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 2 FOLLOWER]: Leader election vote request: Granting yes vote for candidate a9054441181f4a8abb031d6843529936 in term 2.
I20250411 13:57:46.385644 17378 leader_election.cc:304] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [CANDIDATE]: Term 2 election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 0cee234f25ab4c62a65050963ad4c5a7, a9054441181f4a8abb031d6843529936; no voters: 
I20250411 13:57:46.386253 17832 raft_consensus.cc:2802] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 2 FOLLOWER]: Leader election won for term 2
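The election summary above ("received 2 responses out of 3 voters: 2 yes votes") decides the election as soon as a majority is reached, counting the candidate's own vote. A minimal sketch of that majority test (illustrative only, not Kudu's leader_election.cc):

    #include <cassert>

    enum class ElectionResult { kWon, kLost, kUndecided };

    // With num_voters = 3 the majority size is 2, so two yes votes
    // (including the candidate's vote for itself) win the election.
    ElectionResult Decide(int num_voters, int yes_votes, int no_votes) {
      const int majority = num_voters / 2 + 1;
      if (yes_votes >= majority) return ElectionResult::kWon;
      if (no_votes >= majority) return ElectionResult::kLost;
      return ElectionResult::kUndecided;  // wait for more responses
    }

    int main() {
      assert(Decide(3, 2, 0) == ElectionResult::kWon);  // matches the log
      return 0;
    }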
I20250411 13:57:46.387292 17832 raft_consensus.cc:695] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 2 LEADER]: Becoming Leader. State: Replica: a9054441181f4a8abb031d6843529936, State: Running, Role: LEADER
I20250411 13:57:46.387913 17832 consensus_queue.cc:237] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [LEADER]: Queue going to LEADER mode. State: All replicated index: 0, Majority replicated index: 4, Committed index: 4, Last appended: 1.4, Last appended by leader: 4, Current term: 2, Majority size: 2, State: 0, Mode: LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:46.391085 17837 sys_catalog.cc:455] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [sys.catalog]: SysCatalogTable state changed. Reason: New leader a9054441181f4a8abb031d6843529936. Latest consensus state: current_term: 2 leader_uuid: "a9054441181f4a8abb031d6843529936" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } } }
I20250411 13:57:46.391777 17837 sys_catalog.cc:458] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [sys.catalog]: This master's current role is: LEADER
I20250411 13:57:46.392992 17839 catalog_manager.cc:1477] Loading table and tablet metadata into memory...
I20250411 13:57:46.397912 17839 catalog_manager.cc:1486] Initializing Kudu cluster ID...
I20250411 13:57:46.400358 17839 catalog_manager.cc:1261] Loaded cluster ID: 4cf7324170fb47c9b1bba0891a8eebdf
I20250411 13:57:46.400555 17839 catalog_manager.cc:1497] Initializing Kudu internal certificate authority...
I20250411 13:57:46.403403 17839 catalog_manager.cc:1506] Loading token signing keys...
I20250411 13:57:46.405872 17839 catalog_manager.cc:5965] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936: Loaded TSK: 0
I20250411 13:57:46.406536 17839 catalog_manager.cc:1516] Initializing in-progress tserver states...
I20250411 13:57:46.439075 15812 tablet_server.cc:178] TabletServer@127.15.113.1:0 shutting down...
I20250411 13:57:46.447396 17394 master_service.cc:496] Signed X509 certificate for tserver {username='slave'} at 127.0.0.1:45454
I20250411 13:57:46.465718 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:46.481380 15812 tablet_server.cc:195] TabletServer@127.15.113.1:0 shutdown complete.
I20250411 13:57:46.488344 15812 tablet_server.cc:178] TabletServer@127.15.113.2:0 shutting down...
I20250411 13:57:46.501500 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:46.516744 15812 tablet_server.cc:195] TabletServer@127.15.113.2:0 shutdown complete.
I20250411 13:57:46.523947 15812 tablet_server.cc:178] TabletServer@127.15.113.3:0 shutting down...
I20250411 13:57:46.537515 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:46.552973 15812 tablet_server.cc:195] TabletServer@127.15.113.3:0 shutdown complete.
I20250411 13:57:46.559880 15812 master.cc:561] Master@127.15.113.62:46181 shutting down...
I20250411 13:57:46.571949 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 2 LEADER]: Raft consensus shutting down.
I20250411 13:57:46.572535 15812 pending_rounds.cc:70] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936: Trying to abort 1 pending ops.
I20250411 13:57:46.572721 15812 pending_rounds.cc:77] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936: Aborting op as it isn't in flight: id { term: 2 index: 5 } timestamp: 7144979932730720256 op_type: NO_OP noop_request { }
I20250411 13:57:46.572990 15812 raft_consensus.cc:2887] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 2 LEADER]: NO_OP replication failed: Aborted: Op aborted
I20250411 13:57:46.573225 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 2 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:46.573603 15812 tablet_replica.cc:331] stopping tablet replica
I20250411 13:57:46.591081 15812 master.cc:583] Master@127.15.113.62:46181 shutdown complete.
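The shutdown sequence above aborts the one op still pending (the term-2 NO_OP at index 5) instead of waiting for it to replicate, and its callback fires with "Aborted: Op aborted". A self-contained sketch of that abort pass (the PendingOp type and the callback shape are assumptions for illustration, not the pending_rounds.cc code):

    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    struct PendingOp {
      int term;
      int index;
      std::function<void(const std::string&)> on_replication_failed;
    };

    // On shutdown, fail every still-pending op with an Aborted status
    // rather than leaving its callback hanging.
    void AbortPendingOps(std::vector<PendingOp>* ops) {
      std::cout << "Trying to abort " << ops->size() << " pending ops.\n";
      for (auto& op : *ops) {
        op.on_replication_failed("Aborted: Op aborted");
      }
      ops->clear();
    }

    int main() {
      std::vector<PendingOp> pending;
      pending.push_back({2, 5, [](const std::string& status) {
        std::cout << "NO_OP replication failed: " << status << "\n";
      }});
      AbortPendingOps(&pending);
      return 0;
    }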
I20250411 13:57:46.600883 15812 master.cc:561] Master@127.15.113.61:32907 shutting down...
I20250411 13:57:46.613310 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [term 2 FOLLOWER]: Raft consensus shutting down.
I20250411 13:57:46.613775 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [term 2 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:46.614059 15812 tablet_replica.cc:331] stopping tablet replica
I20250411 13:57:46.631417 15812 master.cc:583] Master@127.15.113.61:32907 shutdown complete.
I20250411 13:57:46.640805 15812 master.cc:561] Master@127.15.113.60:39983 shutting down...
I20250411 13:57:46.654116 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 2 FOLLOWER]: Raft consensus shutting down.
I20250411 13:57:46.654608 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 2 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:46.654948 15812 tablet_replica.cc:331] stopping tablet replica
I20250411 13:57:46.672351 15812 master.cc:583] Master@127.15.113.60:39983 shutdown complete.
I20250411 13:57:46.681882 17840 master_replication-itest.cc:118] Sleeping for 1000 ms...
I20250411 13:57:47.682427 17840 master_replication-itest.cc:120] Attempting to start the cluster...
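The two lines above are one iteration of the test's restart cycle: shut the cluster down, sleep, then bring it back up. A hedged standalone sketch of that loop (the cluster helpers and the cycle count are stand-ins, not the actual master_replication-itest code):

    #include <chrono>
    #include <iostream>
    #include <thread>

    // Hypothetical stand-ins for the mini cluster operations in the log.
    void ShutdownCluster() { std::cout << "cluster shutting down...\n"; }
    bool StartCluster() {
      std::cout << "Attempting to start the cluster...\n";
      return true;
    }

    int main() {
      const int kNumCycles = 3;  // assumed; the real count isn't shown in this log
      for (int i = 0; i < kNumCycles; ++i) {
        ShutdownCluster();
        std::cout << "Sleeping for 1000 ms...\n";
        std::this_thread::sleep_for(std::chrono::milliseconds(1000));
        if (!StartCluster()) return 1;
      }
      return 0;
    }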
I20250411 13:57:47.683809 17840 file_cache.cc:492] Constructed file cache with capacity 419430
W20250411 13:57:47.688853 17849 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:47.689899 17850 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:47.691326 17852 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:47.691903 17840 server_base.cc:1034] running on GCE node
I20250411 13:57:47.692727 17840 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:47.692929 17840 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:47.693086 17840 hybrid_clock.cc:648] HybridClock initialized: now 1744379867693065 us; error 0 us; skew 500 ppm
I20250411 13:57:47.693725 17840 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:47.697961 17840 webserver.cc:466] Webserver started at http://127.15.113.62:39159/ using document root <none> and password file <none>
I20250411 13:57:47.698487 17840 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:47.698681 17840 fs_manager.cc:365] Using existing metadata directory in first data directory
I20250411 13:57:47.702353 17840 fs_manager.cc:714] Time spent opening directory manager: real 0.003s	user 0.000s	sys 0.003s
I20250411 13:57:47.705268 17857 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:47.706032 17840 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.000s	sys 0.002s
I20250411 13:57:47.706312 17840 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-0-root
uuid: "a9054441181f4a8abb031d6843529936"
format_stamp: "Formatted at 2025-04-11 13:57:42 on dist-test-slave-jcj2"
I20250411 13:57:47.706609 17840 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-0-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-0-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-0-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:47.726673 17840 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:47.727870 17840 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:47.766485 17840 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.62:46181
I20250411 13:57:47.766553 17908 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.62:46181 every 8 connection(s)
I20250411 13:57:47.770834 17840 file_cache.cc:492] Constructed file cache with capacity 419430
I20250411 13:57:47.775521 17909 sys_catalog.cc:263] Verifying existing consensus state
W20250411 13:57:47.775969 17911 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:47.777129 17912 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:47.779927 17840 server_base.cc:1034] running on GCE node
W20250411 13:57:47.780311 17914 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:47.781387 17840 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:47.781646 17840 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:47.781829 17840 hybrid_clock.cc:648] HybridClock initialized: now 1744379867781811 us; error 0 us; skew 500 ppm
I20250411 13:57:47.782439 17840 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:47.782829 17909 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936: Bootstrap starting.
I20250411 13:57:47.785147 17840 webserver.cc:466] Webserver started at http://127.15.113.61:36151/ using document root <none> and password file <none>
I20250411 13:57:47.785707 17840 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:47.785894 17840 fs_manager.cc:365] Using existing metadata directory in first data directory
I20250411 13:57:47.794718 17840 fs_manager.cc:714] Time spent opening directory manager: real 0.008s	user 0.000s	sys 0.009s
I20250411 13:57:47.799497 17921 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:47.800460 17840 fs_manager.cc:730] Time spent opening block manager: real 0.003s	user 0.003s	sys 0.001s
I20250411 13:57:47.800777 17840 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-1-root
uuid: "0cee234f25ab4c62a65050963ad4c5a7"
format_stamp: "Formatted at 2025-04-11 13:57:42 on dist-test-slave-jcj2"
I20250411 13:57:47.801076 17840 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-1-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-1-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-1-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:47.810287 17909 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936: Bootstrap replayed 1/1 log segments. Stats: ops{read=5 overwritten=0 applied=4 ignored=0} inserts{seen=3 ignored=0} mutations{seen=0 ignored=0} orphaned_commits=0. Pending: 1 replicates
I20250411 13:57:47.811144 17909 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936: Bootstrap complete.
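The bootstrap stats above account for every replicate in the replayed WAL segment: of the 5 ops read, none were overwritten or ignored and 4 had commit records, so 5 - 4 = 1 replicate stays pending until the new leader's term decides it. The arithmetic as a tiny sketch (the field names are illustrative, not the tablet_bootstrap counters):

    #include <iostream>

    struct ReplayStats {
      int read = 0;
      int overwritten = 0;
      int applied = 0;
      int ignored = 0;
    };

    int main() {
      ReplayStats stats{5, 0, 4, 0};  // the values logged above
      int pending = stats.read - stats.overwritten - stats.applied - stats.ignored;
      std::cout << "Pending: " << pending << " replicates\n";  // Pending: 1
      return 0;
    }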
I20250411 13:57:47.813216 17909 raft_consensus.cc:357] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 2 FOLLOWER]: Replica starting. Triggering 1 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:47.813915 17909 raft_consensus.cc:738] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 2 FOLLOWER]: Becoming Follower/Learner. State: Replica: a9054441181f4a8abb031d6843529936, State: Initialized, Role: FOLLOWER
I20250411 13:57:47.814460 17909 consensus_queue.cc:260] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 4, Last appended: 2.5, Last appended by leader: 5, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:47.816278 17926 sys_catalog.cc:455] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 2 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } } }
I20250411 13:57:47.817008 17926 sys_catalog.cc:458] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:47.817832 17909 sys_catalog.cc:564] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:47.820389 17840 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:47.821503 17840 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:47.835204 17939 catalog_manager.cc:1261] Loaded cluster ID: 4cf7324170fb47c9b1bba0891a8eebdf
I20250411 13:57:47.835522 17939 catalog_manager.cc:1554] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936: loading cluster ID for follower catalog manager: success
I20250411 13:57:47.841490 17939 catalog_manager.cc:1576] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936: acquiring CA information for follower catalog manager: success
I20250411 13:57:47.845860 17939 catalog_manager.cc:1604] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936: importing token verification keys for follower catalog manager: success; most recent TSK sequence number 0
I20250411 13:57:47.865444 17840 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.61:32907
I20250411 13:57:47.865514 17984 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.61:32907 every 8 connection(s)
I20250411 13:57:47.869576 17840 file_cache.cc:492] Constructed file cache with capacity 419430
W20250411 13:57:47.875056 17987 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:47.875264 17985 sys_catalog.cc:263] Verifying existing consensus state
W20250411 13:57:47.876473 17988 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:47.880026 17990 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:47.885082 17840 server_base.cc:1034] running on GCE node
I20250411 13:57:47.885928 17985 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7: Bootstrap starting.
I20250411 13:57:47.886758 17840 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:47.887128 17840 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:47.887342 17840 hybrid_clock.cc:648] HybridClock initialized: now 1744379867887328 us; error 0 us; skew 500 ppm
I20250411 13:57:47.888235 17840 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:47.896046 17840 webserver.cc:466] Webserver started at http://127.15.113.60:42219/ using document root <none> and password file <none>
I20250411 13:57:47.896754 17840 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:47.897003 17840 fs_manager.cc:365] Using existing metadata directory in first data directory
I20250411 13:57:47.901939 17840 fs_manager.cc:714] Time spent opening directory manager: real 0.004s	user 0.005s	sys 0.000s
I20250411 13:57:47.908259 17997 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:47.909317 17840 fs_manager.cc:730] Time spent opening block manager: real 0.005s	user 0.001s	sys 0.002s
I20250411 13:57:47.909706 17840 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-2-root
uuid: "ae43c70baafa4136a45e1aa583fda7b1"
format_stamp: "Formatted at 2025-04-11 13:57:43 on dist-test-slave-jcj2"
I20250411 13:57:47.910092 17840 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-2-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-2-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/master-2-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:47.912297 17985 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7: Bootstrap replayed 1/1 log segments. Stats: ops{read=4 overwritten=0 applied=4 ignored=0} inserts{seen=3 ignored=0} mutations{seen=0 ignored=0} orphaned_commits=0. Pending: 0 replicates
I20250411 13:57:47.913718 17985 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7: Bootstrap complete.
I20250411 13:57:47.916971 17985 raft_consensus.cc:357] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [term 2 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:47.917591 17985 raft_consensus.cc:738] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [term 2 FOLLOWER]: Becoming Follower/Learner. State: Replica: 0cee234f25ab4c62a65050963ad4c5a7, State: Initialized, Role: FOLLOWER
I20250411 13:57:47.918255 17985 consensus_queue.cc:260] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 4, Last appended: 1.4, Last appended by leader: 4, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:47.920042 18002 sys_catalog.cc:455] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 2 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } } }
I20250411 13:57:47.920581 18002 sys_catalog.cc:458] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:47.921586 17985 sys_catalog.cc:564] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:47.936324 18013 catalog_manager.cc:1261] Loaded cluster ID: 4cf7324170fb47c9b1bba0891a8eebdf
I20250411 13:57:47.936599 18013 catalog_manager.cc:1554] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7: loading cluster ID for follower catalog manager: success
I20250411 13:57:47.940316 17840 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:47.940486 18013 catalog_manager.cc:1576] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7: acquiring CA information for follower catalog manager: success
I20250411 13:57:47.941462 17840 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:47.943583 18013 catalog_manager.cc:1604] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7: importing token verification keys for follower catalog manager: success; most recent TSK sequence number 0
I20250411 13:57:47.979872 17840 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.60:39983
I20250411 13:57:47.979967 18060 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.60:39983 every 8 connection(s)
I20250411 13:57:47.983196 17840 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 0
I20250411 13:57:47.983493 17840 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 1
I20250411 13:57:47.983664 17840 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 2
I20250411 13:57:47.988226 18061 sys_catalog.cc:263] Verifying existing consensus state
I20250411 13:57:47.992274 18061 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1: Bootstrap starting.
I20250411 13:57:48.011096 18061 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1: Bootstrap replayed 1/1 log segments. Stats: ops{read=4 overwritten=0 applied=4 ignored=0} inserts{seen=3 ignored=0} mutations{seen=0 ignored=0} orphaned_commits=0. Pending: 0 replicates
I20250411 13:57:48.011888 18061 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1: Bootstrap complete.
I20250411 13:57:48.014021 18061 raft_consensus.cc:357] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 2 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:48.014528 18061 raft_consensus.cc:738] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 2 FOLLOWER]: Becoming Follower/Learner. State: Replica: ae43c70baafa4136a45e1aa583fda7b1, State: Initialized, Role: FOLLOWER
I20250411 13:57:48.015121 18061 consensus_queue.cc:260] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 4, Last appended: 1.4, Last appended by leader: 4, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:48.016669 18064 sys_catalog.cc:455] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 2 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } } }
I20250411 13:57:48.017187 18064 sys_catalog.cc:458] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:48.017949 18061 sys_catalog.cc:564] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:48.034431 17840 file_cache.cc:492] Constructed file cache with capacity 419430
I20250411 13:57:48.037515 18076 catalog_manager.cc:1261] Loaded cluster ID: 4cf7324170fb47c9b1bba0891a8eebdf
I20250411 13:57:48.037815 18076 catalog_manager.cc:1554] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1: loading cluster ID for follower catalog manager: success
W20250411 13:57:48.041297 18077 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:48.041842 18078 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:48.044420 18080 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:48.044713 17840 server_base.cc:1034] running on GCE node
I20250411 13:57:48.044708 18076 catalog_manager.cc:1576] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1: acquiring CA information for follower catalog manager: success
I20250411 13:57:48.045786 17840 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:48.046000 17840 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:48.046164 17840 hybrid_clock.cc:648] HybridClock initialized: now 1744379868046143 us; error 0 us; skew 500 ppm
I20250411 13:57:48.046737 17840 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:48.048221 18076 catalog_manager.cc:1604] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1: importing token verification keys for follower catalog manager: success; most recent TSK sequence number 0
I20250411 13:57:48.049357 17840 webserver.cc:466] Webserver started at http://127.15.113.1:43579/ using document root <none> and password file <none>
I20250411 13:57:48.049834 17840 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:48.049993 17840 fs_manager.cc:365] Using existing metadata directory in first data directory
I20250411 13:57:48.053074 17840 fs_manager.cc:714] Time spent opening directory manager: real 0.002s	user 0.003s	sys 0.000s
I20250411 13:57:48.055497 18085 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:48.056238 17840 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.002s	sys 0.000s
I20250411 13:57:48.056499 17840 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-0-root
uuid: "ace3562fd6d14838b31ec3b70d1bc687"
format_stamp: "Formatted at 2025-04-11 13:57:43 on dist-test-slave-jcj2"
I20250411 13:57:48.056769 17840 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-0-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-0-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-0-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:48.068138 17840 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:48.069100 17840 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:48.070394 17840 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:48.072686 17840 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:48.072876 17840 ts_tablet_manager.cc:525] Time spent loading tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:48.073089 17840 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:48.073237 17840 ts_tablet_manager.cc:589] Time spent registering tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:48.111810 17840 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.1:35551
I20250411 13:57:48.111927 18147 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.1:35551 every 8 connection(s)
I20250411 13:57:48.128345 17840 file_cache.cc:492] Constructed file cache with capacity 419430
I20250411 13:57:48.133005 18148 heartbeater.cc:344] Connected to a master server at 127.15.113.60:39983
I20250411 13:57:48.133483 18148 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:48.134640 18148 heartbeater.cc:507] Master 127.15.113.60:39983 requested a full tablet report, sending...
I20250411 13:57:48.139102 18026 ts_manager.cc:194] Registered new tserver with Master: ace3562fd6d14838b31ec3b70d1bc687 (127.15.113.1:35551)
I20250411 13:57:48.142962 18150 heartbeater.cc:344] Connected to a master server at 127.15.113.62:46181
I20250411 13:57:48.143433 18150 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:48.144282 18150 heartbeater.cc:507] Master 127.15.113.62:46181 requested a full tablet report, sending...
W20250411 13:57:48.142990 18155 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:48.147055 17874 ts_manager.cc:194] Registered new tserver with Master: ace3562fd6d14838b31ec3b70d1bc687 (127.15.113.1:35551)
I20250411 13:57:48.147421 18149 heartbeater.cc:344] Connected to a master server at 127.15.113.61:32907
I20250411 13:57:48.147778 18149 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:48.148646 18149 heartbeater.cc:507] Master 127.15.113.61:32907 requested a full tablet report, sending...
W20250411 13:57:48.150920 18156 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:48.151319 17950 ts_manager.cc:194] Registered new tserver with Master: ace3562fd6d14838b31ec3b70d1bc687 (127.15.113.1:35551)
W20250411 13:57:48.151854 18158 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:48.152199 17840 server_base.cc:1034] running on GCE node
I20250411 13:57:48.153178 17840 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:48.153434 17840 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:48.153611 17840 hybrid_clock.cc:648] HybridClock initialized: now 1744379868153589 us; error 0 us; skew 500 ppm
I20250411 13:57:48.154103 17840 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:48.156494 17840 webserver.cc:466] Webserver started at http://127.15.113.2:34603/ using document root <none> and password file <none>
I20250411 13:57:48.156961 17840 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:48.157135 17840 fs_manager.cc:365] Using existing metadata directory in first data directory
I20250411 13:57:48.160395 17840 fs_manager.cc:714] Time spent opening directory manager: real 0.003s	user 0.003s	sys 0.000s
I20250411 13:57:48.162917 18163 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:48.163600 17840 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.002s	sys 0.000s
I20250411 13:57:48.163882 17840 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-1-root
uuid: "79f6c6bf17ed495d9177207734910400"
format_stamp: "Formatted at 2025-04-11 13:57:43 on dist-test-slave-jcj2"
I20250411 13:57:48.164146 17840 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-1-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-1-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-1-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:48.190703 17840 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:48.191726 17840 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:48.193035 17840 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:48.195363 17840 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:48.195569 17840 ts_tablet_manager.cc:525] Time spent loading tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:48.195830 17840 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:48.196035 17840 ts_tablet_manager.cc:589] Time spent registering tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:48.233335 17840 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.2:44943
I20250411 13:57:48.233450 18225 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.2:44943 every 8 connection(s)
I20250411 13:57:48.239662 17840 file_cache.cc:492] Constructed file cache with capacity 419430
I20250411 13:57:48.257808 18228 heartbeater.cc:344] Connected to a master server at 127.15.113.62:46181
I20250411 13:57:48.258296 18228 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:48.259208 18228 heartbeater.cc:507] Master 127.15.113.62:46181 requested a full tablet report, sending...
I20250411 13:57:48.262141 17873 ts_manager.cc:194] Registered new tserver with Master: 79f6c6bf17ed495d9177207734910400 (127.15.113.2:44943)
I20250411 13:57:48.267908 18226 heartbeater.cc:344] Connected to a master server at 127.15.113.60:39983
I20250411 13:57:48.268450 18226 heartbeater.cc:461] Registering TS with master...
W20250411 13:57:48.268946 18233 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:48.269546 18227 heartbeater.cc:344] Connected to a master server at 127.15.113.61:32907
I20250411 13:57:48.269596 18226 heartbeater.cc:507] Master 127.15.113.60:39983 requested a full tablet report, sending...
I20250411 13:57:48.270144 18227 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:48.271155 18227 heartbeater.cc:507] Master 127.15.113.61:32907 requested a full tablet report, sending...
I20250411 13:57:48.273479 18026 ts_manager.cc:194] Registered new tserver with Master: 79f6c6bf17ed495d9177207734910400 (127.15.113.2:44943)
W20250411 13:57:48.273765 18234 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:48.276681 17950 ts_manager.cc:194] Registered new tserver with Master: 79f6c6bf17ed495d9177207734910400 (127.15.113.2:44943)
I20250411 13:57:48.280372 17840 server_base.cc:1034] running on GCE node
W20250411 13:57:48.280727 18236 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:48.281580 17840 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:48.281807 17840 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:48.281978 17840 hybrid_clock.cc:648] HybridClock initialized: now 1744379868281963 us; error 0 us; skew 500 ppm
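
[Annotation] The "error 0 us; skew 500 ppm" figures above describe how the clock's error bound behaves under the 'system_unsync' time source: with no NTP daemon bounding drift, the clock starts from a zero error estimate and widens it at the configured maximum skew rate. The following is a rough sketch of that arithmetic, not Kudu's actual hybrid_clock.cc code; ClockSnapshot and MaxErrorUsec are hypothetical names.

    #include <cstdint>
    #include <iostream>

    struct ClockSnapshot {
      int64_t now_usec;         // current physical time
      int64_t last_sync_usec;   // when the error bound was last anchored
      int64_t base_error_usec;  // error at last sync (0 for system_unsync)
    };

    // Error bound grows linearly with elapsed time at 'skew_ppm' parts
    // per million, mirroring the "skew 500 ppm" figure in the log line.
    int64_t MaxErrorUsec(const ClockSnapshot& c, int64_t skew_ppm) {
      const int64_t elapsed = c.now_usec - c.last_sync_usec;
      return c.base_error_usec + elapsed * skew_ppm / 1000000;
    }

    int main() {
      // Two seconds after initialization at the timestamp from the log:
      ClockSnapshot c{1744379868281963 + 2000000, 1744379868281963, 0};
      std::cout << MaxErrorUsec(c, 500) << " us" << std::endl;  // 1000 us
    }
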
I20250411 13:57:48.282441 17840 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:48.284890 17840 webserver.cc:466] Webserver started at http://127.15.113.3:40579/ using document root <none> and password file <none>
I20250411 13:57:48.285360 17840 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:48.285538 17840 fs_manager.cc:365] Using existing metadata directory in first data directory
I20250411 13:57:48.288805 17840 fs_manager.cc:714] Time spent opening directory manager: real 0.002s	user 0.003s	sys 0.000s
I20250411 13:57:48.291369 18241 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:48.292052 17840 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.001s	sys 0.001s
I20250411 13:57:48.292357 17840 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-2-root
uuid: "43d31dfb5f814ee1970e1e91348e55a8"
format_stamp: "Formatted at 2025-04-11 13:57:43 on dist-test-slave-jcj2"
I20250411 13:57:48.292757 17840 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-2-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-2-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestCycleThroughAllMasters.1744379822517842-15812-0/minicluster-data/ts-2-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:48.302371 17840 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:48.303515 17840 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:48.304684 17840 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:48.306648 17840 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:48.306818 17840 ts_tablet_manager.cc:525] Time spent load tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:48.307046 17840 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:48.307174 17840 ts_tablet_manager.cc:589] Time spent register tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:48.347950 17840 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.3:35539
I20250411 13:57:48.348037 18303 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.3:35539 every 8 connection(s)
I20250411 13:57:48.366667 18304 heartbeater.cc:344] Connected to a master server at 127.15.113.60:39983
I20250411 13:57:48.367101 18304 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:48.367825 18304 heartbeater.cc:507] Master 127.15.113.60:39983 requested a full tablet report, sending...
I20250411 13:57:48.368180 18306 heartbeater.cc:344] Connected to a master server at 127.15.113.62:46181
I20250411 13:57:48.368568 18306 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:48.369423 18306 heartbeater.cc:507] Master 127.15.113.62:46181 requested a full tablet report, sending...
I20250411 13:57:48.370138 18026 ts_manager.cc:194] Registered new tserver with Master: 43d31dfb5f814ee1970e1e91348e55a8 (127.15.113.3:35539)
I20250411 13:57:48.371718 17874 ts_manager.cc:194] Registered new tserver with Master: 43d31dfb5f814ee1970e1e91348e55a8 (127.15.113.3:35539)
I20250411 13:57:48.372325 18305 heartbeater.cc:344] Connected to a master server at 127.15.113.61:32907
I20250411 13:57:48.372709 18305 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:48.373453 18305 heartbeater.cc:507] Master 127.15.113.61:32907 requested a full tablet report, sending...
I20250411 13:57:48.375491 17950 ts_manager.cc:194] Registered new tserver with Master: 43d31dfb5f814ee1970e1e91348e55a8 (127.15.113.3:35539)
I20250411 13:57:48.376449 17840 internal_mini_cluster.cc:371] 3 TS(s) registered with all masters after 0.020133196s
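
[Annotation] The "3 TS(s) registered with all masters after 0.020133196s" line is the minicluster's readiness gate: it polls every master until each one reports the expected tablet-server count. A minimal sketch of such a wait loop follows; WaitForRegistration and count_tservers_at are hypothetical names, not the internal_mini_cluster.cc API.

    #include <chrono>
    #include <functional>
    #include <thread>

    // Polls each master's registered-tserver count until all masters see
    // 'expected_ts' servers or the timeout expires.
    bool WaitForRegistration(int num_masters, int expected_ts,
                             const std::function<int(int)>& count_tservers_at,
                             std::chrono::milliseconds timeout) {
      auto deadline = std::chrono::steady_clock::now() + timeout;
      while (std::chrono::steady_clock::now() < deadline) {
        bool all = true;
        for (int m = 0; m < num_masters; ++m) {
          if (count_tservers_at(m) < expected_ts) { all = false; break; }
        }
        if (all) return true;
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
      }
      return false;
    }

    int main() {
      auto counts = [](int) { return 3; };  // pretend all masters see 3 TSs
      return WaitForRegistration(3, 3, counts, std::chrono::seconds(30)) ? 0 : 1;
    }
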
I20250411 13:57:49.380323 18312 raft_consensus.cc:491] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 2 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:57:49.380745 18312 raft_consensus.cc:513] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 2 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:49.382548 18312 leader_election.cc:290] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [CANDIDATE]: Term 3 pre-election: Requested pre-vote from peers a9054441181f4a8abb031d6843529936 (127.15.113.62:46181), 0cee234f25ab4c62a65050963ad4c5a7 (127.15.113.61:32907)
I20250411 13:57:49.392407 17960 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "ae43c70baafa4136a45e1aa583fda7b1" candidate_term: 3 candidate_status { last_received { term: 1 index: 4 } } ignore_live_leader: false dest_uuid: "0cee234f25ab4c62a65050963ad4c5a7" is_pre_election: true
I20250411 13:57:49.392445 17884 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "ae43c70baafa4136a45e1aa583fda7b1" candidate_term: 3 candidate_status { last_received { term: 1 index: 4 } } ignore_live_leader: false dest_uuid: "a9054441181f4a8abb031d6843529936" is_pre_election: true
I20250411 13:57:49.393100 17960 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [term 2 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate ae43c70baafa4136a45e1aa583fda7b1 in term 2.
I20250411 13:57:49.393287 17884 raft_consensus.cc:2408] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 2 FOLLOWER]: Leader pre-election vote request: Denying vote to candidate ae43c70baafa4136a45e1aa583fda7b1 for term 3 because replica has last-logged OpId of term: 2 index: 5, which is greater than that of the candidate, which has last-logged OpId of term: 1 index: 4.
I20250411 13:57:49.394115 17998 leader_election.cc:304] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [CANDIDATE]: Term 3 pre-election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 0cee234f25ab4c62a65050963ad4c5a7, ae43c70baafa4136a45e1aa583fda7b1; no voters: 
I20250411 13:57:49.394881 18312 raft_consensus.cc:2802] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 2 FOLLOWER]: Leader pre-election won for term 3
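
[Annotation] The grant/deny split above is Raft's log-freshness rule: a voter grants its vote only if the candidate's log is at least as up-to-date as its own, comparing last-logged OpIds by term first, then index. Here a9054441... denies because its OpId 2.5 is ahead of the candidate's 1.4, while 0cee234f... grants. A minimal sketch of that comparison (not Kudu's raft_consensus.cc implementation) follows.

    #include <cstdint>
    #include <iostream>

    struct OpId {
      int64_t term;
      int64_t index;
    };

    // True if the candidate's log is at least as up-to-date as the voter's:
    // higher term wins; equal terms fall back to the index.
    bool CandidateLogIsCurrent(const OpId& candidate, const OpId& voter) {
      if (candidate.term != voter.term) return candidate.term > voter.term;
      return candidate.index >= voter.index;
    }

    int main() {
      OpId candidate{1, 4};  // ae43c70b...'s last-logged OpId per the log
      OpId voter{2, 5};      // a9054441...'s last-logged OpId per the log
      std::cout << (CandidateLogIsCurrent(candidate, voter) ? "grant" : "deny")
                << std::endl;  // prints "deny", matching the vote denial above
    }
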
I20250411 13:57:49.395215 18312 raft_consensus.cc:491] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 2 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
I20250411 13:57:49.395464 18312 raft_consensus.cc:3058] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 2 FOLLOWER]: Advancing to term 3
I20250411 13:57:49.399746 18312 raft_consensus.cc:513] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 3 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:49.401250 18312 leader_election.cc:290] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [CANDIDATE]: Term 3 election: Requested vote from peers a9054441181f4a8abb031d6843529936 (127.15.113.62:46181), 0cee234f25ab4c62a65050963ad4c5a7 (127.15.113.61:32907)
I20250411 13:57:49.402016 17884 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "ae43c70baafa4136a45e1aa583fda7b1" candidate_term: 3 candidate_status { last_received { term: 1 index: 4 } } ignore_live_leader: false dest_uuid: "a9054441181f4a8abb031d6843529936"
I20250411 13:57:49.402096 17960 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "ae43c70baafa4136a45e1aa583fda7b1" candidate_term: 3 candidate_status { last_received { term: 1 index: 4 } } ignore_live_leader: false dest_uuid: "0cee234f25ab4c62a65050963ad4c5a7"
I20250411 13:57:49.402531 17884 raft_consensus.cc:3058] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 2 FOLLOWER]: Advancing to term 3
I20250411 13:57:49.402602 17960 raft_consensus.cc:3058] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [term 2 FOLLOWER]: Advancing to term 3
I20250411 13:57:49.407123 17960 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [term 3 FOLLOWER]: Leader election vote request: Granting yes vote for candidate ae43c70baafa4136a45e1aa583fda7b1 in term 3.
I20250411 13:57:49.407323 17884 raft_consensus.cc:2408] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 3 FOLLOWER]: Leader election vote request: Denying vote to candidate ae43c70baafa4136a45e1aa583fda7b1 for term 3 because replica has last-logged OpId of term: 2 index: 5, which is greater than that of the candidate, which has last-logged OpId of term: 1 index: 4.
I20250411 13:57:49.408054 17998 leader_election.cc:304] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [CANDIDATE]: Term 3 election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 0cee234f25ab4c62a65050963ad4c5a7, ae43c70baafa4136a45e1aa583fda7b1; no voters: 
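
[Annotation] The election summary above ("received 2 responses out of 3 voters: 2 yes votes; 0 no votes") shows why the election is decided before the third response arrives: the candidate's own vote counts, the majority size for 3 voters is 2, and tallying stops as soon as either side reaches a majority. A hedged sketch of that tally (hypothetical names, not the leader_election.cc code):

    #include <iostream>

    enum class Decision { kPending, kWon, kLost };

    // Decide as soon as yes or no votes reach floor(n/2) + 1.
    Decision Tally(int num_voters, int yes_votes, int no_votes) {
      const int majority = num_voters / 2 + 1;
      if (yes_votes >= majority) return Decision::kWon;
      if (no_votes >= majority) return Decision::kLost;
      return Decision::kPending;
    }

    int main() {
      // Candidate's own vote plus 0cee234f...'s grant: 2 yes, 0 no so far.
      std::cout << (Tally(3, 2, 0) == Decision::kWon) << std::endl;  // prints 1
    }
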
I20250411 13:57:49.408758 18312 raft_consensus.cc:2802] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 3 FOLLOWER]: Leader election won for term 3
I20250411 13:57:49.409813 18312 raft_consensus.cc:695] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 3 LEADER]: Becoming Leader. State: Replica: ae43c70baafa4136a45e1aa583fda7b1, State: Running, Role: LEADER
I20250411 13:57:49.410429 18312 consensus_queue.cc:237] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [LEADER]: Queue going to LEADER mode. State: All replicated index: 0, Majority replicated index: 4, Committed index: 4, Last appended: 1.4, Last appended by leader: 4, Current term: 3, Majority size: 2, State: 0, Mode: LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } }
I20250411 13:57:49.414052 18317 sys_catalog.cc:455] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [sys.catalog]: SysCatalogTable state changed. Reason: New leader ae43c70baafa4136a45e1aa583fda7b1. Latest consensus state: current_term: 3 leader_uuid: "ae43c70baafa4136a45e1aa583fda7b1" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a9054441181f4a8abb031d6843529936" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 46181 } } peers { permanent_uuid: "0cee234f25ab4c62a65050963ad4c5a7" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 32907 } } peers { permanent_uuid: "ae43c70baafa4136a45e1aa583fda7b1" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39983 } } }
I20250411 13:57:49.414780 18317 sys_catalog.cc:458] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [sys.catalog]: This master's current role is: LEADER
I20250411 13:57:49.416131 18319 catalog_manager.cc:1477] Loading table and tablet metadata into memory...
I20250411 13:57:49.421113 18319 catalog_manager.cc:1486] Initializing Kudu cluster ID...
I20250411 13:57:49.423662 18319 catalog_manager.cc:1261] Loaded cluster ID: 4cf7324170fb47c9b1bba0891a8eebdf
I20250411 13:57:49.423890 18319 catalog_manager.cc:1497] Initializing Kudu internal certificate authority...
I20250411 13:57:49.426889 18319 catalog_manager.cc:1506] Loading token signing keys...
I20250411 13:57:49.429404 18319 catalog_manager.cc:5965] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1: Loaded TSK: 0
I20250411 13:57:49.430115 18319 catalog_manager.cc:1516] Initializing in-progress tserver states...
I20250411 13:57:49.441479 15812 tablet_server.cc:178] TabletServer@127.15.113.1:0 shutting down...
I20250411 13:57:49.461683 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:49.477300 15812 tablet_server.cc:195] TabletServer@127.15.113.1:0 shutdown complete.
I20250411 13:57:49.484231 15812 tablet_server.cc:178] TabletServer@127.15.113.2:0 shutting down...
I20250411 13:57:49.499495 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:49.514724 15812 tablet_server.cc:195] TabletServer@127.15.113.2:0 shutdown complete.
I20250411 13:57:49.521695 15812 tablet_server.cc:178] TabletServer@127.15.113.3:0 shutting down...
I20250411 13:57:49.537056 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:49.552294 15812 tablet_server.cc:195] TabletServer@127.15.113.3:0 shutdown complete.
I20250411 13:57:49.559195 15812 master.cc:561] Master@127.15.113.62:46181 shutting down...
I20250411 13:57:49.571393 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 3 FOLLOWER]: Raft consensus shutting down.
I20250411 13:57:49.571774 15812 pending_rounds.cc:70] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936: Trying to abort 1 pending ops.
I20250411 13:57:49.571898 15812 pending_rounds.cc:77] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936: Aborting op as it isn't in flight: id { term: 2 index: 5 } timestamp: 7144979932730720256 op_type: NO_OP noop_request { }
I20250411 13:57:49.572108 15812 raft_consensus.cc:2887] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 3 FOLLOWER]: NO_OP replication failed: Aborted: Op aborted
I20250411 13:57:49.572302 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P a9054441181f4a8abb031d6843529936 [term 3 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:49.572532 15812 tablet_replica.cc:331] stopping tablet replica
I20250411 13:57:49.589382 15812 master.cc:583] Master@127.15.113.62:46181 shutdown complete.
I20250411 13:57:49.599064 15812 master.cc:561] Master@127.15.113.61:32907 shutting down...
I20250411 13:57:49.611469 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [term 3 FOLLOWER]: Raft consensus shutting down.
I20250411 13:57:49.611917 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P 0cee234f25ab4c62a65050963ad4c5a7 [term 3 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:49.612191 15812 tablet_replica.cc:331] stopping tablet replica
I20250411 13:57:49.629168 15812 master.cc:583] Master@127.15.113.61:32907 shutdown complete.
I20250411 13:57:49.638725 15812 master.cc:561] Master@127.15.113.60:39983 shutting down...
I20250411 13:57:49.650275 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 3 LEADER]: Raft consensus shutting down.
I20250411 13:57:49.650897 15812 pending_rounds.cc:70] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1: Trying to abort 1 pending ops.
I20250411 13:57:49.651084 15812 pending_rounds.cc:77] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1: Aborting op as it isn't in flight: id { term: 3 index: 5 } timestamp: 7144979945111994368 op_type: NO_OP noop_request { }
I20250411 13:57:49.651330 15812 raft_consensus.cc:2887] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 3 LEADER]: NO_OP replication failed: Aborted: Op aborted
I20250411 13:57:49.651569 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P ae43c70baafa4136a45e1aa583fda7b1 [term 3 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:49.651942 15812 tablet_replica.cc:331] stopping tablet replica
I20250411 13:57:49.669581 15812 master.cc:583] Master@127.15.113.60:39983 shutdown complete.
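
[Annotation] During the master shutdowns above, each replica logs "Trying to abort 1 pending ops" and fails the round with "Aborted: Op aborted": ops that were appended to the local log but never committed are failed explicitly so their completion callbacks fire rather than hang shutdown. A simplified sketch of that pattern, under the assumption that each pending round carries a replication callback (PendingRound and AbortPendingRounds are hypothetical names):

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    struct PendingRound {
      int64_t term, index;
      std::function<void(const std::string&)> replicated_cb;
    };

    // Fail every still-pending round with an Aborted status at shutdown.
    void AbortPendingRounds(std::vector<PendingRound>* rounds) {
      for (auto& r : *rounds) {
        r.replicated_cb("Aborted: Op aborted");  // mirrors the log's status
      }
      rounds->clear();
    }

    int main() {
      std::vector<PendingRound> pending;
      pending.push_back({2, 5, [](const std::string& s) {
        std::cout << "NO_OP replication failed: " << s << std::endl;
      }});
      AbortPendingRounds(&pending);
    }
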
[       OK ] MasterReplicationTest.TestCycleThroughAllMasters (6834 ms)
[ RUN      ] MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster
I20250411 13:57:49.690435 15812 internal_mini_cluster.cc:156] Creating distributed mini masters. Addrs: 127.15.113.62:38391,127.15.113.61:43265,127.15.113.60:36095
I20250411 13:57:49.691668 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:49.695937 18320 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:49.697479 18321 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:49.698401 18323 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:49.699349 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:49.700074 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:49.700237 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:49.700345 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379869700335 us; error 0 us; skew 500 ppm
I20250411 13:57:49.700768 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:49.702848 15812 webserver.cc:466] Webserver started at http://127.15.113.62:40235/ using document root <none> and password file <none>
I20250411 13:57:49.703299 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:49.703447 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:49.703640 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:49.704563 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/master-0-root/instance:
uuid: "a48ffb81d16f42fe8310518e1577cb06"
format_stamp: "Formatted at 2025-04-11 13:57:49 on dist-test-slave-jcj2"
I20250411 13:57:49.708742 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.003s	sys 0.002s
I20250411 13:57:49.711720 18328 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:49.712415 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.003s	sys 0.000s
I20250411 13:57:49.712677 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/master-0-root
uuid: "a48ffb81d16f42fe8310518e1577cb06"
format_stamp: "Formatted at 2025-04-11 13:57:49 on dist-test-slave-jcj2"
I20250411 13:57:49.712935 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/master-0-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/master-0-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/master-0-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:49.729717 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:49.730674 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:49.763456 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.62:38391
I20250411 13:57:49.763595 18379 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.62:38391 every 8 connection(s)
I20250411 13:57:49.767022 18380 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 00000000000000000000000000000000. 1 dirs total, 0 dirs full, 0 dirs failed
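
[Annotation] "Could only allocate 1 dirs of requested 3" reflects how a tablet's data-directory group is sized: the tablet asks for a group of N directories, and the manager hands back however many healthy, non-full directories actually exist (here the minicluster master has a single data dir). A hedged sketch of that policy (AllocateGroup is a hypothetical name, not the data_dirs.cc API):

    #include <algorithm>
    #include <iostream>
    #include <vector>

    struct DataDir { bool full; bool failed; };

    // Return how many dirs of the requested group size can be granted.
    int AllocateGroup(const std::vector<DataDir>& dirs, int requested) {
      int usable = 0;
      for (const auto& d : dirs) {
        if (!d.full && !d.failed) ++usable;
      }
      return std::min(requested, usable);
    }

    int main() {
      std::vector<DataDir> dirs{{false, false}};  // 1 dir total, none full/failed
      std::cout << AllocateGroup(dirs, 3) << std::endl;  // prints 1, as logged
    }
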
I20250411 13:57:49.767156 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:49.771965 18382 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:49.773639 18383 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:49.773787 18380 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38391 } has no permanent_uuid. Determining permanent_uuid...
W20250411 13:57:49.776748 18385 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:49.778213 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:49.778867 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:49.779081 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:49.779227 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379869779210 us; error 0 us; skew 500 ppm
I20250411 13:57:49.779724 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:49.782250 15812 webserver.cc:466] Webserver started at http://127.15.113.61:41713/ using document root <none> and password file <none>
I20250411 13:57:49.782696 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:49.782922 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:49.783166 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:49.784287 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/master-1-root/instance:
uuid: "fdfc4352b255464f810d4e46e7ae440e"
format_stamp: "Formatted at 2025-04-11 13:57:49 on dist-test-slave-jcj2"
I20250411 13:57:49.786835 18380 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 43265 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:49.788810 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.005s	sys 0.000s
W20250411 13:57:49.789109 18329 proxy.cc:239] Call had error, refreshing address and retrying: Network error: Client connection negotiation failed: client connection to 127.15.113.61:43265: connect: Connection refused (error 111) [suppressed 7 similar messages]
W20250411 13:57:49.791947 18380 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:43265: Network error: Client connection negotiation failed: client connection to 127.15.113.61:43265: connect: Connection refused (error 111)
I20250411 13:57:49.792523 18393 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:49.793222 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.001s	sys 0.000s
I20250411 13:57:49.793498 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/master-1-root
uuid: "fdfc4352b255464f810d4e46e7ae440e"
format_stamp: "Formatted at 2025-04-11 13:57:49 on dist-test-slave-jcj2"
I20250411 13:57:49.793742 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/master-1-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/master-1-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/master-1-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:49.809486 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:49.810389 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:49.821483 18380 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 43265 } attempt: 1
W20250411 13:57:49.825985 18380 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:43265: Network error: Client connection negotiation failed: client connection to 127.15.113.61:43265: connect: Connection refused (error 111)
I20250411 13:57:49.844182 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.61:43265
I20250411 13:57:49.844261 18444 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.61:43265 every 8 connection(s)
I20250411 13:57:49.847767 18445 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 00000000000000000000000000000000. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:57:49.847999 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:49.852972 18447 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:49.854424 18445 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38391 } has no permanent_uuid. Determining permanent_uuid...
W20250411 13:57:49.854611 18448 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:49.857429 18450 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:49.858686 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:49.859411 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:49.859680 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:49.859876 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379869859858 us; error 0 us; skew 500 ppm
I20250411 13:57:49.860478 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:49.863124 15812 webserver.cc:466] Webserver started at http://127.15.113.60:41147/ using document root <none> and password file <none>
I20250411 13:57:49.863651 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:49.863874 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:49.864184 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:49.865564 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/master-2-root/instance:
uuid: "9dd393f25aff447caff18546cac8b357"
format_stamp: "Formatted at 2025-04-11 13:57:49 on dist-test-slave-jcj2"
I20250411 13:57:49.866986 18445 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 43265 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:49.871151 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.005s	user 0.007s	sys 0.000s
I20250411 13:57:49.875001 18457 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:49.875864 15812 fs_manager.cc:730] Time spent opening block manager: real 0.003s	user 0.001s	sys 0.002s
I20250411 13:57:49.876168 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/master-2-root
uuid: "9dd393f25aff447caff18546cac8b357"
format_stamp: "Formatted at 2025-04-11 13:57:49 on dist-test-slave-jcj2"
I20250411 13:57:49.876544 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/master-2-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/master-2-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/master-2-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:49.878149 18445 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } has no permanent_uuid. Determining permanent_uuid...
W20250411 13:57:49.883607 18445 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:36095: Network error: Client connection negotiation failed: client connection to 127.15.113.60:36095: connect: Connection refused (error 111)
I20250411 13:57:49.903028 18445 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } attempt: 1
I20250411 13:57:49.905849 18380 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 43265 } attempt: 2
W20250411 13:57:49.907553 18445 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:36095: Network error: Client connection negotiation failed: client connection to 127.15.113.60:36095: connect: Connection refused (error 111)
I20250411 13:57:49.908321 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:49.909498 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:49.916271 18380 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } has no permanent_uuid. Determining permanent_uuid...
W20250411 13:57:49.921214 18380 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:36095: Network error: Client connection negotiation failed: client connection to 127.15.113.60:36095: connect: Connection refused (error 111)
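
[Annotation] The interleaved "Error getting permanent uuid" / "Retrying to get permanent uuid ... attempt: N" lines above are the expected startup dance: each master resolves its peers' permanent UUIDs over RPC, and because the peers' RPC servers come up one at a time, early attempts fail with "Connection refused" and are retried until the peer is listening. A simplified retry-with-backoff sketch follows; GetUuidWithRetries and UuidFetcher are hypothetical stand-ins, not the consensus_peers.cc API.

    #include <chrono>
    #include <functional>
    #include <optional>
    #include <string>
    #include <thread>

    using UuidFetcher =
        std::function<std::optional<std::string>(const std::string&)>;

    // Retry the UUID lookup, backing off between attempts while the peer's
    // RPC server is still refusing connections.
    std::optional<std::string> GetUuidWithRetries(const std::string& addr,
                                                  const UuidFetcher& fetch,
                                                  int max_attempts) {
      auto backoff = std::chrono::milliseconds(20);
      for (int attempt = 1; attempt <= max_attempts; ++attempt) {
        if (auto uuid = fetch(addr)) return uuid;  // peer answered
        std::this_thread::sleep_for(backoff);     // connection refused: wait
        backoff *= 2;                             // back off before retrying
      }
      return std::nullopt;
    }

    int main() {
      int calls = 0;
      // Hypothetical fetcher that fails twice before the peer's server is up.
      UuidFetcher fetch = [&](const std::string&) -> std::optional<std::string> {
        if (++calls < 3) return std::nullopt;
        return std::optional<std::string>("9dd393f25aff447caff18546cac8b357");
      };
      return GetUuidWithRetries("127.15.113.60:36095", fetch, 5) ? 0 : 1;
    }
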
I20250411 13:57:49.946342 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.60:36095
I20250411 13:57:49.946425 18509 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.60:36095 every 8 connection(s)
I20250411 13:57:49.949146 15812 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 0
I20250411 13:57:49.950111 18510 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 00000000000000000000000000000000. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:57:49.955601 18510 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38391 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:49.965497 18510 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 43265 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:49.973414 18510 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:49.973728 18380 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } attempt: 1
I20250411 13:57:49.982199 18445 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } attempt: 2
I20250411 13:57:49.993516 18380 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P a48ffb81d16f42fe8310518e1577cb06: Bootstrap starting.
I20250411 13:57:49.993604 18510 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 9dd393f25aff447caff18546cac8b357: Bootstrap starting.
I20250411 13:57:49.997896 18380 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P a48ffb81d16f42fe8310518e1577cb06: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:49.999593 18510 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P 9dd393f25aff447caff18546cac8b357: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:50.000241 18445 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e: Bootstrap starting.
I20250411 13:57:50.001914 18380 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P a48ffb81d16f42fe8310518e1577cb06: No bootstrap required, opened a new log
I20250411 13:57:50.004263 18510 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 9dd393f25aff447caff18546cac8b357: No bootstrap required, opened a new log
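
[Annotation] "Neither blocks nor log segments found. Creating new log." followed by "No bootstrap required, opened a new log" is the fast path of tablet bootstrap: replay only happens when there is on-disk state to rebuild from; a fresh deployment simply opens an empty WAL. A minimal sketch of that decision (NeedsBootstrap is a hypothetical name, not the tablet_bootstrap.cc code):

    #include <iostream>

    struct OnDiskState {
      int num_blocks;
      int num_log_segments;
    };

    // Replay the WAL only if something was left on disk by a prior run.
    bool NeedsBootstrap(const OnDiskState& s) {
      return s.num_blocks > 0 || s.num_log_segments > 0;
    }

    int main() {
      OnDiskState fresh{0, 0};  // a brand-new master tablet, as in this test
      std::cout << (NeedsBootstrap(fresh) ? "replaying WAL"
                                          : "no bootstrap required, new log")
                << std::endl;
    }
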
I20250411 13:57:50.004161 18380 raft_consensus.cc:357] T 00000000000000000000000000000000 P a48ffb81d16f42fe8310518e1577cb06 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a48ffb81d16f42fe8310518e1577cb06" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38391 } } peers { permanent_uuid: "fdfc4352b255464f810d4e46e7ae440e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 43265 } } peers { permanent_uuid: "9dd393f25aff447caff18546cac8b357" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } }
I20250411 13:57:50.004761 18380 raft_consensus.cc:383] T 00000000000000000000000000000000 P a48ffb81d16f42fe8310518e1577cb06 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:50.005057 18380 raft_consensus.cc:738] T 00000000000000000000000000000000 P a48ffb81d16f42fe8310518e1577cb06 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: a48ffb81d16f42fe8310518e1577cb06, State: Initialized, Role: FOLLOWER
I20250411 13:57:50.005621 18380 consensus_queue.cc:260] T 00000000000000000000000000000000 P a48ffb81d16f42fe8310518e1577cb06 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a48ffb81d16f42fe8310518e1577cb06" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38391 } } peers { permanent_uuid: "fdfc4352b255464f810d4e46e7ae440e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 43265 } } peers { permanent_uuid: "9dd393f25aff447caff18546cac8b357" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } }
I20250411 13:57:50.005903 18445 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:50.006953 18510 raft_consensus.cc:357] T 00000000000000000000000000000000 P 9dd393f25aff447caff18546cac8b357 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a48ffb81d16f42fe8310518e1577cb06" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38391 } } peers { permanent_uuid: "fdfc4352b255464f810d4e46e7ae440e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 43265 } } peers { permanent_uuid: "9dd393f25aff447caff18546cac8b357" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } }
I20250411 13:57:50.007684 18510 raft_consensus.cc:383] T 00000000000000000000000000000000 P 9dd393f25aff447caff18546cac8b357 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:50.007400 18518 sys_catalog.cc:455] T 00000000000000000000000000000000 P a48ffb81d16f42fe8310518e1577cb06 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a48ffb81d16f42fe8310518e1577cb06" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38391 } } peers { permanent_uuid: "fdfc4352b255464f810d4e46e7ae440e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 43265 } } peers { permanent_uuid: "9dd393f25aff447caff18546cac8b357" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } } }
I20250411 13:57:50.008018 18510 raft_consensus.cc:738] T 00000000000000000000000000000000 P 9dd393f25aff447caff18546cac8b357 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 9dd393f25aff447caff18546cac8b357, State: Initialized, Role: FOLLOWER
I20250411 13:57:50.008301 18518 sys_catalog.cc:458] T 00000000000000000000000000000000 P a48ffb81d16f42fe8310518e1577cb06 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:50.009157 18380 sys_catalog.cc:564] T 00000000000000000000000000000000 P a48ffb81d16f42fe8310518e1577cb06 [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:50.008893 18510 consensus_queue.cc:260] T 00000000000000000000000000000000 P 9dd393f25aff447caff18546cac8b357 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a48ffb81d16f42fe8310518e1577cb06" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38391 } } peers { permanent_uuid: "fdfc4352b255464f810d4e46e7ae440e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 43265 } } peers { permanent_uuid: "9dd393f25aff447caff18546cac8b357" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } }
I20250411 13:57:50.011199 18445 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e: No bootstrap required, opened a new log
I20250411 13:57:50.010797 18520 sys_catalog.cc:455] T 00000000000000000000000000000000 P 9dd393f25aff447caff18546cac8b357 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a48ffb81d16f42fe8310518e1577cb06" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38391 } } peers { permanent_uuid: "fdfc4352b255464f810d4e46e7ae440e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 43265 } } peers { permanent_uuid: "9dd393f25aff447caff18546cac8b357" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } } }
I20250411 13:57:50.011639 18520 sys_catalog.cc:458] T 00000000000000000000000000000000 P 9dd393f25aff447caff18546cac8b357 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:50.012415 18510 sys_catalog.cc:564] T 00000000000000000000000000000000 P 9dd393f25aff447caff18546cac8b357 [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:50.013996 18445 raft_consensus.cc:357] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a48ffb81d16f42fe8310518e1577cb06" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38391 } } peers { permanent_uuid: "fdfc4352b255464f810d4e46e7ae440e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 43265 } } peers { permanent_uuid: "9dd393f25aff447caff18546cac8b357" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } }
I20250411 13:57:50.014693 18445 raft_consensus.cc:383] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:50.015022 18445 raft_consensus.cc:738] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: fdfc4352b255464f810d4e46e7ae440e, State: Initialized, Role: FOLLOWER
I20250411 13:57:50.015902 18445 consensus_queue.cc:260] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a48ffb81d16f42fe8310518e1577cb06" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38391 } } peers { permanent_uuid: "fdfc4352b255464f810d4e46e7ae440e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 43265 } } peers { permanent_uuid: "9dd393f25aff447caff18546cac8b357" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } }
I20250411 13:57:50.023855 18525 sys_catalog.cc:455] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a48ffb81d16f42fe8310518e1577cb06" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38391 } } peers { permanent_uuid: "fdfc4352b255464f810d4e46e7ae440e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 43265 } } peers { permanent_uuid: "9dd393f25aff447caff18546cac8b357" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } } }
I20250411 13:57:50.024646 18525 sys_catalog.cc:458] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:50.027326 18445 sys_catalog.cc:564] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:50.033205 15812 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 1
W20250411 13:57:50.034544 18539 catalog_manager.cc:1560] T 00000000000000000000000000000000 P 9dd393f25aff447caff18546cac8b357: loading cluster ID for follower catalog manager: Not found: cluster ID entry not found
W20250411 13:57:50.035002 18539 catalog_manager.cc:875] Not found: cluster ID entry not found: failed to prepare follower catalog manager, will retry
W20250411 13:57:50.036494 18544 catalog_manager.cc:1560] T 00000000000000000000000000000000 P a48ffb81d16f42fe8310518e1577cb06: loading cluster ID for follower catalog manager: Not found: cluster ID entry not found
W20250411 13:57:50.036739 18544 catalog_manager.cc:875] Not found: cluster ID entry not found: failed to prepare follower catalog manager, will retry
I20250411 13:57:50.042059 15812 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 2
I20250411 13:57:50.043385 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:50.044585 18554 catalog_manager.cc:1560] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e: loading cluster ID for follower catalog manager: Not found: cluster ID entry not found
W20250411 13:57:50.044818 18554 catalog_manager.cc:875] Not found: cluster ID entry not found: failed to prepare follower catalog manager, will retry
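
[Annotation] The "cluster ID entry not found ... will retry" warnings above are benign at this point in startup: only the elected leader writes the cluster ID into the sys catalog, so follower catalog managers poll until that record exists. A hedged sketch of the condition they wait on (SysCatalog and PrepareFollower are hypothetical names, and the ID literal is an arbitrary placeholder):

    #include <optional>
    #include <string>

    struct SysCatalog {  // stand-in for the real sys catalog table
      std::optional<std::string> cluster_id;
    };

    // A follower can finish preparing only once the leader has persisted
    // the cluster ID it needs to read back.
    bool PrepareFollower(const SysCatalog& catalog) {
      return catalog.cluster_id.has_value();
    }

    int main() {
      SysCatalog empty;  // before any leader has written the entry
      SysCatalog ready{std::string("example-cluster-id")};
      return (!PrepareFollower(empty) && PrepareFollower(ready)) ? 0 : 1;
    }
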
W20250411 13:57:50.048504 18555 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:50.049716 18556 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:50.050961 18558 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:50.051103 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:50.051877 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:50.052091 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:50.052245 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379870052228 us; error 0 us; skew 500 ppm
I20250411 13:57:50.052722 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:50.055066 15812 webserver.cc:466] Webserver started at http://127.15.113.1:35967/ using document root <none> and password file <none>
I20250411 13:57:50.055526 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:50.055717 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:50.055963 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:50.056929 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/ts-0-root/instance:
uuid: "b56e9560918346fcabcd653d9cc821fe"
format_stamp: "Formatted at 2025-04-11 13:57:50 on dist-test-slave-jcj2"
I20250411 13:57:50.060829 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.003s	user 0.003s	sys 0.002s
I20250411 13:57:50.063885 18563 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:50.064625 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.003s	sys 0.000s
I20250411 13:57:50.064875 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/ts-0-root
uuid: "b56e9560918346fcabcd653d9cc821fe"
format_stamp: "Formatted at 2025-04-11 13:57:50 on dist-test-slave-jcj2"
I20250411 13:57:50.065136 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/ts-0-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/ts-0-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/ts-0-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:50.094151 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:50.095301 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:50.096740 15812 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:50.098986 15812 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:50.099157 15812 ts_tablet_manager.cc:525] Time spent load tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:50.099422 15812 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:50.099574 15812 ts_tablet_manager.cc:589] Time spent register tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:50.134124 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.1:39191
I20250411 13:57:50.134204 18625 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.1:39191 every 8 connection(s)
I20250411 13:57:50.141031 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
I20250411 13:57:50.156955 18627 heartbeater.cc:344] Connected to a master server at 127.15.113.60:36095
I20250411 13:57:50.157415 18627 heartbeater.cc:461] Registering TS with master...
W20250411 13:57:50.158229 18634 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:50.158468 18627 heartbeater.cc:507] Master 127.15.113.60:36095 requested a full tablet report, sending...
W20250411 13:57:50.156893 18633 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:50.160305 18626 heartbeater.cc:344] Connected to a master server at 127.15.113.61:43265
I20250411 13:57:50.160787 18626 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:50.162039 18626 heartbeater.cc:507] Master 127.15.113.61:43265 requested a full tablet report, sending...
I20250411 13:57:50.162267 15812 server_base.cc:1034] running on GCE node
W20250411 13:57:50.163717 18636 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:50.164717 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:50.165017 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:50.165091 18475 ts_manager.cc:194] Registered new tserver with Master: b56e9560918346fcabcd653d9cc821fe (127.15.113.1:39191)
I20250411 13:57:50.165191 18628 heartbeater.cc:344] Connected to a master server at 127.15.113.62:38391
I20250411 13:57:50.165326 18410 ts_manager.cc:194] Registered new tserver with Master: b56e9560918346fcabcd653d9cc821fe (127.15.113.1:39191)
I20250411 13:57:50.165346 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379870165328 us; error 0 us; skew 500 ppm
I20250411 13:57:50.165781 18628 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:50.166443 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:50.166712 18628 heartbeater.cc:507] Master 127.15.113.62:38391 requested a full tablet report, sending...
I20250411 13:57:50.169132 18345 ts_manager.cc:194] Registered new tserver with Master: b56e9560918346fcabcd653d9cc821fe (127.15.113.1:39191)
I20250411 13:57:50.169265 15812 webserver.cc:466] Webserver started at http://127.15.113.2:39653/ using document root <none> and password file <none>
I20250411 13:57:50.169898 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:50.170121 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:50.170370 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:50.171353 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/ts-1-root/instance:
uuid: "f4a8aec98c4c422ca799f69f62076d12"
format_stamp: "Formatted at 2025-04-11 13:57:50 on dist-test-slave-jcj2"
I20250411 13:57:50.175386 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.003s	sys 0.001s
I20250411 13:57:50.178475 18641 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:50.179229 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.002s	sys 0.001s
I20250411 13:57:50.179567 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/ts-1-root
uuid: "f4a8aec98c4c422ca799f69f62076d12"
format_stamp: "Formatted at 2025-04-11 13:57:50 on dist-test-slave-jcj2"
I20250411 13:57:50.179828 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/ts-1-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/ts-1-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/ts-1-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:50.191077 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:50.192082 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:50.193341 15812 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:50.195688 15812 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:50.195891 15812 ts_tablet_manager.cc:525] Time spent load tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:50.196127 15812 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:50.196300 15812 ts_tablet_manager.cc:589] Time spent register tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:50.231344 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.2:34429
I20250411 13:57:50.231426 18703 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.2:34429 every 8 connection(s)
I20250411 13:57:50.238910 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:50.250909 18711 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:50.250952 18704 heartbeater.cc:344] Connected to a master server at 127.15.113.61:43265
I20250411 13:57:50.252125 18704 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:50.252974 18705 heartbeater.cc:344] Connected to a master server at 127.15.113.60:36095
I20250411 13:57:50.253155 18704 heartbeater.cc:507] Master 127.15.113.61:43265 requested a full tablet report, sending...
I20250411 13:57:50.253342 18705 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:50.255357 18705 heartbeater.cc:507] Master 127.15.113.60:36095 requested a full tablet report, sending...
W20250411 13:57:50.256168 18712 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:50.257149 18410 ts_manager.cc:194] Registered new tserver with Master: f4a8aec98c4c422ca799f69f62076d12 (127.15.113.2:34429)
I20250411 13:57:50.259361 18475 ts_manager.cc:194] Registered new tserver with Master: f4a8aec98c4c422ca799f69f62076d12 (127.15.113.2:34429)
W20250411 13:57:50.263489 18714 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:50.264055 18706 heartbeater.cc:344] Connected to a master server at 127.15.113.62:38391
I20250411 13:57:50.264113 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:50.264613 18706 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:50.265313 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
I20250411 13:57:50.265403 18706 heartbeater.cc:507] Master 127.15.113.62:38391 requested a full tablet report, sending...
W20250411 13:57:50.265565 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:50.265807 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379870265791 us; error 0 us; skew 500 ppm
I20250411 13:57:50.266464 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:50.268047 18345 ts_manager.cc:194] Registered new tserver with Master: f4a8aec98c4c422ca799f69f62076d12 (127.15.113.2:34429)
I20250411 13:57:50.269205 15812 webserver.cc:466] Webserver started at http://127.15.113.3:35223/ using document root <none> and password file <none>
I20250411 13:57:50.269702 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:50.269902 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:50.270146 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:50.271261 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/ts-2-root/instance:
uuid: "0c96f521a079440da6dfe205c16377ee"
format_stamp: "Formatted at 2025-04-11 13:57:50 on dist-test-slave-jcj2"
I20250411 13:57:50.275502 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.003s	sys 0.001s
I20250411 13:57:50.278566 18719 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:50.279361 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.002s	sys 0.001s
I20250411 13:57:50.279664 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/ts-2-root
uuid: "0c96f521a079440da6dfe205c16377ee"
format_stamp: "Formatted at 2025-04-11 13:57:50 on dist-test-slave-jcj2"
I20250411 13:57:50.279929 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/ts-2-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/ts-2-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster.1744379822517842-15812-0/minicluster-data/ts-2-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:50.283510 18525 raft_consensus.cc:491] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:57:50.283993 18525 raft_consensus.cc:513] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a48ffb81d16f42fe8310518e1577cb06" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38391 } } peers { permanent_uuid: "fdfc4352b255464f810d4e46e7ae440e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 43265 } } peers { permanent_uuid: "9dd393f25aff447caff18546cac8b357" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } }
I20250411 13:57:50.285517 18525 leader_election.cc:290] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers a48ffb81d16f42fe8310518e1577cb06 (127.15.113.62:38391), 9dd393f25aff447caff18546cac8b357 (127.15.113.60:36095)
I20250411 13:57:50.286480 18485 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "fdfc4352b255464f810d4e46e7ae440e" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "9dd393f25aff447caff18546cac8b357" is_pre_election: true
I20250411 13:57:50.286393 18355 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "fdfc4352b255464f810d4e46e7ae440e" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "a48ffb81d16f42fe8310518e1577cb06" is_pre_election: true
I20250411 13:57:50.287125 18485 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 9dd393f25aff447caff18546cac8b357 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate fdfc4352b255464f810d4e46e7ae440e in term 0.
I20250411 13:57:50.287170 18355 raft_consensus.cc:2466] T 00000000000000000000000000000000 P a48ffb81d16f42fe8310518e1577cb06 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate fdfc4352b255464f810d4e46e7ae440e in term 0.
I20250411 13:57:50.288496 18394 leader_election.cc:304] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: a48ffb81d16f42fe8310518e1577cb06, fdfc4352b255464f810d4e46e7ae440e; no voters: 
I20250411 13:57:50.289219 18525 raft_consensus.cc:2802] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [term 0 FOLLOWER]: Leader pre-election won for term 1
I20250411 13:57:50.289510 18525 raft_consensus.cc:491] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [term 0 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
I20250411 13:57:50.289775 18525 raft_consensus.cc:3058] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:50.294211 18525 raft_consensus.cc:513] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [term 1 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a48ffb81d16f42fe8310518e1577cb06" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38391 } } peers { permanent_uuid: "fdfc4352b255464f810d4e46e7ae440e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 43265 } } peers { permanent_uuid: "9dd393f25aff447caff18546cac8b357" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } }
I20250411 13:57:50.295804 18525 leader_election.cc:290] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [CANDIDATE]: Term 1 election: Requested vote from peers a48ffb81d16f42fe8310518e1577cb06 (127.15.113.62:38391), 9dd393f25aff447caff18546cac8b357 (127.15.113.60:36095)
I20250411 13:57:50.296576 18355 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "fdfc4352b255464f810d4e46e7ae440e" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "a48ffb81d16f42fe8310518e1577cb06"
I20250411 13:57:50.296888 18485 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "fdfc4352b255464f810d4e46e7ae440e" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "9dd393f25aff447caff18546cac8b357"
I20250411 13:57:50.297117 18355 raft_consensus.cc:3058] T 00000000000000000000000000000000 P a48ffb81d16f42fe8310518e1577cb06 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:50.297246 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:50.297408 18485 raft_consensus.cc:3058] T 00000000000000000000000000000000 P 9dd393f25aff447caff18546cac8b357 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:50.298420 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:50.303459 15812 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:50.303767 18355 raft_consensus.cc:2466] T 00000000000000000000000000000000 P a48ffb81d16f42fe8310518e1577cb06 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate fdfc4352b255464f810d4e46e7ae440e in term 1.
I20250411 13:57:50.303767 18485 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 9dd393f25aff447caff18546cac8b357 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate fdfc4352b255464f810d4e46e7ae440e in term 1.
I20250411 13:57:50.304989 18394 leader_election.cc:304] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [CANDIDATE]: Term 1 election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 9dd393f25aff447caff18546cac8b357, fdfc4352b255464f810d4e46e7ae440e; no voters: 
I20250411 13:57:50.305586 18525 raft_consensus.cc:2802] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [term 1 FOLLOWER]: Leader election won for term 1
I20250411 13:57:50.306406 15812 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:50.306651 15812 ts_tablet_manager.cc:525] Time spent load tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:50.306908 15812 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:50.306911 18525 raft_consensus.cc:695] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [term 1 LEADER]: Becoming Leader. State: Replica: fdfc4352b255464f810d4e46e7ae440e, State: Running, Role: LEADER
I20250411 13:57:50.307341 15812 ts_tablet_manager.cc:589] Time spent register tablets: real 0.000s	user 0.001s	sys 0.000s
I20250411 13:57:50.307806 18525 consensus_queue.cc:237] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [LEADER]: Queue going to LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 1, Majority size: 2, State: 0, Mode: LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a48ffb81d16f42fe8310518e1577cb06" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38391 } } peers { permanent_uuid: "fdfc4352b255464f810d4e46e7ae440e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 43265 } } peers { permanent_uuid: "9dd393f25aff447caff18546cac8b357" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } }
I20250411 13:57:50.311115 18726 sys_catalog.cc:455] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [sys.catalog]: SysCatalogTable state changed. Reason: New leader fdfc4352b255464f810d4e46e7ae440e. Latest consensus state: current_term: 1 leader_uuid: "fdfc4352b255464f810d4e46e7ae440e" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a48ffb81d16f42fe8310518e1577cb06" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38391 } } peers { permanent_uuid: "fdfc4352b255464f810d4e46e7ae440e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 43265 } } peers { permanent_uuid: "9dd393f25aff447caff18546cac8b357" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } } }
I20250411 13:57:50.311650 18726 sys_catalog.cc:458] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [sys.catalog]: This master's current role is: LEADER
I20250411 13:57:50.313162 18729 catalog_manager.cc:1477] Loading table and tablet metadata into memory...
I20250411 13:57:50.318444 18729 catalog_manager.cc:1486] Initializing Kudu cluster ID...
I20250411 13:57:50.328009 18485 raft_consensus.cc:1273] T 00000000000000000000000000000000 P 9dd393f25aff447caff18546cac8b357 [term 1 FOLLOWER]: Refusing update from remote peer fdfc4352b255464f810d4e46e7ae440e: Log matching property violated. Preceding OpId in replica: term: 0 index: 0. Preceding OpId from leader: term: 1 index: 2. (index mismatch)
I20250411 13:57:50.328212 18355 raft_consensus.cc:1273] T 00000000000000000000000000000000 P a48ffb81d16f42fe8310518e1577cb06 [term 1 FOLLOWER]: Refusing update from remote peer fdfc4352b255464f810d4e46e7ae440e: Log matching property violated. Preceding OpId in replica: term: 0 index: 0. Preceding OpId from leader: term: 1 index: 2. (index mismatch)
I20250411 13:57:50.329624 18525 consensus_queue.cc:1035] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [LEADER]: Connected to new peer: Peer: permanent_uuid: "a48ffb81d16f42fe8310518e1577cb06" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38391 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:57:50.330335 18726 consensus_queue.cc:1035] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [LEADER]: Connected to new peer: Peer: permanent_uuid: "9dd393f25aff447caff18546cac8b357" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:57:50.340929 18518 sys_catalog.cc:455] T 00000000000000000000000000000000 P a48ffb81d16f42fe8310518e1577cb06 [sys.catalog]: SysCatalogTable state changed. Reason: New leader fdfc4352b255464f810d4e46e7ae440e. Latest consensus state: current_term: 1 leader_uuid: "fdfc4352b255464f810d4e46e7ae440e" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a48ffb81d16f42fe8310518e1577cb06" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38391 } } peers { permanent_uuid: "fdfc4352b255464f810d4e46e7ae440e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 43265 } } peers { permanent_uuid: "9dd393f25aff447caff18546cac8b357" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } } }
I20250411 13:57:50.341892 18518 sys_catalog.cc:458] T 00000000000000000000000000000000 P a48ffb81d16f42fe8310518e1577cb06 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:50.348661 18520 sys_catalog.cc:455] T 00000000000000000000000000000000 P 9dd393f25aff447caff18546cac8b357 [sys.catalog]: SysCatalogTable state changed. Reason: New leader fdfc4352b255464f810d4e46e7ae440e. Latest consensus state: current_term: 1 leader_uuid: "fdfc4352b255464f810d4e46e7ae440e" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a48ffb81d16f42fe8310518e1577cb06" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38391 } } peers { permanent_uuid: "fdfc4352b255464f810d4e46e7ae440e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 43265 } } peers { permanent_uuid: "9dd393f25aff447caff18546cac8b357" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } } }
I20250411 13:57:50.349375 18520 sys_catalog.cc:458] T 00000000000000000000000000000000 P 9dd393f25aff447caff18546cac8b357 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:50.350590 18726 sys_catalog.cc:455] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [sys.catalog]: SysCatalogTable state changed. Reason: Peer health change. Latest consensus state: current_term: 1 leader_uuid: "fdfc4352b255464f810d4e46e7ae440e" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a48ffb81d16f42fe8310518e1577cb06" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38391 } } peers { permanent_uuid: "fdfc4352b255464f810d4e46e7ae440e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 43265 } } peers { permanent_uuid: "9dd393f25aff447caff18546cac8b357" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } } }
I20250411 13:57:50.351572 18726 sys_catalog.cc:458] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [sys.catalog]: This master's current role is: LEADER
I20250411 13:57:50.352986 18729 catalog_manager.cc:1349] Generated new cluster ID: 2ff1b86c51ad4cc2b154ada6b0628ea0
I20250411 13:57:50.353385 18729 catalog_manager.cc:1497] Initializing Kudu internal certificate authority...
I20250411 13:57:50.362300 18518 sys_catalog.cc:455] T 00000000000000000000000000000000 P a48ffb81d16f42fe8310518e1577cb06 [sys.catalog]: SysCatalogTable state changed. Reason: Replicated consensus-only round. Latest consensus state: current_term: 1 leader_uuid: "fdfc4352b255464f810d4e46e7ae440e" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a48ffb81d16f42fe8310518e1577cb06" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38391 } } peers { permanent_uuid: "fdfc4352b255464f810d4e46e7ae440e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 43265 } } peers { permanent_uuid: "9dd393f25aff447caff18546cac8b357" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } } }
I20250411 13:57:50.363090 18518 sys_catalog.cc:458] T 00000000000000000000000000000000 P a48ffb81d16f42fe8310518e1577cb06 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:50.369609 18525 sys_catalog.cc:455] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [sys.catalog]: SysCatalogTable state changed. Reason: Peer health change. Latest consensus state: current_term: 1 leader_uuid: "fdfc4352b255464f810d4e46e7ae440e" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a48ffb81d16f42fe8310518e1577cb06" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38391 } } peers { permanent_uuid: "fdfc4352b255464f810d4e46e7ae440e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 43265 } } peers { permanent_uuid: "9dd393f25aff447caff18546cac8b357" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } } }
I20250411 13:57:50.370618 18525 sys_catalog.cc:458] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [sys.catalog]: This master's current role is: LEADER
I20250411 13:57:50.374042 18520 sys_catalog.cc:455] T 00000000000000000000000000000000 P 9dd393f25aff447caff18546cac8b357 [sys.catalog]: SysCatalogTable state changed. Reason: Replicated consensus-only round. Latest consensus state: current_term: 1 leader_uuid: "fdfc4352b255464f810d4e46e7ae440e" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "a48ffb81d16f42fe8310518e1577cb06" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 38391 } } peers { permanent_uuid: "fdfc4352b255464f810d4e46e7ae440e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 43265 } } peers { permanent_uuid: "9dd393f25aff447caff18546cac8b357" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36095 } } }
I20250411 13:57:50.375928 18520 sys_catalog.cc:458] T 00000000000000000000000000000000 P 9dd393f25aff447caff18546cac8b357 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:50.397436 18729 catalog_manager.cc:1372] Generated new certificate authority record
I20250411 13:57:50.399670 18729 catalog_manager.cc:1506] Loading token signing keys...
I20250411 13:57:50.408324 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.3:46367
I20250411 13:57:50.408394 18793 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.3:46367 every 8 connection(s)
I20250411 13:57:50.439857 18795 heartbeater.cc:344] Connected to a master server at 127.15.113.60:36095
I20250411 13:57:50.440271 18795 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:50.440611 18729 catalog_manager.cc:5954] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e: Generated new TSK 0
I20250411 13:57:50.441157 18795 heartbeater.cc:507] Master 127.15.113.60:36095 requested a full tablet report, sending...
I20250411 13:57:50.441285 18729 catalog_manager.cc:1516] Initializing in-progress tserver states...
I20250411 13:57:50.443042 18794 heartbeater.cc:344] Connected to a master server at 127.15.113.61:43265
I20250411 13:57:50.443395 18794 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:50.444250 18794 heartbeater.cc:507] Master 127.15.113.61:43265 requested a full tablet report, sending...
I20250411 13:57:50.446555 18410 ts_manager.cc:194] Registered new tserver with Master: 0c96f521a079440da6dfe205c16377ee (127.15.113.3:46367)
I20250411 13:57:50.446633 18475 ts_manager.cc:194] Registered new tserver with Master: 0c96f521a079440da6dfe205c16377ee (127.15.113.3:46367)
I20250411 13:57:50.447865 18796 heartbeater.cc:344] Connected to a master server at 127.15.113.62:38391
I20250411 13:57:50.448221 18796 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:50.448828 18410 master_service.cc:496] Signed X509 certificate for tserver {username='slave'} at 127.0.0.1:45704
I20250411 13:57:50.448870 18796 heartbeater.cc:507] Master 127.15.113.62:38391 requested a full tablet report, sending...
I20250411 13:57:50.451550 18345 ts_manager.cc:194] Registered new tserver with Master: 0c96f521a079440da6dfe205c16377ee (127.15.113.3:46367)
I20250411 13:57:50.452548 15812 internal_mini_cluster.cc:371] 3 TS(s) registered with all masters after 0.03581339s
I20250411 13:57:50.467890 18345 ts_manager.cc:194] Registered new tserver with Master: fake-ts-uuid (localhost:1000)
I20250411 13:57:50.480751 18410 ts_manager.cc:194] Registered new tserver with Master: fake-ts-uuid (localhost:1000)
I20250411 13:57:50.490630 18475 ts_manager.cc:194] Registered new tserver with Master: fake-ts-uuid (localhost:1000)
I20250411 13:57:50.491978 15812 internal_mini_cluster.cc:371] 4 TS(s) registered with all masters after 0.000242346s
I20250411 13:57:50.493007 15812 tablet_server.cc:178] TabletServer@127.15.113.1:0 shutting down...
I20250411 13:57:50.509639 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:50.525238 15812 tablet_server.cc:195] TabletServer@127.15.113.1:0 shutdown complete.
I20250411 13:57:50.532837 15812 tablet_server.cc:178] TabletServer@127.15.113.2:0 shutting down...
I20250411 13:57:50.549041 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:50.564899 15812 tablet_server.cc:195] TabletServer@127.15.113.2:0 shutdown complete.
I20250411 13:57:50.571856 15812 tablet_server.cc:178] TabletServer@127.15.113.3:0 shutting down...
I20250411 13:57:50.586814 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:50.602427 15812 tablet_server.cc:195] TabletServer@127.15.113.3:0 shutdown complete.
I20250411 13:57:50.609688 15812 master.cc:561] Master@127.15.113.62:38391 shutting down...
I20250411 13:57:50.623800 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P a48ffb81d16f42fe8310518e1577cb06 [term 1 FOLLOWER]: Raft consensus shutting down.
I20250411 13:57:50.624305 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P a48ffb81d16f42fe8310518e1577cb06 [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:50.624567 15812 tablet_replica.cc:331] T 00000000000000000000000000000000 P a48ffb81d16f42fe8310518e1577cb06: stopping tablet replica
I20250411 13:57:50.642961 15812 master.cc:583] Master@127.15.113.62:38391 shutdown complete.
I20250411 13:57:50.652547 15812 master.cc:561] Master@127.15.113.61:43265 shutting down...
I20250411 13:57:50.665531 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [term 1 LEADER]: Raft consensus shutting down.
I20250411 13:57:50.666334 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:50.666762 15812 tablet_replica.cc:331] T 00000000000000000000000000000000 P fdfc4352b255464f810d4e46e7ae440e: stopping tablet replica
I20250411 13:57:50.686313 15812 master.cc:583] Master@127.15.113.61:43265 shutdown complete.
I20250411 13:57:50.696025 15812 master.cc:561] Master@127.15.113.60:36095 shutting down...
I20250411 13:57:50.708611 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P 9dd393f25aff447caff18546cac8b357 [term 1 FOLLOWER]: Raft consensus shutting down.
I20250411 13:57:50.709116 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P 9dd393f25aff447caff18546cac8b357 [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:50.709388 15812 tablet_replica.cc:331] T 00000000000000000000000000000000 P 9dd393f25aff447caff18546cac8b357: stopping tablet replica
I20250411 13:57:50.727411 15812 master.cc:583] Master@127.15.113.60:36095 shutdown complete.
[       OK ] MasterReplicationTest.TestHeartbeatAcceptedByAnyMaster (1057 ms)
[ RUN      ] MasterReplicationTest.TestMasterPeerSetsDontMatch
I20250411 13:57:50.747754 15812 internal_mini_cluster.cc:156] Creating distributed mini masters. Addrs: 127.15.113.62:41873,127.15.113.61:38557,127.15.113.60:36563
I20250411 13:57:50.748899 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:50.753331 18807 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:50.754799 18808 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:50.754899 18810 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:50.755666 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:50.756394 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:50.756583 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:50.756721 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379870756703 us; error 0 us; skew 500 ppm
I20250411 13:57:50.757210 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:50.759495 15812 webserver.cc:466] Webserver started at http://127.15.113.62:45863/ using document root <none> and password file <none>
I20250411 13:57:50.759944 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:50.760109 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:50.760339 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:50.761281 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/master-0-root/instance:
uuid: "b8b61e2aed9a447ea08380582a8a5c51"
format_stamp: "Formatted at 2025-04-11 13:57:50 on dist-test-slave-jcj2"
I20250411 13:57:50.765254 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.002s	sys 0.003s
I20250411 13:57:50.768253 18815 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:50.768934 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.001s	sys 0.000s
I20250411 13:57:50.769189 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/master-0-root
uuid: "b8b61e2aed9a447ea08380582a8a5c51"
format_stamp: "Formatted at 2025-04-11 13:57:50 on dist-test-slave-jcj2"
I20250411 13:57:50.769443 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/master-0-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/master-0-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/master-0-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:50.781746 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:50.782714 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:50.815109 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.62:41873
I20250411 13:57:50.815201 18866 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.62:41873 every 8 connection(s)
I20250411 13:57:50.818742 18867 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 00000000000000000000000000000000. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:57:50.819068 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:50.823943 18869 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:50.825981 18867 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 } has no permanent_uuid. Determining permanent_uuid...
W20250411 13:57:50.826296 18870 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:50.827066 18872 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:50.828541 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:50.829346 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:50.829594 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:50.829775 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379870829757 us; error 0 us; skew 500 ppm
I20250411 13:57:50.830353 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:50.832912 15812 webserver.cc:466] Webserver started at http://127.15.113.61:43975/ using document root <none> and password file <none>
I20250411 13:57:50.833460 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:50.833690 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:50.833995 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:50.835431 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/master-1-root/instance:
uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846"
format_stamp: "Formatted at 2025-04-11 13:57:50 on dist-test-slave-jcj2"
I20250411 13:57:50.838917 18867 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:50.840626 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.005s	user 0.005s	sys 0.000s
W20250411 13:57:50.843961 18867 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:38557: Network error: Client connection negotiation failed: client connection to 127.15.113.61:38557: connect: Connection refused (error 111)
I20250411 13:57:50.844154 18880 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:50.844959 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.002s	sys 0.001s
I20250411 13:57:50.845247 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/master-1-root
uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846"
format_stamp: "Formatted at 2025-04-11 13:57:50 on dist-test-slave-jcj2"
I20250411 13:57:50.845515 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/master-1-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/master-1-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/master-1-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:50.865475 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:50.866480 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:50.867415 18867 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } attempt: 1
W20250411 13:57:50.871657 18867 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:38557: Network error: Client connection negotiation failed: client connection to 127.15.113.61:38557: connect: Connection refused (error 111)
I20250411 13:57:50.900354 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.61:38557
I20250411 13:57:50.900432 18931 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.61:38557 every 8 connection(s)
I20250411 13:57:50.903990 18932 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 00000000000000000000000000000000. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:57:50.904192 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:50.909080 18934 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:50.911254 18935 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:50.911463 18932 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:50.912387 15812 server_base.cc:1034] running on GCE node
W20250411 13:57:50.914054 18937 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:50.915076 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:50.915339 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:50.915539 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379870915522 us; error 0 us; skew 500 ppm
I20250411 13:57:50.916211 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:50.918774 15812 webserver.cc:466] Webserver started at http://127.15.113.60:40283/ using document root <none> and password file <none>
I20250411 13:57:50.919270 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:50.919451 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:50.919700 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:50.920850 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/master-2-root/instance:
uuid: "21962699a13b4a0c8ea62dee04623e38"
format_stamp: "Formatted at 2025-04-11 13:57:50 on dist-test-slave-jcj2"
I20250411 13:57:50.923265 18932 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:50.925721 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.006s	sys 0.000s
I20250411 13:57:50.929615 18944 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:50.930397 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.002s	sys 0.000s
I20250411 13:57:50.930688 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/master-2-root
uuid: "21962699a13b4a0c8ea62dee04623e38"
format_stamp: "Formatted at 2025-04-11 13:57:50 on dist-test-slave-jcj2"
I20250411 13:57:50.931056 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/master-2-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/master-2-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/master-2-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:50.933036 18867 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } attempt: 2
I20250411 13:57:50.934527 18932 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } has no permanent_uuid. Determining permanent_uuid...
W20250411 13:57:50.942224 18932 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:36563: Network error: Client connection negotiation failed: client connection to 127.15.113.60:36563: connect: Connection refused (error 111)
I20250411 13:57:50.944512 18867 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } has no permanent_uuid. Determining permanent_uuid...
W20250411 13:57:50.948627 18867 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:36563: Network error: Client connection negotiation failed: client connection to 127.15.113.60:36563: connect: Connection refused (error 111)
I20250411 13:57:50.950052 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:50.951084 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:50.975082 18867 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } attempt: 1
W20250411 13:57:50.979514 18867 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:36563: Network error: Client connection negotiation failed: client connection to 127.15.113.60:36563: connect: Connection refused (error 111)
I20250411 13:57:50.986302 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.60:36563
I20250411 13:57:50.986383 18996 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.60:36563 every 8 connection(s)
I20250411 13:57:50.989212 15812 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 0
I20250411 13:57:50.990142 18997 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 00000000000000000000000000000000. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:57:50.994840 18932 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } attempt: 1
I20250411 13:57:50.995883 18997 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:51.006914 18997 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:51.013116 18932 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846: Bootstrap starting.
I20250411 13:57:51.016698 18997 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:51.018949 18932 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:51.023370 18932 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846: No bootstrap required, opened a new log
I20250411 13:57:51.026027 18867 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } attempt: 2
I20250411 13:57:51.025887 18932 raft_consensus.cc:357] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "b8b61e2aed9a447ea08380582a8a5c51" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 } } peers { permanent_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } } peers { permanent_uuid: "21962699a13b4a0c8ea62dee04623e38" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } }
I20250411 13:57:51.026589 18932 raft_consensus.cc:383] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:51.026890 18932 raft_consensus.cc:738] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 57d875f18a9e4ea8aaa6dbb2f93f7846, State: Initialized, Role: FOLLOWER
I20250411 13:57:51.027516 18932 consensus_queue.cc:260] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "b8b61e2aed9a447ea08380582a8a5c51" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 } } peers { permanent_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } } peers { permanent_uuid: "21962699a13b4a0c8ea62dee04623e38" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } }
I20250411 13:57:51.029450 19002 sys_catalog.cc:455] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "b8b61e2aed9a447ea08380582a8a5c51" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 } } peers { permanent_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } } peers { permanent_uuid: "21962699a13b4a0c8ea62dee04623e38" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } } }
I20250411 13:57:51.030121 19002 sys_catalog.cc:458] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:51.030957 18932 sys_catalog.cc:564] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:51.033946 18997 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38: Bootstrap starting.
I20250411 13:57:51.040114 18997 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:51.045094 18997 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38: No bootstrap required, opened a new log
I20250411 13:57:51.045210 18867 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P b8b61e2aed9a447ea08380582a8a5c51: Bootstrap starting.
I20250411 13:57:51.048244 18997 raft_consensus.cc:357] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "b8b61e2aed9a447ea08380582a8a5c51" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 } } peers { permanent_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } } peers { permanent_uuid: "21962699a13b4a0c8ea62dee04623e38" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } }
I20250411 13:57:51.048985 18997 raft_consensus.cc:383] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:51.049263 18997 raft_consensus.cc:738] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 21962699a13b4a0c8ea62dee04623e38, State: Initialized, Role: FOLLOWER
I20250411 13:57:51.049958 18997 consensus_queue.cc:260] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "b8b61e2aed9a447ea08380582a8a5c51" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 } } peers { permanent_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } } peers { permanent_uuid: "21962699a13b4a0c8ea62dee04623e38" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } }
I20250411 13:57:51.051455 18867 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P b8b61e2aed9a447ea08380582a8a5c51: Neither blocks nor log segments found. Creating new log.
W20250411 13:57:51.051836 19014 catalog_manager.cc:1560] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846: loading cluster ID for follower catalog manager: Not found: cluster ID entry not found
W20250411 13:57:51.052172 19014 catalog_manager.cc:875] Not found: cluster ID entry not found: failed to prepare follower catalog manager, will retry
I20250411 13:57:51.052031 19015 sys_catalog.cc:455] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "b8b61e2aed9a447ea08380582a8a5c51" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 } } peers { permanent_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } } peers { permanent_uuid: "21962699a13b4a0c8ea62dee04623e38" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } } }
I20250411 13:57:51.052811 19015 sys_catalog.cc:458] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:51.053746 18997 sys_catalog.cc:564] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:51.056308 18867 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P b8b61e2aed9a447ea08380582a8a5c51: No bootstrap required, opened a new log
I20250411 13:57:51.059113 18867 raft_consensus.cc:357] T 00000000000000000000000000000000 P b8b61e2aed9a447ea08380582a8a5c51 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "b8b61e2aed9a447ea08380582a8a5c51" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 } } peers { permanent_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } } peers { permanent_uuid: "21962699a13b4a0c8ea62dee04623e38" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } }
I20250411 13:57:51.059788 18867 raft_consensus.cc:383] T 00000000000000000000000000000000 P b8b61e2aed9a447ea08380582a8a5c51 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:51.060057 18867 raft_consensus.cc:738] T 00000000000000000000000000000000 P b8b61e2aed9a447ea08380582a8a5c51 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: b8b61e2aed9a447ea08380582a8a5c51, State: Initialized, Role: FOLLOWER
I20250411 13:57:51.060659 18867 consensus_queue.cc:260] T 00000000000000000000000000000000 P b8b61e2aed9a447ea08380582a8a5c51 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "b8b61e2aed9a447ea08380582a8a5c51" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 } } peers { permanent_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } } peers { permanent_uuid: "21962699a13b4a0c8ea62dee04623e38" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } }
I20250411 13:57:51.062501 19023 sys_catalog.cc:455] T 00000000000000000000000000000000 P b8b61e2aed9a447ea08380582a8a5c51 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "b8b61e2aed9a447ea08380582a8a5c51" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 } } peers { permanent_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } } peers { permanent_uuid: "21962699a13b4a0c8ea62dee04623e38" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } } }
I20250411 13:57:51.063163 19023 sys_catalog.cc:458] T 00000000000000000000000000000000 P b8b61e2aed9a447ea08380582a8a5c51 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:51.064682 18867 sys_catalog.cc:564] T 00000000000000000000000000000000 P b8b61e2aed9a447ea08380582a8a5c51 [sys.catalog]: configured and running, proceeding with master startup.
W20250411 13:57:51.068311 19028 catalog_manager.cc:1560] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38: loading cluster ID for follower catalog manager: Not found: cluster ID entry not found
W20250411 13:57:51.068598 19028 catalog_manager.cc:875] Not found: cluster ID entry not found: failed to prepare follower catalog manager, will retry
I20250411 13:57:51.074956 15812 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 1
I20250411 13:57:51.075297 15812 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 2
I20250411 13:57:51.077104 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:51.078680 19039 catalog_manager.cc:1560] T 00000000000000000000000000000000 P b8b61e2aed9a447ea08380582a8a5c51: loading cluster ID for follower catalog manager: Not found: cluster ID entry not found
W20250411 13:57:51.079031 19039 catalog_manager.cc:875] Not found: cluster ID entry not found: failed to prepare follower catalog manager, will retry
W20250411 13:57:51.083422 19040 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:51.083998 19041 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:51.085431 19043 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:51.086169 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:51.086907 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:51.087136 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:51.087284 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379871087270 us; error 0 us; skew 500 ppm
I20250411 13:57:51.087774 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:51.090018 15812 webserver.cc:466] Webserver started at http://127.15.113.1:40989/ using document root <none> and password file <none>
I20250411 13:57:51.090461 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:51.090642 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:51.090912 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:51.091966 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/ts-0-root/instance:
uuid: "da037644067f403d9f0e93e3c28aa60c"
format_stamp: "Formatted at 2025-04-11 13:57:51 on dist-test-slave-jcj2"
I20250411 13:57:51.096062 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.005s	sys 0.000s
I20250411 13:57:51.099090 19048 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:51.099802 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.003s	sys 0.000s
I20250411 13:57:51.100067 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/ts-0-root
uuid: "da037644067f403d9f0e93e3c28aa60c"
format_stamp: "Formatted at 2025-04-11 13:57:51 on dist-test-slave-jcj2"
I20250411 13:57:51.100323 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/ts-0-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/ts-0-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/ts-0-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:51.131729 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:51.132791 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:51.134241 15812 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:51.136462 15812 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:51.136669 15812 ts_tablet_manager.cc:525] Time spent load tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:51.136890 15812 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:51.137050 15812 ts_tablet_manager.cc:589] Time spent register tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:51.172699 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.1:45019
I20250411 13:57:51.172782 19110 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.1:45019 every 8 connection(s)
I20250411 13:57:51.185657 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
I20250411 13:57:51.192580 19111 heartbeater.cc:344] Connected to a master server at 127.15.113.60:36563
I20250411 13:57:51.193008 19111 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:51.194048 19111 heartbeater.cc:507] Master 127.15.113.60:36563 requested a full tablet report, sending...
W20250411 13:57:51.196527 19118 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:51.197649 18962 ts_manager.cc:194] Registered new tserver with Master: da037644067f403d9f0e93e3c28aa60c (127.15.113.1:45019)
I20250411 13:57:51.201545 19112 heartbeater.cc:344] Connected to a master server at 127.15.113.61:38557
I20250411 13:57:51.202018 19112 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:51.202273 19113 heartbeater.cc:344] Connected to a master server at 127.15.113.62:41873
I20250411 13:57:51.202699 19113 heartbeater.cc:461] Registering TS with master...
W20250411 13:57:51.202720 19119 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:51.203112 19112 heartbeater.cc:507] Master 127.15.113.61:38557 requested a full tablet report, sending...
I20250411 13:57:51.203711 19113 heartbeater.cc:507] Master 127.15.113.62:41873 requested a full tablet report, sending...
I20250411 13:57:51.205734 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:51.206324 18897 ts_manager.cc:194] Registered new tserver with Master: da037644067f403d9f0e93e3c28aa60c (127.15.113.1:45019)
I20250411 13:57:51.206548 18832 ts_manager.cc:194] Registered new tserver with Master: da037644067f403d9f0e93e3c28aa60c (127.15.113.1:45019)
W20250411 13:57:51.206892 19121 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:51.207930 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:51.208117 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:51.208250 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379871208239 us; error 0 us; skew 500 ppm
I20250411 13:57:51.208679 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:51.210980 15812 webserver.cc:466] Webserver started at http://127.15.113.2:44089/ using document root <none> and password file <none>
I20250411 13:57:51.211422 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:51.211583 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:51.211799 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:51.212731 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/ts-1-root/instance:
uuid: "f65ea839399843768d75ea7f8333f280"
format_stamp: "Formatted at 2025-04-11 13:57:51 on dist-test-slave-jcj2"
I20250411 13:57:51.216943 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.005s	sys 0.000s
I20250411 13:57:51.220003 19126 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:51.220757 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.002s	sys 0.000s
I20250411 13:57:51.221025 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/ts-1-root
uuid: "f65ea839399843768d75ea7f8333f280"
format_stamp: "Formatted at 2025-04-11 13:57:51 on dist-test-slave-jcj2"
I20250411 13:57:51.221256 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/ts-1-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/ts-1-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/ts-1-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:51.240288 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:51.241283 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:51.242555 15812 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:51.244710 15812 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:51.244899 15812 ts_tablet_manager.cc:525] Time spent load tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:51.245155 15812 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:51.245301 15812 ts_tablet_manager.cc:589] Time spent register tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:51.284801 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.2:35501
I20250411 13:57:51.284893 19188 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.2:35501 every 8 connection(s)
I20250411 13:57:51.305711 19015 raft_consensus.cc:491] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:57:51.306420 19015 raft_consensus.cc:513] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "b8b61e2aed9a447ea08380582a8a5c51" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 } } peers { permanent_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } } peers { permanent_uuid: "21962699a13b4a0c8ea62dee04623e38" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } }
I20250411 13:57:51.309438 19002 raft_consensus.cc:491] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:57:51.309851 19015 leader_election.cc:290] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers b8b61e2aed9a447ea08380582a8a5c51 (127.15.113.62:41873), 57d875f18a9e4ea8aaa6dbb2f93f7846 (127.15.113.61:38557)
I20250411 13:57:51.310102 19002 raft_consensus.cc:513] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "b8b61e2aed9a447ea08380582a8a5c51" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 } } peers { permanent_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } } peers { permanent_uuid: "21962699a13b4a0c8ea62dee04623e38" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } }
I20250411 13:57:51.312547 19189 heartbeater.cc:344] Connected to a master server at 127.15.113.60:36563
I20250411 13:57:51.312942 19189 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:51.313678 19189 heartbeater.cc:507] Master 127.15.113.60:36563 requested a full tablet report, sending...
I20250411 13:57:51.313760 18972 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "21962699a13b4a0c8ea62dee04623e38" is_pre_election: true
I20250411 13:57:51.313872 18842 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "21962699a13b4a0c8ea62dee04623e38" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "b8b61e2aed9a447ea08380582a8a5c51" is_pre_election: true
I20250411 13:57:51.314514 18972 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate 57d875f18a9e4ea8aaa6dbb2f93f7846 in term 0.
I20250411 13:57:51.314958 18907 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "21962699a13b4a0c8ea62dee04623e38" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" is_pre_election: true
I20250411 13:57:51.315832 18907 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate 21962699a13b4a0c8ea62dee04623e38 in term 0.
I20250411 13:57:51.316828 18962 ts_manager.cc:194] Registered new tserver with Master: f65ea839399843768d75ea7f8333f280 (127.15.113.2:35501)
I20250411 13:57:51.317977 18882 leader_election.cc:304] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 21962699a13b4a0c8ea62dee04623e38, 57d875f18a9e4ea8aaa6dbb2f93f7846; no voters: 
I20250411 13:57:51.319304 18841 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "b8b61e2aed9a447ea08380582a8a5c51" is_pre_election: true
I20250411 13:57:51.320152 18841 raft_consensus.cc:2466] T 00000000000000000000000000000000 P b8b61e2aed9a447ea08380582a8a5c51 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate 57d875f18a9e4ea8aaa6dbb2f93f7846 in term 0.
I20250411 13:57:51.318392 18946 leader_election.cc:304] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 21962699a13b4a0c8ea62dee04623e38, 57d875f18a9e4ea8aaa6dbb2f93f7846; no voters: 
I20250411 13:57:51.322396 19015 raft_consensus.cc:2802] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [term 0 FOLLOWER]: Leader pre-election won for term 1
I20250411 13:57:51.322918 19015 raft_consensus.cc:491] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [term 0 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
I20250411 13:57:51.323343 19015 raft_consensus.cc:3058] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:51.326308 18842 raft_consensus.cc:2466] T 00000000000000000000000000000000 P b8b61e2aed9a447ea08380582a8a5c51 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate 21962699a13b4a0c8ea62dee04623e38 in term 0.
I20250411 13:57:51.312664 19002 leader_election.cc:290] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers b8b61e2aed9a447ea08380582a8a5c51 (127.15.113.62:41873), 21962699a13b4a0c8ea62dee04623e38 (127.15.113.60:36563)
I20250411 13:57:51.327833 19002 raft_consensus.cc:2802] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [term 0 FOLLOWER]: Leader pre-election won for term 1
I20250411 13:57:51.328274 19002 raft_consensus.cc:491] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [term 0 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
I20250411 13:57:51.328732 19002 raft_consensus.cc:3058] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:51.332625 19190 heartbeater.cc:344] Connected to a master server at 127.15.113.61:38557
I20250411 13:57:51.333119 19190 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:51.334232 19190 heartbeater.cc:507] Master 127.15.113.61:38557 requested a full tablet report, sending...
I20250411 13:57:51.338402 19015 raft_consensus.cc:513] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [term 1 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "b8b61e2aed9a447ea08380582a8a5c51" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 } } peers { permanent_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } } peers { permanent_uuid: "21962699a13b4a0c8ea62dee04623e38" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } }
I20250411 13:57:51.340546 19002 raft_consensus.cc:513] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [term 1 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "b8b61e2aed9a447ea08380582a8a5c51" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 } } peers { permanent_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } } peers { permanent_uuid: "21962699a13b4a0c8ea62dee04623e38" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } }
I20250411 13:57:51.341624 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
I20250411 13:57:51.343757 18897 ts_manager.cc:194] Registered new tserver with Master: f65ea839399843768d75ea7f8333f280 (127.15.113.2:35501)
I20250411 13:57:51.344182 18907 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "21962699a13b4a0c8ea62dee04623e38" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846"
I20250411 13:57:51.344270 18842 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "21962699a13b4a0c8ea62dee04623e38" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "b8b61e2aed9a447ea08380582a8a5c51"
I20250411 13:57:51.344945 18907 raft_consensus.cc:2391] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [term 1 FOLLOWER]: Leader election vote request: Denying vote to candidate 21962699a13b4a0c8ea62dee04623e38 in current term 1: Already voted for candidate 57d875f18a9e4ea8aaa6dbb2f93f7846 in this term.
I20250411 13:57:51.346567 19002 leader_election.cc:290] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [CANDIDATE]: Term 1 election: Requested vote from peers b8b61e2aed9a447ea08380582a8a5c51 (127.15.113.62:41873), 21962699a13b4a0c8ea62dee04623e38 (127.15.113.60:36563)
I20250411 13:57:51.347882 18841 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "b8b61e2aed9a447ea08380582a8a5c51"
I20250411 13:57:51.344892 18842 raft_consensus.cc:3058] T 00000000000000000000000000000000 P b8b61e2aed9a447ea08380582a8a5c51 [term 0 FOLLOWER]: Advancing to term 1
W20250411 13:57:51.351058 19197 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:51.359434 18842 raft_consensus.cc:2466] T 00000000000000000000000000000000 P b8b61e2aed9a447ea08380582a8a5c51 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate 21962699a13b4a0c8ea62dee04623e38 in term 1.
I20250411 13:57:51.360054 19015 leader_election.cc:290] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [CANDIDATE]: Term 1 election: Requested vote from peers b8b61e2aed9a447ea08380582a8a5c51 (127.15.113.62:41873), 57d875f18a9e4ea8aaa6dbb2f93f7846 (127.15.113.61:38557)
I20250411 13:57:51.362161 18947 leader_election.cc:304] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [CANDIDATE]: Term 1 election: Election decided. Result: candidate won. Election summary: received 3 responses out of 3 voters: 2 yes votes; 1 no votes. yes voters: 21962699a13b4a0c8ea62dee04623e38, b8b61e2aed9a447ea08380582a8a5c51; no voters: 57d875f18a9e4ea8aaa6dbb2f93f7846
I20250411 13:57:51.362191 18972 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "21962699a13b4a0c8ea62dee04623e38"
I20250411 13:57:51.360275 19191 heartbeater.cc:344] Connected to a master server at 127.15.113.62:41873
I20250411 13:57:51.363610 19015 raft_consensus.cc:2802] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [term 1 FOLLOWER]: Leader election won for term 1
I20250411 13:57:51.363950 19191 heartbeater.cc:461] Registering TS with master...
W20250411 13:57:51.365069 19200 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:51.365226 19191 heartbeater.cc:507] Master 127.15.113.62:41873 requested a full tablet report, sending...
W20250411 13:57:51.366238 19198 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:51.367638 19015 raft_consensus.cc:695] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [term 1 LEADER]: Becoming Leader. State: Replica: 21962699a13b4a0c8ea62dee04623e38, State: Running, Role: LEADER
I20250411 13:57:51.368386 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:51.368613 18832 ts_manager.cc:194] Registered new tserver with Master: f65ea839399843768d75ea7f8333f280 (127.15.113.2:35501)
I20250411 13:57:51.368525 19015 consensus_queue.cc:237] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [LEADER]: Queue going to LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 1, Majority size: 2, State: 0, Mode: LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "b8b61e2aed9a447ea08380582a8a5c51" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 } } peers { permanent_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } } peers { permanent_uuid: "21962699a13b4a0c8ea62dee04623e38" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } }
I20250411 13:57:51.369318 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:51.369596 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:51.369813 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379871369774 us; error 0 us; skew 500 ppm
I20250411 13:57:51.370496 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:51.373288 18882 leader_election.cc:304] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [CANDIDATE]: Term 1 election: Election decided. Result: candidate lost. Election summary: received 3 responses out of 3 voters: 1 yes votes; 2 no votes. yes voters: 57d875f18a9e4ea8aaa6dbb2f93f7846; no voters: 21962699a13b4a0c8ea62dee04623e38, b8b61e2aed9a447ea08380582a8a5c51
I20250411 13:57:51.373528 19202 sys_catalog.cc:455] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [sys.catalog]: SysCatalogTable state changed. Reason: New leader 21962699a13b4a0c8ea62dee04623e38. Latest consensus state: current_term: 1 leader_uuid: "21962699a13b4a0c8ea62dee04623e38" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "b8b61e2aed9a447ea08380582a8a5c51" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 } } peers { permanent_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } } peers { permanent_uuid: "21962699a13b4a0c8ea62dee04623e38" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } } }
I20250411 13:57:51.374228 19202 sys_catalog.cc:458] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [sys.catalog]: This master's current role is: LEADER
I20250411 13:57:51.374199 19196 raft_consensus.cc:2747] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [term 1 FOLLOWER]: Leader election lost for term 1. Reason: could not achieve majority
I20250411 13:57:51.375953 19206 catalog_manager.cc:1477] Loading table and tablet metadata into memory...
I20250411 13:57:51.375993 15812 webserver.cc:466] Webserver started at http://127.15.113.3:32817/ using document root <none> and password file <none>
I20250411 13:57:51.376685 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:51.376875 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:51.377117 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:51.378052 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/ts-2-root/instance:
uuid: "1310e1b5085d470b88b10a6c3b8435fc"
format_stamp: "Formatted at 2025-04-11 13:57:51 on dist-test-slave-jcj2"
I20250411 13:57:51.380174 19206 catalog_manager.cc:1486] Initializing Kudu cluster ID...
I20250411 13:57:51.382478 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.001s	sys 0.005s
I20250411 13:57:51.386106 19209 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:51.387202 15812 fs_manager.cc:730] Time spent opening block manager: real 0.003s	user 0.003s	sys 0.000s
I20250411 13:57:51.387547 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/ts-2-root
uuid: "1310e1b5085d470b88b10a6c3b8435fc"
format_stamp: "Formatted at 2025-04-11 13:57:51 on dist-test-slave-jcj2"
I20250411 13:57:51.387867 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/ts-2-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/ts-2-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/ts-2-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:51.388322 18842 raft_consensus.cc:1273] T 00000000000000000000000000000000 P b8b61e2aed9a447ea08380582a8a5c51 [term 1 FOLLOWER]: Refusing update from remote peer 21962699a13b4a0c8ea62dee04623e38: Log matching property violated. Preceding OpId in replica: term: 0 index: 0. Preceding OpId from leader: term: 1 index: 2. (index mismatch)
I20250411 13:57:51.388445 18907 raft_consensus.cc:1273] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [term 1 FOLLOWER]: Refusing update from remote peer 21962699a13b4a0c8ea62dee04623e38: Log matching property violated. Preceding OpId in replica: term: 0 index: 0. Preceding OpId from leader: term: 1 index: 2. (index mismatch)
I20250411 13:57:51.389586 19015 consensus_queue.cc:1035] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [LEADER]: Connected to new peer: Peer: permanent_uuid: "b8b61e2aed9a447ea08380582a8a5c51" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:57:51.390141 19202 consensus_queue.cc:1035] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [LEADER]: Connected to new peer: Peer: permanent_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:57:51.398321 19023 sys_catalog.cc:455] T 00000000000000000000000000000000 P b8b61e2aed9a447ea08380582a8a5c51 [sys.catalog]: SysCatalogTable state changed. Reason: New leader 21962699a13b4a0c8ea62dee04623e38. Latest consensus state: current_term: 1 leader_uuid: "21962699a13b4a0c8ea62dee04623e38" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "b8b61e2aed9a447ea08380582a8a5c51" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 } } peers { permanent_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } } peers { permanent_uuid: "21962699a13b4a0c8ea62dee04623e38" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } } }
I20250411 13:57:51.399118 19023 sys_catalog.cc:458] T 00000000000000000000000000000000 P b8b61e2aed9a447ea08380582a8a5c51 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:51.403313 19196 sys_catalog.cc:455] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [sys.catalog]: SysCatalogTable state changed. Reason: New leader 21962699a13b4a0c8ea62dee04623e38. Latest consensus state: current_term: 1 leader_uuid: "21962699a13b4a0c8ea62dee04623e38" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "b8b61e2aed9a447ea08380582a8a5c51" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 } } peers { permanent_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } } peers { permanent_uuid: "21962699a13b4a0c8ea62dee04623e38" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } } }
I20250411 13:57:51.403961 19196 sys_catalog.cc:458] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:51.414526 19206 catalog_manager.cc:1349] Generated new cluster ID: 27cb3a41902e49a0ae54cd30d093912e
I20250411 13:57:51.415067 19206 catalog_manager.cc:1497] Initializing Kudu internal certificate authority...
I20250411 13:57:51.415594 19015 sys_catalog.cc:455] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [sys.catalog]: SysCatalogTable state changed. Reason: Peer health change. Latest consensus state: current_term: 1 leader_uuid: "21962699a13b4a0c8ea62dee04623e38" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "b8b61e2aed9a447ea08380582a8a5c51" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 } } peers { permanent_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } } peers { permanent_uuid: "21962699a13b4a0c8ea62dee04623e38" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } } }
I20250411 13:57:51.416596 19015 sys_catalog.cc:458] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [sys.catalog]: This master's current role is: LEADER
I20250411 13:57:51.417330 19015 sys_catalog.cc:455] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [sys.catalog]: SysCatalogTable state changed. Reason: Peer health change. Latest consensus state: current_term: 1 leader_uuid: "21962699a13b4a0c8ea62dee04623e38" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "b8b61e2aed9a447ea08380582a8a5c51" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 } } peers { permanent_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } } peers { permanent_uuid: "21962699a13b4a0c8ea62dee04623e38" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } } }
I20250411 13:57:51.418258 19015 sys_catalog.cc:458] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [sys.catalog]: This master's current role is: LEADER
I20250411 13:57:51.418994 19196 sys_catalog.cc:455] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [sys.catalog]: SysCatalogTable state changed. Reason: Replicated consensus-only round. Latest consensus state: current_term: 1 leader_uuid: "21962699a13b4a0c8ea62dee04623e38" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "b8b61e2aed9a447ea08380582a8a5c51" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 } } peers { permanent_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } } peers { permanent_uuid: "21962699a13b4a0c8ea62dee04623e38" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } } }
I20250411 13:57:51.419646 19196 sys_catalog.cc:458] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:51.422863 19023 sys_catalog.cc:455] T 00000000000000000000000000000000 P b8b61e2aed9a447ea08380582a8a5c51 [sys.catalog]: SysCatalogTable state changed. Reason: Replicated consensus-only round. Latest consensus state: current_term: 1 leader_uuid: "21962699a13b4a0c8ea62dee04623e38" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "b8b61e2aed9a447ea08380582a8a5c51" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 41873 } } peers { permanent_uuid: "57d875f18a9e4ea8aaa6dbb2f93f7846" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38557 } } peers { permanent_uuid: "21962699a13b4a0c8ea62dee04623e38" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 36563 } } }
I20250411 13:57:51.423610 19023 sys_catalog.cc:458] T 00000000000000000000000000000000 P b8b61e2aed9a447ea08380582a8a5c51 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:51.443209 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:51.444532 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:51.451294 19206 catalog_manager.cc:1372] Generated new certificate authority record
I20250411 13:57:51.456511 15812 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:51.457870 19206 catalog_manager.cc:1506] Loading token signing keys...
I20250411 13:57:51.459216 15812 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:51.459442 15812 ts_tablet_manager.cc:525] Time spent load tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:51.459712 15812 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:51.459941 15812 ts_tablet_manager.cc:589] Time spent register tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:51.480866 19206 catalog_manager.cc:5954] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38: Generated new TSK 0
I20250411 13:57:51.481623 19206 catalog_manager.cc:1516] Initializing in-progress tserver states...
I20250411 13:57:51.508495 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.3:42813
I20250411 13:57:51.508569 19279 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.3:42813 every 8 connection(s)
I20250411 13:57:51.525400 19280 heartbeater.cc:344] Connected to a master server at 127.15.113.60:36563
I20250411 13:57:51.525799 19280 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:51.526559 19280 heartbeater.cc:507] Master 127.15.113.60:36563 requested a full tablet report, sending...
I20250411 13:57:51.526681 19282 heartbeater.cc:344] Connected to a master server at 127.15.113.62:41873
I20250411 13:57:51.527128 19282 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:51.527915 19282 heartbeater.cc:507] Master 127.15.113.62:41873 requested a full tablet report, sending...
I20250411 13:57:51.527988 19281 heartbeater.cc:344] Connected to a master server at 127.15.113.61:38557
I20250411 13:57:51.528525 19281 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:51.528599 18962 ts_manager.cc:194] Registered new tserver with Master: 1310e1b5085d470b88b10a6c3b8435fc (127.15.113.3:42813)
I20250411 13:57:51.529412 19281 heartbeater.cc:507] Master 127.15.113.61:38557 requested a full tablet report, sending...
I20250411 13:57:51.530440 18962 master_service.cc:496] Signed X509 certificate for tserver {username='slave'} at 127.0.0.1:56442
I20250411 13:57:51.530496 18832 ts_manager.cc:194] Registered new tserver with Master: 1310e1b5085d470b88b10a6c3b8435fc (127.15.113.3:42813)
I20250411 13:57:51.531733 18897 ts_manager.cc:194] Registered new tserver with Master: 1310e1b5085d470b88b10a6c3b8435fc (127.15.113.3:42813)
I20250411 13:57:51.531939 15812 internal_mini_cluster.cc:371] 3 TS(s) registered with all masters after 0.018601398s
I20250411 13:57:51.534200 15812 master.cc:561] Master@127.15.113.62:41873 shutting down...
I20250411 13:57:51.551164 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P b8b61e2aed9a447ea08380582a8a5c51 [term 1 FOLLOWER]: Raft consensus shutting down.
I20250411 13:57:51.551677 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P b8b61e2aed9a447ea08380582a8a5c51 [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:51.551990 15812 tablet_replica.cc:331] T 00000000000000000000000000000000 P b8b61e2aed9a447ea08380582a8a5c51: stopping tablet replica
I20250411 13:57:51.570489 15812 master.cc:583] Master@127.15.113.62:41873 shutdown complete.
I20250411 13:57:51.580847 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:51.585338 19288 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:51.585558 19289 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:51.587878 19291 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:51.588011 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:51.589335 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:51.589509 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:51.589635 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379871589625 us; error 0 us; skew 500 ppm
I20250411 13:57:51.590059 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:51.592242 15812 webserver.cc:466] Webserver started at http://127.15.113.62:46583/ using document root <none> and password file <none>
I20250411 13:57:51.592646 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:51.592778 15812 fs_manager.cc:365] Using existing metadata directory in first data directory
I20250411 13:57:51.595964 15812 fs_manager.cc:714] Time spent opening directory manager: real 0.002s	user 0.002s	sys 0.001s
I20250411 13:57:51.598497 19296 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:51.599218 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.002s	sys 0.001s
I20250411 13:57:51.599462 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/master-0-root
uuid: "b8b61e2aed9a447ea08380582a8a5c51"
format_stamp: "Formatted at 2025-04-11 13:57:50 on dist-test-slave-jcj2"
I20250411 13:57:51.599725 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/master-0-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/master-0-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestMasterPeerSetsDontMatch.1744379822517842-15812-0/minicluster-data/master-0-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:51.613843 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:51.614802 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:51.648296 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.62:41873
I20250411 13:57:51.648378 19347 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.62:41873 every 8 connection(s)
I20250411 13:57:51.656322 19348 sys_catalog.cc:263] Verifying existing consensus state
E20250411 13:57:51.660749 19348 master.cc:416] Unable to init master catalog manager: Invalid argument: Unable to initialize catalog manager: Failed to initialize sys tables async: on-disk master list (127.15.113.60:36563, 127.15.113.61:38557, 127.15.113.62:41873) and provided master list (127.0.0.1:55555, 127.0.0.1:55556, 127.15.113.60:36563, 127.15.113.61:38557, 127.15.113.62:41873) differ by more than one address. Their symmetric difference is: 127.0.0.1:55555, 127.0.0.1:55556
I20250411 13:57:51.661337 15812 tablet_server.cc:178] TabletServer@127.15.113.1:0 shutting down...
I20250411 13:57:51.675920 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:51.691959 15812 tablet_server.cc:195] TabletServer@127.15.113.1:0 shutdown complete.
I20250411 13:57:51.699083 15812 tablet_server.cc:178] TabletServer@127.15.113.2:0 shutting down...
I20250411 13:57:51.714680 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:51.730382 15812 tablet_server.cc:195] TabletServer@127.15.113.2:0 shutdown complete.
I20250411 13:57:51.737447 15812 tablet_server.cc:178] TabletServer@127.15.113.3:0 shutting down...
I20250411 13:57:51.753046 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:51.768810 15812 tablet_server.cc:195] TabletServer@127.15.113.3:0 shutdown complete.
I20250411 13:57:51.775975 15812 master.cc:561] Master@127.15.113.62:41873 shutting down...
I20250411 13:57:51.801489 15812 master.cc:583] Master@127.15.113.62:41873 shutdown complete.
I20250411 13:57:51.807480 15812 master.cc:561] Master@127.15.113.61:38557 shutting down...
I20250411 13:57:51.819494 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [term 1 FOLLOWER]: Raft consensus shutting down.
I20250411 13:57:51.819960 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846 [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:51.820236 15812 tablet_replica.cc:331] T 00000000000000000000000000000000 P 57d875f18a9e4ea8aaa6dbb2f93f7846: stopping tablet replica
I20250411 13:57:51.837700 15812 master.cc:583] Master@127.15.113.61:38557 shutdown complete.
I20250411 13:57:51.847250 15812 master.cc:561] Master@127.15.113.60:36563 shutting down...
I20250411 13:57:51.859050 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [term 1 LEADER]: Raft consensus shutting down.
I20250411 13:57:51.859779 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38 [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:51.860142 15812 tablet_replica.cc:331] T 00000000000000000000000000000000 P 21962699a13b4a0c8ea62dee04623e38: stopping tablet replica
I20250411 13:57:51.878819 15812 master.cc:583] Master@127.15.113.60:36563 shutdown complete.
[       OK ] MasterReplicationTest.TestMasterPeerSetsDontMatch (1152 ms)
[ RUN      ] MasterReplicationTest.TestConnectToClusterReturnsAddresses
I20250411 13:57:51.899849 15812 internal_mini_cluster.cc:156] Creating distributed mini masters. Addrs: 127.15.113.62:35717,127.15.113.61:39049,127.15.113.60:43093
I20250411 13:57:51.901022 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:51.905350 19350 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:51.905570 19351 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:51.906654 19353 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:51.907647 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:51.908422 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:51.908578 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:51.908713 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379871908696 us; error 0 us; skew 500 ppm
I20250411 13:57:51.909175 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:51.911319 15812 webserver.cc:466] Webserver started at http://127.15.113.62:38597/ using document root <none> and password file <none>
I20250411 13:57:51.911728 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:51.911892 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:51.912101 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:51.913049 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/master-0-root/instance:
uuid: "2c714562d950469698e10f48fe0013c3"
format_stamp: "Formatted at 2025-04-11 13:57:51 on dist-test-slave-jcj2"
I20250411 13:57:51.917235 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.000s	sys 0.005s
I20250411 13:57:51.920080 19358 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:51.920718 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.001s	sys 0.000s
I20250411 13:57:51.920961 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/master-0-root
uuid: "2c714562d950469698e10f48fe0013c3"
format_stamp: "Formatted at 2025-04-11 13:57:51 on dist-test-slave-jcj2"
I20250411 13:57:51.921195 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/master-0-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/master-0-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/master-0-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:51.936041 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:51.937093 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:51.968936 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.62:35717
I20250411 13:57:51.968999 19409 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.62:35717 every 8 connection(s)
I20250411 13:57:51.972404 19410 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 00000000000000000000000000000000. 1 dirs total, 0 dirs full, 0 dirs failed
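The sys catalog tablet (the all-zero tablet ID above) asks for three data directories, but each minicluster master root is created with a single data directory, so the allocation is clamped to what exists. A minimal sketch of that clamping logic (hypothetical helper, not the data_dirs.cc implementation):

  #include <algorithm>
  #include <iostream>

  // Allocate at most 'requested' directories, capped by how many
  // healthy, non-full directories are actually available.
  int AllocateDirs(int requested, int total, int full, int failed) {
    int available = total - full - failed;
    return std::min(requested, available);
  }

  int main() {
    // Matches the log line: requested 3, 1 dir total, 0 full, 0 failed -> 1.
    std::cout << AllocateDirs(3, 1, 0, 0) << "\n";
  }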
I20250411 13:57:51.972736 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:51.977852 19412 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:51.979380 19413 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:51.980651 19410 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 35717 } has no permanent_uuid. Determining permanent_uuid...
W20250411 13:57:51.980767 19415 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:51.981580 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:51.982239 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:51.982443 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:51.982645 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379871982626 us; error 0 us; skew 500 ppm
I20250411 13:57:51.983439 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:51.985980 15812 webserver.cc:466] Webserver started at http://127.15.113.61:35217/ using document root <none> and password file <none>
I20250411 13:57:51.986444 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:51.986625 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:51.986852 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:51.987864 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/master-1-root/instance:
uuid: "60ddbebb0cee49548523ebcb1836922e"
format_stamp: "Formatted at 2025-04-11 13:57:51 on dist-test-slave-jcj2"
I20250411 13:57:51.992638 19410 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 39049 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:51.992854 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.005s	user 0.000s	sys 0.005s
I20250411 13:57:51.996884 19423 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:51.997728 15812 fs_manager.cc:730] Time spent opening block manager: real 0.003s	user 0.001s	sys 0.000s
W20250411 13:57:51.998059 19410 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:39049: Network error: Client connection negotiation failed: client connection to 127.15.113.61:39049: connect: Connection refused (error 111)
I20250411 13:57:51.998063 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/master-1-root
uuid: "60ddbebb0cee49548523ebcb1836922e"
format_stamp: "Formatted at 2025-04-11 13:57:51 on dist-test-slave-jcj2"
I20250411 13:57:51.998600 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/master-1-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/master-1-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/master-1-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:52.015221 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:52.016213 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:52.049552 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.61:39049
I20250411 13:57:52.049633 19474 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.61:39049 every 8 connection(s)
I20250411 13:57:52.053066 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
I20250411 13:57:52.053197 19475 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 00000000000000000000000000000000. 1 dirs total, 0 dirs full, 0 dirs failed
W20250411 13:57:52.057983 19477 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:52.058851 19478 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:52.061252 19480 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:52.061342 19475 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 35717 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:52.061491 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:52.062472 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:52.062701 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:52.062680 19410 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 39049 } attempt: 1
I20250411 13:57:52.063153 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379872063129 us; error 0 us; skew 500 ppm
I20250411 13:57:52.063980 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:52.068779 15812 webserver.cc:466] Webserver started at http://127.15.113.60:40869/ using document root <none> and password file <none>
I20250411 13:57:52.069396 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:52.069608 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:52.069900 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:52.071282 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/master-2-root/instance:
uuid: "8fb59848ea1f4c9f8e513f33672ac64c"
format_stamp: "Formatted at 2025-04-11 13:57:52 on dist-test-slave-jcj2"
I20250411 13:57:52.074612 19475 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 39049 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:52.075850 19410 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:52.078723 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.007s	user 0.006s	sys 0.000s
W20250411 13:57:52.080965 19410 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:43093: Network error: Client connection negotiation failed: client connection to 127.15.113.60:43093: connect: Connection refused (error 111)
I20250411 13:57:52.083103 19487 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:52.083921 15812 fs_manager.cc:730] Time spent opening block manager: real 0.003s	user 0.004s	sys 0.000s
I20250411 13:57:52.084187 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/master-2-root
uuid: "8fb59848ea1f4c9f8e513f33672ac64c"
format_stamp: "Formatted at 2025-04-11 13:57:52 on dist-test-slave-jcj2"
I20250411 13:57:52.084432 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/master-2-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/master-2-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/master-2-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:52.085956 19475 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } has no permanent_uuid. Determining permanent_uuid...
W20250411 13:57:52.091437 19475 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:43093: Network error: Client connection negotiation failed: client connection to 127.15.113.60:43093: connect: Connection refused (error 111)
I20250411 13:57:52.103433 19410 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } attempt: 1
W20250411 13:57:52.107336 19410 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:43093: Network error: Client connection negotiation failed: client connection to 127.15.113.60:43093: connect: Connection refused (error 111)
I20250411 13:57:52.109946 19475 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } attempt: 1
W20250411 13:57:52.113782 19475 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:43093: Network error: Client connection negotiation failed: client connection to 127.15.113.60:43093: connect: Connection refused (error 111)
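The 'Connection refused' warnings here are expected churn rather than failures: the three masters start sequentially, so masters 0 and 1 begin probing 127.15.113.60:43093 for its permanent UUID before master 2's RPC server has bound (which only happens below at 13:57:52.153777), and the probes are retried until the peer comes up. A self-contained sketch of that retry pattern (hypothetical helper with capped exponential backoff; the actual retry policy in consensus_peers.cc may differ):

  #include <algorithm>
  #include <chrono>
  #include <functional>
  #include <iostream>
  #include <thread>

  // Retry a fallible operation until it succeeds or attempts run out,
  // doubling the delay between tries up to a fixed cap.
  bool RetryWithBackoff(const std::function<bool()>& op,
                        int max_attempts,
                        std::chrono::milliseconds delay) {
    for (int attempt = 1; attempt <= max_attempts; ++attempt) {
      if (op()) return true;
      std::cerr << "attempt " << attempt << " failed; retrying in "
                << delay.count() << " ms\n";
      std::this_thread::sleep_for(delay);
      delay = std::min(delay * 2, std::chrono::milliseconds(1000));
    }
    return false;
  }

  int main() {
    int calls = 0;
    // Simulates "connection refused" on the first two tries, then success.
    bool ok = RetryWithBackoff([&] { return ++calls >= 3; },
                               /*max_attempts=*/5,
                               std::chrono::milliseconds(20));
    std::cout << "succeeded: " << ok << " after " << calls << " calls\n";
  }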
I20250411 13:57:52.119519 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:52.120527 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:52.153777 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.60:43093
I20250411 13:57:52.153869 19539 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.60:43093 every 8 connection(s)
I20250411 13:57:52.156805 15812 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 0
I20250411 13:57:52.157325 19540 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 00000000000000000000000000000000. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:57:52.162750 19540 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 35717 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:52.172425 19540 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 39049 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:52.180476 19540 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:52.183766 19410 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } attempt: 2
I20250411 13:57:52.193260 19475 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } attempt: 2
I20250411 13:57:52.197170 19540 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c: Bootstrap starting.
I20250411 13:57:52.200178 19410 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 2c714562d950469698e10f48fe0013c3: Bootstrap starting.
I20250411 13:57:52.201880 19540 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:52.205673 19410 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P 2c714562d950469698e10f48fe0013c3: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:52.206547 19540 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c: No bootstrap required, opened a new log
I20250411 13:57:52.209043 19540 raft_consensus.cc:357] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "2c714562d950469698e10f48fe0013c3" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 35717 } } peers { permanent_uuid: "60ddbebb0cee49548523ebcb1836922e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 39049 } } peers { permanent_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } }
I20250411 13:57:52.209703 19540 raft_consensus.cc:383] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:52.209852 19475 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 60ddbebb0cee49548523ebcb1836922e: Bootstrap starting.
I20250411 13:57:52.210024 19540 raft_consensus.cc:738] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 8fb59848ea1f4c9f8e513f33672ac64c, State: Initialized, Role: FOLLOWER
I20250411 13:57:52.210243 19410 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 2c714562d950469698e10f48fe0013c3: No bootstrap required, opened a new log
I20250411 13:57:52.210783 19540 consensus_queue.cc:260] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "2c714562d950469698e10f48fe0013c3" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 35717 } } peers { permanent_uuid: "60ddbebb0cee49548523ebcb1836922e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 39049 } } peers { permanent_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } }
I20250411 13:57:52.212744 19547 sys_catalog.cc:455] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "2c714562d950469698e10f48fe0013c3" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 35717 } } peers { permanent_uuid: "60ddbebb0cee49548523ebcb1836922e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 39049 } } peers { permanent_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } } }
I20250411 13:57:52.212805 19410 raft_consensus.cc:357] T 00000000000000000000000000000000 P 2c714562d950469698e10f48fe0013c3 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "2c714562d950469698e10f48fe0013c3" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 35717 } } peers { permanent_uuid: "60ddbebb0cee49548523ebcb1836922e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 39049 } } peers { permanent_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } }
I20250411 13:57:52.213733 19547 sys_catalog.cc:458] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:52.213784 19410 raft_consensus.cc:383] T 00000000000000000000000000000000 P 2c714562d950469698e10f48fe0013c3 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:52.214433 19540 sys_catalog.cc:564] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:52.214371 19410 raft_consensus.cc:738] T 00000000000000000000000000000000 P 2c714562d950469698e10f48fe0013c3 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 2c714562d950469698e10f48fe0013c3, State: Initialized, Role: FOLLOWER
I20250411 13:57:52.215042 19475 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P 60ddbebb0cee49548523ebcb1836922e: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:52.215247 19410 consensus_queue.cc:260] T 00000000000000000000000000000000 P 2c714562d950469698e10f48fe0013c3 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "2c714562d950469698e10f48fe0013c3" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 35717 } } peers { permanent_uuid: "60ddbebb0cee49548523ebcb1836922e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 39049 } } peers { permanent_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } }
I20250411 13:57:52.217397 19548 sys_catalog.cc:455] T 00000000000000000000000000000000 P 2c714562d950469698e10f48fe0013c3 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "2c714562d950469698e10f48fe0013c3" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 35717 } } peers { permanent_uuid: "60ddbebb0cee49548523ebcb1836922e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 39049 } } peers { permanent_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } } }
I20250411 13:57:52.218353 19548 sys_catalog.cc:458] T 00000000000000000000000000000000 P 2c714562d950469698e10f48fe0013c3 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:52.219638 19410 sys_catalog.cc:564] T 00000000000000000000000000000000 P 2c714562d950469698e10f48fe0013c3 [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:52.222004 19475 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 60ddbebb0cee49548523ebcb1836922e: No bootstrap required, opened a new log
I20250411 13:57:52.223989 19547 raft_consensus.cc:491] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:57:52.224391 19547 raft_consensus.cc:513] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "2c714562d950469698e10f48fe0013c3" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 35717 } } peers { permanent_uuid: "60ddbebb0cee49548523ebcb1836922e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 39049 } } peers { permanent_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } }
I20250411 13:57:52.224350 19475 raft_consensus.cc:357] T 00000000000000000000000000000000 P 60ddbebb0cee49548523ebcb1836922e [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "2c714562d950469698e10f48fe0013c3" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 35717 } } peers { permanent_uuid: "60ddbebb0cee49548523ebcb1836922e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 39049 } } peers { permanent_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } }
I20250411 13:57:52.225245 19475 raft_consensus.cc:383] T 00000000000000000000000000000000 P 60ddbebb0cee49548523ebcb1836922e [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:52.225521 19475 raft_consensus.cc:738] T 00000000000000000000000000000000 P 60ddbebb0cee49548523ebcb1836922e [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 60ddbebb0cee49548523ebcb1836922e, State: Initialized, Role: FOLLOWER
I20250411 13:57:52.226785 19547 leader_election.cc:290] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers 2c714562d950469698e10f48fe0013c3 (127.15.113.62:35717), 60ddbebb0cee49548523ebcb1836922e (127.15.113.61:39049)
I20250411 13:57:52.227856 19385 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "2c714562d950469698e10f48fe0013c3" is_pre_election: true
I20250411 13:57:52.228443 19385 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 2c714562d950469698e10f48fe0013c3 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate 8fb59848ea1f4c9f8e513f33672ac64c in term 0.
I20250411 13:57:52.229432 19489 leader_election.cc:304] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 2c714562d950469698e10f48fe0013c3, 8fb59848ea1f4c9f8e513f33672ac64c; no voters: 
I20250411 13:57:52.230021 19547 raft_consensus.cc:2802] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [term 0 FOLLOWER]: Leader pre-election won for term 1
I20250411 13:57:52.230445 19547 raft_consensus.cc:491] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [term 0 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
I20250411 13:57:52.228803 19475 consensus_queue.cc:260] T 00000000000000000000000000000000 P 60ddbebb0cee49548523ebcb1836922e [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "2c714562d950469698e10f48fe0013c3" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 35717 } } peers { permanent_uuid: "60ddbebb0cee49548523ebcb1836922e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 39049 } } peers { permanent_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } }
I20250411 13:57:52.230854 19547 raft_consensus.cc:3058] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:52.240123 19547 raft_consensus.cc:513] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [term 1 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "2c714562d950469698e10f48fe0013c3" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 35717 } } peers { permanent_uuid: "60ddbebb0cee49548523ebcb1836922e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 39049 } } peers { permanent_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } }
I20250411 13:57:52.243433 19385 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "2c714562d950469698e10f48fe0013c3"
I20250411 13:57:52.244163 19385 raft_consensus.cc:3058] T 00000000000000000000000000000000 P 2c714562d950469698e10f48fe0013c3 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:52.245803 19547 leader_election.cc:290] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [CANDIDATE]: Term 1 election: Requested vote from peers 2c714562d950469698e10f48fe0013c3 (127.15.113.62:35717), 60ddbebb0cee49548523ebcb1836922e (127.15.113.61:39049)
I20250411 13:57:52.252839 19385 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 2c714562d950469698e10f48fe0013c3 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate 8fb59848ea1f4c9f8e513f33672ac64c in term 1.
I20250411 13:57:52.255867 19489 leader_election.cc:304] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [CANDIDATE]: Term 1 election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 2c714562d950469698e10f48fe0013c3, 8fb59848ea1f4c9f8e513f33672ac64c; no voters: 
I20250411 13:57:52.257166 19547 raft_consensus.cc:2802] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [term 1 FOLLOWER]: Leader election won for term 1
I20250411 13:57:52.260957 19547 raft_consensus.cc:695] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [term 1 LEADER]: Becoming Leader. State: Replica: 8fb59848ea1f4c9f8e513f33672ac64c, State: Running, Role: LEADER
I20250411 13:57:52.262125 19547 consensus_queue.cc:237] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [LEADER]: Queue going to LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 1, Majority size: 2, State: 0, Mode: LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "2c714562d950469698e10f48fe0013c3" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 35717 } } peers { permanent_uuid: "60ddbebb0cee49548523ebcb1836922e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 39049 } } peers { permanent_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } }
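The election above follows the standard two-phase pattern: a pre-election in which the candidate solicits non-binding votes without touching its term (note it is still '[term 0 FOLLOWER]' when the pre-election is decided), and only after winning that does it advance to term 1 and run the real election. In both phases the decision rule is the same strict-majority count, visible in the summary lines ('received 2 responses out of 3 voters: 2 yes votes') and in 'Majority size: 2'. A minimal sketch of that counting rule (illustrative only):

  #include <iostream>
  #include <set>
  #include <string>

  // A candidate wins once yes-votes (its own included) reach a strict
  // majority of the voters in the active config.
  struct Election {
    int num_voters;
    std::set<std::string> yes_voters;

    int MajoritySize() const { return num_voters / 2 + 1; }
    bool Won() const {
      return static_cast<int>(yes_voters.size()) >= MajoritySize();
    }
  };

  int main() {
    Election e{3, {}};
    e.yes_voters.insert("8fb59848ea1f4c9f8e513f33672ac64c");  // votes for itself
    std::cout << e.Won() << "\n";  // 0: one vote, majority of 3 is 2
    e.yes_voters.insert("2c714562d950469698e10f48fe0013c3");  // peer grants vote
    std::cout << e.Won() << "\n";  // 1: decided without waiting for the third voter
  }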
I20250411 13:57:52.269024 19559 sys_catalog.cc:455] T 00000000000000000000000000000000 P 60ddbebb0cee49548523ebcb1836922e [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "2c714562d950469698e10f48fe0013c3" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 35717 } } peers { permanent_uuid: "60ddbebb0cee49548523ebcb1836922e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 39049 } } peers { permanent_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } } }
I20250411 13:57:52.271873 19559 sys_catalog.cc:458] T 00000000000000000000000000000000 P 60ddbebb0cee49548523ebcb1836922e [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:52.270834 19450 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "60ddbebb0cee49548523ebcb1836922e" is_pre_election: true
I20250411 13:57:52.272420 19450 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 60ddbebb0cee49548523ebcb1836922e [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate 8fb59848ea1f4c9f8e513f33672ac64c in term 0.
I20250411 13:57:52.271399 19449 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "60ddbebb0cee49548523ebcb1836922e"
I20250411 13:57:52.273494 19449 raft_consensus.cc:3058] T 00000000000000000000000000000000 P 60ddbebb0cee49548523ebcb1836922e [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:52.271553 19565 sys_catalog.cc:455] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [sys.catalog]: SysCatalogTable state changed. Reason: New leader 8fb59848ea1f4c9f8e513f33672ac64c. Latest consensus state: current_term: 1 leader_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "2c714562d950469698e10f48fe0013c3" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 35717 } } peers { permanent_uuid: "60ddbebb0cee49548523ebcb1836922e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 39049 } } peers { permanent_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } } }
I20250411 13:57:52.274122 19565 sys_catalog.cc:458] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [sys.catalog]: This master's current role is: LEADER
I20250411 13:57:52.270771 19475 sys_catalog.cc:564] T 00000000000000000000000000000000 P 60ddbebb0cee49548523ebcb1836922e [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:52.288787 19449 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 60ddbebb0cee49548523ebcb1836922e [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate 8fb59848ea1f4c9f8e513f33672ac64c in term 1.
I20250411 13:57:52.302911 19569 catalog_manager.cc:1477] Loading table and tablet metadata into memory...
I20250411 13:57:52.306102 15812 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 1
W20250411 13:57:52.310424 19578 catalog_manager.cc:1560] T 00000000000000000000000000000000 P 2c714562d950469698e10f48fe0013c3: loading cluster ID for follower catalog manager: Not found: cluster ID entry not found
W20250411 13:57:52.310865 19578 catalog_manager.cc:875] Not found: cluster ID entry not found: failed to prepare follower catalog manager, will retry
I20250411 13:57:52.311998 19569 catalog_manager.cc:1486] Initializing Kudu cluster ID...
I20250411 13:57:52.316787 15812 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 2
I20250411 13:57:52.318609 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:52.321313 19586 catalog_manager.cc:1560] T 00000000000000000000000000000000 P 60ddbebb0cee49548523ebcb1836922e: loading cluster ID for follower catalog manager: Not found: cluster ID entry not found
I20250411 13:57:52.321391 19449 raft_consensus.cc:1273] T 00000000000000000000000000000000 P 60ddbebb0cee49548523ebcb1836922e [term 1 FOLLOWER]: Refusing update from remote peer 8fb59848ea1f4c9f8e513f33672ac64c: Log matching property violated. Preceding OpId in replica: term: 0 index: 0. Preceding OpId from leader: term: 1 index: 2. (index mismatch)
W20250411 13:57:52.321825 19586 catalog_manager.cc:875] Not found: cluster ID entry not found: failed to prepare follower catalog manager, will retry
I20250411 13:57:52.321934 19385 raft_consensus.cc:1273] T 00000000000000000000000000000000 P 2c714562d950469698e10f48fe0013c3 [term 1 FOLLOWER]: Refusing update from remote peer 8fb59848ea1f4c9f8e513f33672ac64c: Log matching property violated. Preceding OpId in replica: term: 0 index: 0. Preceding OpId from leader: term: 1 index: 2. (index mismatch)
I20250411 13:57:52.323086 19547 consensus_queue.cc:1035] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [LEADER]: Connected to new peer: Peer: permanent_uuid: "60ddbebb0cee49548523ebcb1836922e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 39049 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:57:52.323701 19565 consensus_queue.cc:1035] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [LEADER]: Connected to new peer: Peer: permanent_uuid: "2c714562d950469698e10f48fe0013c3" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 35717 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
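The 'Log matching property violated ... (index mismatch)' messages are the normal first exchange between a new leader and fresh followers, not errors: the followers' logs are empty (preceding OpId 0.0) while the leader's queue initially assumes everything it has appended (through 1.2) is already replicated. The followers refuse the update, the leader records LMP_MISMATCH and walks its next index back to 1, and replication then proceeds. A small sketch of the consistency check itself (illustrative, not the raft_consensus.cc code):

  #include <cstdint>
  #include <iostream>

  struct OpId { int64_t term; int64_t index; };

  // A follower accepts an append batch only if the leader's preceding
  // (term, index) matches the follower's last appended entry.
  bool PrecedingOpMatches(const OpId& follower_last, const OpId& leader_preceding) {
    return follower_last.term == leader_preceding.term &&
           follower_last.index == leader_preceding.index;
  }

  int main() {
    OpId follower_last{0, 0};     // fresh replica: nothing appended yet
    OpId leader_preceding{1, 2};  // leader's optimistic starting point
    // Prints 0: the mismatch that triggers LMP_MISMATCH above, after which
    // the leader retries with an earlier preceding OpId until it matches.
    std::cout << PrecedingOpMatches(follower_last, leader_preceding) << "\n";
  }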
W20250411 13:57:52.326905 19588 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:52.331846 19559 sys_catalog.cc:455] T 00000000000000000000000000000000 P 60ddbebb0cee49548523ebcb1836922e [sys.catalog]: SysCatalogTable state changed. Reason: New leader 8fb59848ea1f4c9f8e513f33672ac64c. Latest consensus state: current_term: 1 leader_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "2c714562d950469698e10f48fe0013c3" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 35717 } } peers { permanent_uuid: "60ddbebb0cee49548523ebcb1836922e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 39049 } } peers { permanent_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } } }
I20250411 13:57:52.332576 19559 sys_catalog.cc:458] T 00000000000000000000000000000000 P 60ddbebb0cee49548523ebcb1836922e [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:52.334474 19548 sys_catalog.cc:455] T 00000000000000000000000000000000 P 2c714562d950469698e10f48fe0013c3 [sys.catalog]: SysCatalogTable state changed. Reason: New leader 8fb59848ea1f4c9f8e513f33672ac64c. Latest consensus state: current_term: 1 leader_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "2c714562d950469698e10f48fe0013c3" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 35717 } } peers { permanent_uuid: "60ddbebb0cee49548523ebcb1836922e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 39049 } } peers { permanent_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } } }
I20250411 13:57:52.335053 19548 sys_catalog.cc:458] T 00000000000000000000000000000000 P 2c714562d950469698e10f48fe0013c3 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:52.340862 19565 sys_catalog.cc:455] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [sys.catalog]: SysCatalogTable state changed. Reason: Peer health change. Latest consensus state: current_term: 1 leader_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "2c714562d950469698e10f48fe0013c3" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 35717 } } peers { permanent_uuid: "60ddbebb0cee49548523ebcb1836922e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 39049 } } peers { permanent_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } } }
I20250411 13:57:52.341786 19565 sys_catalog.cc:458] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [sys.catalog]: This master's current role is: LEADER
I20250411 13:57:52.342805 19569 catalog_manager.cc:1349] Generated new cluster ID: 2a8107031849425090fd661cf7411771
I20250411 13:57:52.342579 19565 sys_catalog.cc:455] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [sys.catalog]: SysCatalogTable state changed. Reason: Peer health change. Latest consensus state: current_term: 1 leader_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "2c714562d950469698e10f48fe0013c3" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 35717 } } peers { permanent_uuid: "60ddbebb0cee49548523ebcb1836922e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 39049 } } peers { permanent_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } } }
I20250411 13:57:52.343178 19569 catalog_manager.cc:1497] Initializing Kudu internal certificate authority...
I20250411 13:57:52.343437 19565 sys_catalog.cc:458] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [sys.catalog]: This master's current role is: LEADER
W20250411 13:57:52.345409 19589 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:52.352213 19548 sys_catalog.cc:455] T 00000000000000000000000000000000 P 2c714562d950469698e10f48fe0013c3 [sys.catalog]: SysCatalogTable state changed. Reason: Replicated consensus-only round. Latest consensus state: current_term: 1 leader_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "2c714562d950469698e10f48fe0013c3" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 35717 } } peers { permanent_uuid: "60ddbebb0cee49548523ebcb1836922e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 39049 } } peers { permanent_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } } }
I20250411 13:57:52.352891 19548 sys_catalog.cc:458] T 00000000000000000000000000000000 P 2c714562d950469698e10f48fe0013c3 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:52.357854 19559 sys_catalog.cc:455] T 00000000000000000000000000000000 P 60ddbebb0cee49548523ebcb1836922e [sys.catalog]: SysCatalogTable state changed. Reason: Replicated consensus-only round. Latest consensus state: current_term: 1 leader_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "2c714562d950469698e10f48fe0013c3" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 35717 } } peers { permanent_uuid: "60ddbebb0cee49548523ebcb1836922e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 39049 } } peers { permanent_uuid: "8fb59848ea1f4c9f8e513f33672ac64c" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 43093 } } }
I20250411 13:57:52.358533 19559 sys_catalog.cc:458] T 00000000000000000000000000000000 P 60ddbebb0cee49548523ebcb1836922e [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:52.368777 15812 server_base.cc:1034] running on GCE node
W20250411 13:57:52.369856 19598 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:52.370563 19569 catalog_manager.cc:1372] Generated new certificate authority record
I20250411 13:57:52.371132 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:52.371456 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:52.371737 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379872371717 us; error 0 us; skew 500 ppm
I20250411 13:57:52.372210 19569 catalog_manager.cc:1506] Loading token signing keys...
I20250411 13:57:52.372663 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:52.377000 15812 webserver.cc:466] Webserver started at http://127.15.113.1:38033/ using document root <none> and password file <none>
I20250411 13:57:52.377465 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:52.377653 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:52.377902 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:52.378860 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/ts-0-root/instance:
uuid: "d1cc912230494a5e85867aaad9d3d94a"
format_stamp: "Formatted at 2025-04-11 13:57:52 on dist-test-slave-jcj2"
I20250411 13:57:52.385329 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.006s	user 0.001s	sys 0.005s
I20250411 13:57:52.392091 19569 catalog_manager.cc:5954] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c: Generated new TSK 0
I20250411 13:57:52.392877 19569 catalog_manager.cc:1516] Initializing in-progress tserver states...
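Interleaved with the tserver startup, the elected leader runs its one-time catalog bootstrap in a fixed order: generate the cluster ID, create the internal certificate authority, generate token signing key 0, then initialize in-progress tserver state. Each durable record goes through the replicated sys.catalog tablet, which is why the followers' 'cluster ID entry not found ... will retry' warnings resolve on their own once the entries replicate. Sketched as a plain ordered step list (purely illustrative of the sequencing, not the catalog_manager.cc API):

  #include <functional>
  #include <iostream>
  #include <string>
  #include <utility>
  #include <vector>

  int main() {
    // Leader-only bootstrap steps, run once, in order.
    std::vector<std::pair<std::string, std::function<void()>>> steps = {
        {"init cluster ID",     [] { /* write cluster ID record */ }},
        {"init internal CA",    [] { /* write CA key and cert   */ }},
        {"init TSK 0",          [] { /* write token signing key */ }},
        {"init tserver states", [] { /* in-memory only          */ }},
    };
    for (auto& [name, step] : steps) {
      std::cout << "leader bootstrap: " << name << "\n";
      step();  // each durable write is a replicated sys.catalog op
    }
  }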
I20250411 13:57:52.392829 19604 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:52.394304 15812 fs_manager.cc:730] Time spent opening block manager: real 0.006s	user 0.005s	sys 0.000s
I20250411 13:57:52.394652 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/ts-0-root
uuid: "d1cc912230494a5e85867aaad9d3d94a"
format_stamp: "Formatted at 2025-04-11 13:57:52 on dist-test-slave-jcj2"
I20250411 13:57:52.395079 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/ts-0-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/ts-0-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/ts-0-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:52.422122 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:52.423214 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:52.424445 15812 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:52.426651 15812 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:52.426822 15812 ts_tablet_manager.cc:525] Time spent load tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:52.427129 15812 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:52.427274 15812 ts_tablet_manager.cc:589] Time spent register tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:52.469941 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.1:39695
I20250411 13:57:52.470007 19666 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.1:39695 every 8 connection(s)
I20250411 13:57:52.484099 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:52.494216 19674 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:52.496254 19675 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:52.503119 19668 heartbeater.cc:344] Connected to a master server at 127.15.113.60:43093
I20250411 13:57:52.503690 19668 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:52.504801 19668 heartbeater.cc:507] Master 127.15.113.60:43093 requested a full tablet report, sending...
I20250411 13:57:52.507110 19667 heartbeater.cc:344] Connected to a master server at 127.15.113.61:39049
I20250411 13:57:52.507613 19667 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:52.508617 19667 heartbeater.cc:507] Master 127.15.113.61:39049 requested a full tablet report, sending...
I20250411 13:57:52.509881 19669 heartbeater.cc:344] Connected to a master server at 127.15.113.62:35717
I20250411 13:57:52.510268 19669 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:52.510615 19505 ts_manager.cc:194] Registered new tserver with Master: d1cc912230494a5e85867aaad9d3d94a (127.15.113.1:39695)
I20250411 13:57:52.510891 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:52.511135 19669 heartbeater.cc:507] Master 127.15.113.62:35717 requested a full tablet report, sending...
I20250411 13:57:52.511478 19440 ts_manager.cc:194] Registered new tserver with Master: d1cc912230494a5e85867aaad9d3d94a (127.15.113.1:39695)
W20250411 13:57:52.512468 19677 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:52.513274 19505 master_service.cc:496] Signed X509 certificate for tserver {username='slave'} at 127.0.0.1:37948
I20250411 13:57:52.513477 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:52.513823 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:52.513999 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379872513984 us; error 0 us; skew 500 ppm
I20250411 13:57:52.514206 19375 ts_manager.cc:194] Registered new tserver with Master: d1cc912230494a5e85867aaad9d3d94a (127.15.113.1:39695)
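Each tserver heartbeats to all three masters independently; on first contact a master does not recognize the tserver, so it asks for registration plus a full tablet report, records the new tserver, and (on the leader) signs an X509 certificate for it. Subsequent heartbeats carry only incremental reports. A toy model of that first-contact handshake (hypothetical types, for illustration):

  #include <iostream>

  enum class ReportKind { kFull, kIncremental };

  // Master-side view of one tserver: unknown until the first heartbeat.
  struct MasterView {
    bool ts_known = false;
    ReportKind HandleHeartbeat() {
      if (!ts_known) {
        ts_known = true;            // "Registered new tserver with Master"
        return ReportKind::kFull;   // "requested a full tablet report"
      }
      return ReportKind::kIncremental;
    }
  };

  int main() {
    MasterView master;
    bool first_full = master.HandleHeartbeat() == ReportKind::kFull;
    bool next_incr  = master.HandleHeartbeat() == ReportKind::kIncremental;
    std::cout << first_full << " " << next_incr << "\n";  // prints: 1 1
  }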
I20250411 13:57:52.514602 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:52.517361 15812 webserver.cc:466] Webserver started at http://127.15.113.2:40773/ using document root <none> and password file <none>
I20250411 13:57:52.517819 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:52.518005 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:52.518249 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:52.519304 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/ts-1-root/instance:
uuid: "ddd79a5864be4f58964e0ec02f1a3593"
format_stamp: "Formatted at 2025-04-11 13:57:52 on dist-test-slave-jcj2"
I20250411 13:57:52.523391 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.004s	sys 0.000s
I20250411 13:57:52.526746 19682 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:52.527604 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.002s	sys 0.001s
I20250411 13:57:52.527877 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/ts-1-root
uuid: "ddd79a5864be4f58964e0ec02f1a3593"
format_stamp: "Formatted at 2025-04-11 13:57:52 on dist-test-slave-jcj2"
I20250411 13:57:52.528126 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/ts-1-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/ts-1-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/ts-1-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:52.558811 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:52.559942 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:52.561247 15812 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:52.563450 15812 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:52.563643 15812 ts_tablet_manager.cc:525] Time spent load tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:52.563875 15812 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:52.564018 15812 ts_tablet_manager.cc:589] Time spent register tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:52.600073 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.2:39893
I20250411 13:57:52.600173 19744 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.2:39893 every 8 connection(s)
I20250411 13:57:52.619710 19745 heartbeater.cc:344] Connected to a master server at 127.15.113.61:39049
I20250411 13:57:52.620185 19745 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:52.621053 19745 heartbeater.cc:507] Master 127.15.113.61:39049 requested a full tablet report, sending...
I20250411 13:57:52.625418 19440 ts_manager.cc:194] Registered new tserver with Master: ddd79a5864be4f58964e0ec02f1a3593 (127.15.113.2:39893)
I20250411 13:57:52.629926 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
I20250411 13:57:52.632037 19747 heartbeater.cc:344] Connected to a master server at 127.15.113.62:35717
I20250411 13:57:52.632071 19746 heartbeater.cc:344] Connected to a master server at 127.15.113.60:43093
I20250411 13:57:52.632431 19747 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:52.632555 19746 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:52.633132 19747 heartbeater.cc:507] Master 127.15.113.62:35717 requested a full tablet report, sending...
I20250411 13:57:52.633401 19746 heartbeater.cc:507] Master 127.15.113.60:43093 requested a full tablet report, sending...
I20250411 13:57:52.635382 19375 ts_manager.cc:194] Registered new tserver with Master: ddd79a5864be4f58964e0ec02f1a3593 (127.15.113.2:39893)
I20250411 13:57:52.637300 19505 ts_manager.cc:194] Registered new tserver with Master: ddd79a5864be4f58964e0ec02f1a3593 (127.15.113.2:39893)
W20250411 13:57:52.637638 19752 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:52.638599 19753 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:52.639194 19505 master_service.cc:496] Signed X509 certificate for tserver {username='slave'} at 127.0.0.1:37964
W20250411 13:57:52.642155 19755 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:52.642233 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:52.643074 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:52.643262 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:52.643414 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379872643399 us; error 0 us; skew 500 ppm
I20250411 13:57:52.643889 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:52.646095 15812 webserver.cc:466] Webserver started at http://127.15.113.3:42511/ using document root <none> and password file <none>
I20250411 13:57:52.646560 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:52.646728 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:52.647007 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:52.647919 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/ts-2-root/instance:
uuid: "a41c5f1ff434411d9bf1c5158da80bde"
format_stamp: "Formatted at 2025-04-11 13:57:52 on dist-test-slave-jcj2"
I20250411 13:57:52.651986 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.004s	sys 0.000s
I20250411 13:57:52.655028 19760 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:52.655730 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.003s	sys 0.000s
I20250411 13:57:52.656019 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/ts-2-root
uuid: "a41c5f1ff434411d9bf1c5158da80bde"
format_stamp: "Formatted at 2025-04-11 13:57:52 on dist-test-slave-jcj2"
I20250411 13:57:52.656284 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/ts-2-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/ts-2-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToClusterReturnsAddresses.1744379822517842-15812-0/minicluster-data/ts-2-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
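
The recurring "real/user/sys" breakdowns in this log (directory manager creation, block manager open, and so on) come from a scoped-stopwatch pattern: wall-clock and rusage CPU times are captured on entry and the deltas logged on exit. A rough stand-alone equivalent of that pattern, not Kudu's actual Stopwatch/LOG_TIMING machinery:

    #include <sys/resource.h>
    #include <sys/time.h>
    #include <chrono>
    #include <cstdio>
    #include <string>

    class ScopedTiming {
     public:
      explicit ScopedTiming(std::string label)
          : label_(std::move(label)), start_(std::chrono::steady_clock::now()) {
        getrusage(RUSAGE_SELF, &ru_start_);
      }
      ~ScopedTiming() {
        rusage ru_end;
        getrusage(RUSAGE_SELF, &ru_end);
        double real = std::chrono::duration<double>(
            std::chrono::steady_clock::now() - start_).count();
        std::printf("Time spent %s: real %.3fs\tuser %.3fs\tsys %.3fs\n",
                    label_.c_str(), real,
                    TvDiff(ru_end.ru_utime, ru_start_.ru_utime),
                    TvDiff(ru_end.ru_stime, ru_start_.ru_stime));
      }
     private:
      static double TvDiff(const timeval& a, const timeval& b) {
        return (a.tv_sec - b.tv_sec) + (a.tv_usec - b.tv_usec) / 1e6;
      }
      std::string label_;
      std::chrono::steady_clock::time_point start_;
      rusage ru_start_;
    };

    int main() {
      ScopedTiming t("opening block manager");  // logs the deltas on scope exit
    }
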
I20250411 13:57:52.675226 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:52.676357 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:52.677886 15812 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:52.680115 15812 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:52.680312 15812 ts_tablet_manager.cc:525] Time spent loading tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:52.680534 15812 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:52.680699 15812 ts_tablet_manager.cc:589] Time spent registering tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:52.717679 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.3:44499
I20250411 13:57:52.717773 19822 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.3:44499 every 8 connection(s)
I20250411 13:57:52.744561 19823 heartbeater.cc:344] Connected to a master server at 127.15.113.61:39049
I20250411 13:57:52.745033 19823 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:52.745863 19823 heartbeater.cc:507] Master 127.15.113.61:39049 requested a full tablet report, sending...
I20250411 13:57:52.747351 19825 heartbeater.cc:344] Connected to a master server at 127.15.113.62:35717
I20250411 13:57:52.747727 19825 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:52.748499 19440 ts_manager.cc:194] Registered new tserver with Master: a41c5f1ff434411d9bf1c5158da80bde (127.15.113.3:44499)
I20250411 13:57:52.748600 19825 heartbeater.cc:507] Master 127.15.113.62:35717 requested a full tablet report, sending...
I20250411 13:57:52.749703 19824 heartbeater.cc:344] Connected to a master server at 127.15.113.60:43093
I20250411 13:57:52.750068 19824 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:52.750901 19824 heartbeater.cc:507] Master 127.15.113.60:43093 requested a full tablet report, sending...
I20250411 13:57:52.751372 19375 ts_manager.cc:194] Registered new tserver with Master: a41c5f1ff434411d9bf1c5158da80bde (127.15.113.3:44499)
I20250411 13:57:52.752771 19505 ts_manager.cc:194] Registered new tserver with Master: a41c5f1ff434411d9bf1c5158da80bde (127.15.113.3:44499)
I20250411 13:57:52.752920 15812 internal_mini_cluster.cc:371] 3 TS(s) registered with all masters after 0.016767301s
I20250411 13:57:52.754105 19505 master_service.cc:496] Signed X509 certificate for tserver {username='slave'} at 127.0.0.1:37968
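
The "3 TS(s) registered with all masters after 0.016767301s" line is the minicluster's poll-until-condition helper: it repeatedly asks every master how many tablet servers it has registered and returns once each one reports the expected count, or a deadline passes. A hedged sketch of that pattern; CountRegisteredTservers is a hypothetical stand-in for the real RPC:

    #include <chrono>
    #include <thread>
    #include <vector>

    // Hypothetical stand-in: ask master `m` how many tservers it has seen.
    int CountRegisteredTservers(int m) { (void)m; return 3; }

    bool WaitForTabletServerCount(const std::vector<int>& masters, int expected,
                                  std::chrono::milliseconds timeout) {
      auto deadline = std::chrono::steady_clock::now() + timeout;
      while (std::chrono::steady_clock::now() < deadline) {
        bool all_registered = true;
        for (int m : masters) {
          if (CountRegisteredTservers(m) < expected) { all_registered = false; break; }
        }
        if (all_registered) return true;  // "N TS(s) registered with all masters"
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
      }
      return false;
    }

    int main() {
      std::vector<int> masters = {0, 1, 2};
      return WaitForTabletServerCount(masters, 3, std::chrono::seconds(30)) ? 0 : 1;
    }
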
I20250411 13:57:52.832343 15812 tablet_server.cc:178] TabletServer@127.15.113.1:0 shutting down...
I20250411 13:57:52.854972 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:52.872334 15812 tablet_server.cc:195] TabletServer@127.15.113.1:0 shutdown complete.
I20250411 13:57:52.883684 15812 tablet_server.cc:178] TabletServer@127.15.113.2:0 shutting down...
I20250411 13:57:52.909487 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:52.926620 15812 tablet_server.cc:195] TabletServer@127.15.113.2:0 shutdown complete.
I20250411 13:57:52.935708 15812 tablet_server.cc:178] TabletServer@127.15.113.3:0 shutting down...
I20250411 13:57:52.955708 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:52.972770 15812 tablet_server.cc:195] TabletServer@127.15.113.3:0 shutdown complete.
I20250411 13:57:52.981395 15812 master.cc:561] Master@127.15.113.62:35717 shutting down...
I20250411 13:57:52.998703 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P 2c714562d950469698e10f48fe0013c3 [term 1 FOLLOWER]: Raft consensus shutting down.
I20250411 13:57:52.999363 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P 2c714562d950469698e10f48fe0013c3 [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:52.999677 15812 tablet_replica.cc:331] T 00000000000000000000000000000000 P 2c714562d950469698e10f48fe0013c3: stopping tablet replica
W20250411 13:57:53.019438 19489 consensus_peers.cc:487] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c -> Peer 2c714562d950469698e10f48fe0013c3 (127.15.113.62:35717): Couldn't send request to peer 2c714562d950469698e10f48fe0013c3. Status: Network error: Client connection negotiation failed: client connection to 127.15.113.62:35717: connect: Connection refused (error 111). This is attempt 1: this message will repeat every 5th retry.
I20250411 13:57:53.019723 15812 master.cc:583] Master@127.15.113.62:35717 shutdown complete.
I20250411 13:57:53.033125 15812 master.cc:561] Master@127.15.113.61:39049 shutting down...
I20250411 13:57:53.048317 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P 60ddbebb0cee49548523ebcb1836922e [term 1 FOLLOWER]: Raft consensus shutting down.
I20250411 13:57:53.048894 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P 60ddbebb0cee49548523ebcb1836922e [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:53.049175 15812 tablet_replica.cc:331] T 00000000000000000000000000000000 P 60ddbebb0cee49548523ebcb1836922e: stopping tablet replica
I20250411 13:57:53.068130 15812 master.cc:583] Master@127.15.113.61:39049 shutdown complete.
I20250411 13:57:53.079993 15812 master.cc:561] Master@127.15.113.60:43093 shutting down...
I20250411 13:57:53.096748 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [term 1 LEADER]: Raft consensus shutting down.
I20250411 13:57:53.097630 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:53.097941 15812 tablet_replica.cc:331] T 00000000000000000000000000000000 P 8fb59848ea1f4c9f8e513f33672ac64c: stopping tablet replica
I20250411 13:57:53.116691 15812 master.cc:583] Master@127.15.113.60:43093 shutdown complete.
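
Teardown runs in a fixed order: the three tablet servers stop first, then each master shuts down its Raft consensus and sys-catalog replica in turn. The single consensus_peers.cc warning above is expected during this window, since the still-live leader keeps retrying replication to the master that has already stopped listening. Schematically (names are illustrative):

    #include <cstdio>
    #include <vector>

    struct Server {
      const char* name;
      void Shutdown() { std::printf("%s shutdown complete.\n", name); }
    };

    int main() {
      std::vector<Server> tservers = {{"TabletServer-1"}, {"TabletServer-2"}, {"TabletServer-3"}};
      std::vector<Server> masters  = {{"Master-62"}, {"Master-61"}, {"Master-60"}};
      for (auto& ts : tservers) ts.Shutdown();  // data plane first
      for (auto& m : masters) m.Shutdown();     // then the master replicas, one by one
    }
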
[       OK ] MasterReplicationTest.TestConnectToClusterReturnsAddresses (1245 ms)
[ RUN      ] MasterReplicationTest.TestConnectToFollowerMasterOnly
I20250411 13:57:53.145586 15812 internal_mini_cluster.cc:156] Creating distributed mini masters. Addrs: 127.15.113.62:36853,127.15.113.61:37369,127.15.113.60:42571
I20250411 13:57:53.146824 15812 file_cache.cc:492] Constructed file cache with capacity 419430
W20250411 13:57:53.151463 19836 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:53.151713 19835 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:53.152889 19838 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:53.154225 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:53.154995 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:53.155179 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:53.155350 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379873155333 us; error 0 us; skew 500 ppm
I20250411 13:57:53.155814 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:53.162578 15812 webserver.cc:466] Webserver started at http://127.15.113.62:44693/ using document root <none> and password file <none>
I20250411 13:57:53.163115 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:53.163295 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:53.163533 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:53.164605 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/master-0-root/instance:
uuid: "004c9e041df9400aa8cb5040cff98be0"
format_stamp: "Formatted at 2025-04-11 13:57:53 on dist-test-slave-jcj2"
I20250411 13:57:53.168613 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.005s	sys 0.000s
I20250411 13:57:53.171525 19843 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:53.172246 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.003s	sys 0.000s
I20250411 13:57:53.172464 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/master-0-root
uuid: "004c9e041df9400aa8cb5040cff98be0"
format_stamp: "Formatted at 2025-04-11 13:57:53 on dist-test-slave-jcj2"
I20250411 13:57:53.172683 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/master-0-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/master-0-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/master-0-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:53.189976 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:53.191133 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:53.226488 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.62:36853
I20250411 13:57:53.226570 19894 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.62:36853 every 8 connection(s)
I20250411 13:57:53.230298 15812 file_cache.cc:492] Constructed file cache with capacity 419430
I20250411 13:57:53.230512 19895 data_dirs.cc:400] Could only allocate 1 dir of the requested 3 for tablet 00000000000000000000000000000000. 1 dir total, 0 dirs full, 0 dirs failed
W20250411 13:57:53.235888 19897 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:53.236027 19898 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:53.239151 19900 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:53.239449 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:53.239149 19895 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 36853 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:53.240332 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:53.240527 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:53.240705 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379873240686 us; error 0 us; skew 500 ppm
I20250411 13:57:53.241312 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:53.244431 15812 webserver.cc:466] Webserver started at http://127.15.113.61:43121/ using document root <none> and password file <none>
I20250411 13:57:53.244886 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:53.245038 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:53.245292 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:53.246294 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/master-1-root/instance:
uuid: "09f6d09acd7248b283def53ddac78e3e"
format_stamp: "Formatted at 2025-04-11 13:57:53 on dist-test-slave-jcj2"
I20250411 13:57:53.250591 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.004s	sys 0.002s
I20250411 13:57:53.251252 19895 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:53.254534 19907 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:53.255369 15812 fs_manager.cc:730] Time spent opening block manager: real 0.003s	user 0.003s	sys 0.000s
I20250411 13:57:53.255658 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/master-1-root
uuid: "09f6d09acd7248b283def53ddac78e3e"
format_stamp: "Formatted at 2025-04-11 13:57:53 on dist-test-slave-jcj2"
I20250411 13:57:53.256012 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/master-1-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/master-1-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/master-1-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
W20250411 13:57:53.256757 19895 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:37369: Network error: Client connection negotiation failed: client connection to 127.15.113.61:37369: connect: Connection refused (error 111)
I20250411 13:57:53.276225 19895 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } attempt: 1
I20250411 13:57:53.280337 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
W20250411 13:57:53.280436 19895 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:37369: Network error: Client connection negotiation failed: client connection to 127.15.113.61:37369: connect: Connection refused (error 111)
I20250411 13:57:53.281561 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:53.312999 19895 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } attempt: 2
W20250411 13:57:53.318447 19895 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:37369: Network error: Client connection negotiation failed: client connection to 127.15.113.61:37369: connect: Connection refused (error 111)
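
The alternating "Error getting permanent uuid" / "Retrying ... attempt: N" pairs are a bounded retry loop: master-0 starts resolving its peers' UUIDs before those peers' RPC servers are listening, so connection-refused errors here are expected and simply retried with a growing delay. A minimal sketch, assuming a hypothetical GetPermanentUuid RPC wrapper:

    #include <chrono>
    #include <cstdio>
    #include <thread>

    // Stand-in for the RPC: fails while the peer's RPC server isn't up yet.
    bool GetPermanentUuid(const char* addr) { (void)addr; return false; }

    bool ResolvePeerUuid(const char* addr, int max_attempts) {
      auto backoff = std::chrono::milliseconds(20);
      for (int attempt = 1; attempt <= max_attempts; ++attempt) {
        if (GetPermanentUuid(addr)) return true;
        std::printf("Retrying to get permanent uuid for remote peer %s attempt: %d\n",
                    addr, attempt);
        std::this_thread::sleep_for(backoff);
        backoff *= 2;  // back off between attempts, as the retry spacing above suggests
      }
      return false;
    }

    int main() { ResolvePeerUuid("127.15.113.61:37369", 3); }
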
I20250411 13:57:53.320739 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.61:37369
I20250411 13:57:53.320832 19959 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.61:37369 every 8 connection(s)
I20250411 13:57:53.324352 19960 data_dirs.cc:400] Could only allocate 1 dir of the requested 3 for tablet 00000000000000000000000000000000. 1 dir total, 0 dirs full, 0 dirs failed
I20250411 13:57:53.324601 15812 file_cache.cc:492] Constructed file cache with capacity 419430
W20250411 13:57:53.329504 19962 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:53.330214 19963 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:53.333357 15812 server_base.cc:1034] running on GCE node
W20250411 13:57:53.333323 19966 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:53.333271 19960 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 36853 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:53.334681 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:53.334921 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:53.335074 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379873335058 us; error 0 us; skew 500 ppm
I20250411 13:57:53.335788 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:53.338670 15812 webserver.cc:466] Webserver started at http://127.15.113.60:44503/ using document root <none> and password file <none>
I20250411 13:57:53.339298 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:53.339525 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:53.339823 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:53.340922 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/master-2-root/instance:
uuid: "826c78ab96804f2e979676d601c71e45"
format_stamp: "Formatted at 2025-04-11 13:57:53 on dist-test-slave-jcj2"
I20250411 13:57:53.344733 19960 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:53.346618 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.005s	user 0.007s	sys 0.000s
I20250411 13:57:53.350385 19972 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:53.351331 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.003s	sys 0.000s
I20250411 13:57:53.351675 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/master-2-root
uuid: "826c78ab96804f2e979676d601c71e45"
format_stamp: "Formatted at 2025-04-11 13:57:53 on dist-test-slave-jcj2"
I20250411 13:57:53.352006 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/master-2-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/master-2-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/master-2-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:53.357095 19960 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 42571 } has no permanent_uuid. Determining permanent_uuid...
W20250411 13:57:53.362273 19960 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:42571: Network error: Client connection negotiation failed: client connection to 127.15.113.60:42571: connect: Connection refused (error 111)
I20250411 13:57:53.364799 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:53.365990 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:53.402287 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.60:42571
I20250411 13:57:53.402375 20024 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.60:42571 every 8 connection(s)
I20250411 13:57:53.405123 15812 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 0
I20250411 13:57:53.406136 20025 data_dirs.cc:400] Could only allocate 1 dir of the requested 3 for tablet 00000000000000000000000000000000. 1 dir total, 0 dirs full, 0 dirs failed
I20250411 13:57:53.411970 20025 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 36853 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:53.413966 19895 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } attempt: 3
I20250411 13:57:53.423388 20025 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:53.423978 19895 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 42571 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:53.425850 19960 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 42571 } attempt: 1
I20250411 13:57:53.435037 20025 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 42571 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:53.446252 19960 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e: Bootstrap starting.
I20250411 13:57:53.446667 19895 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 004c9e041df9400aa8cb5040cff98be0: Bootstrap starting.
I20250411 13:57:53.451244 19895 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P 004c9e041df9400aa8cb5040cff98be0: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:53.452680 19960 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:53.454725 20025 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 826c78ab96804f2e979676d601c71e45: Bootstrap starting.
I20250411 13:57:53.455610 19895 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 004c9e041df9400aa8cb5040cff98be0: No bootstrap required, opened a new log
I20250411 13:57:53.457993 19960 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e: No bootstrap required, opened a new log
I20250411 13:57:53.458189 19895 raft_consensus.cc:357] T 00000000000000000000000000000000 P 004c9e041df9400aa8cb5040cff98be0 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "004c9e041df9400aa8cb5040cff98be0" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 36853 } } peers { permanent_uuid: "09f6d09acd7248b283def53ddac78e3e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } } peers { permanent_uuid: "826c78ab96804f2e979676d601c71e45" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 42571 } }
I20250411 13:57:53.458765 19895 raft_consensus.cc:383] T 00000000000000000000000000000000 P 004c9e041df9400aa8cb5040cff98be0 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:53.459044 19895 raft_consensus.cc:738] T 00000000000000000000000000000000 P 004c9e041df9400aa8cb5040cff98be0 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 004c9e041df9400aa8cb5040cff98be0, State: Initialized, Role: FOLLOWER
I20250411 13:57:53.459594 19895 consensus_queue.cc:260] T 00000000000000000000000000000000 P 004c9e041df9400aa8cb5040cff98be0 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "004c9e041df9400aa8cb5040cff98be0" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 36853 } } peers { permanent_uuid: "09f6d09acd7248b283def53ddac78e3e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } } peers { permanent_uuid: "826c78ab96804f2e979676d601c71e45" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 42571 } }
I20250411 13:57:53.460747 19960 raft_consensus.cc:357] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "004c9e041df9400aa8cb5040cff98be0" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 36853 } } peers { permanent_uuid: "09f6d09acd7248b283def53ddac78e3e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } } peers { permanent_uuid: "826c78ab96804f2e979676d601c71e45" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 42571 } }
I20250411 13:57:53.461309 20025 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P 826c78ab96804f2e979676d601c71e45: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:53.461452 19960 raft_consensus.cc:383] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:53.461756 19960 raft_consensus.cc:738] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 09f6d09acd7248b283def53ddac78e3e, State: Initialized, Role: FOLLOWER
I20250411 13:57:53.461540 20033 sys_catalog.cc:455] T 00000000000000000000000000000000 P 004c9e041df9400aa8cb5040cff98be0 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "004c9e041df9400aa8cb5040cff98be0" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 36853 } } peers { permanent_uuid: "09f6d09acd7248b283def53ddac78e3e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } } peers { permanent_uuid: "826c78ab96804f2e979676d601c71e45" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 42571 } } }
I20250411 13:57:53.462585 20033 sys_catalog.cc:458] T 00000000000000000000000000000000 P 004c9e041df9400aa8cb5040cff98be0 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:53.462476 19960 consensus_queue.cc:260] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "004c9e041df9400aa8cb5040cff98be0" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 36853 } } peers { permanent_uuid: "09f6d09acd7248b283def53ddac78e3e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } } peers { permanent_uuid: "826c78ab96804f2e979676d601c71e45" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 42571 } }
I20250411 13:57:53.463122 19895 sys_catalog.cc:564] T 00000000000000000000000000000000 P 004c9e041df9400aa8cb5040cff98be0 [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:53.464700 20034 sys_catalog.cc:455] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "004c9e041df9400aa8cb5040cff98be0" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 36853 } } peers { permanent_uuid: "09f6d09acd7248b283def53ddac78e3e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } } peers { permanent_uuid: "826c78ab96804f2e979676d601c71e45" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 42571 } } }
I20250411 13:57:53.465523 20034 sys_catalog.cc:458] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:53.466809 19960 sys_catalog.cc:564] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:53.469094 20025 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 826c78ab96804f2e979676d601c71e45: No bootstrap required, opened a new log
I20250411 13:57:53.472005 20025 raft_consensus.cc:357] T 00000000000000000000000000000000 P 826c78ab96804f2e979676d601c71e45 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "004c9e041df9400aa8cb5040cff98be0" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 36853 } } peers { permanent_uuid: "09f6d09acd7248b283def53ddac78e3e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } } peers { permanent_uuid: "826c78ab96804f2e979676d601c71e45" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 42571 } }
I20250411 13:57:53.472673 20025 raft_consensus.cc:383] T 00000000000000000000000000000000 P 826c78ab96804f2e979676d601c71e45 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:53.473017 20025 raft_consensus.cc:738] T 00000000000000000000000000000000 P 826c78ab96804f2e979676d601c71e45 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 826c78ab96804f2e979676d601c71e45, State: Initialized, Role: FOLLOWER
I20250411 13:57:53.473855 20025 consensus_queue.cc:260] T 00000000000000000000000000000000 P 826c78ab96804f2e979676d601c71e45 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "004c9e041df9400aa8cb5040cff98be0" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 36853 } } peers { permanent_uuid: "09f6d09acd7248b283def53ddac78e3e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } } peers { permanent_uuid: "826c78ab96804f2e979676d601c71e45" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 42571 } }
I20250411 13:57:53.476819 20045 sys_catalog.cc:455] T 00000000000000000000000000000000 P 826c78ab96804f2e979676d601c71e45 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "004c9e041df9400aa8cb5040cff98be0" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 36853 } } peers { permanent_uuid: "09f6d09acd7248b283def53ddac78e3e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } } peers { permanent_uuid: "826c78ab96804f2e979676d601c71e45" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 42571 } } }
I20250411 13:57:53.477877 20045 sys_catalog.cc:458] T 00000000000000000000000000000000 P 826c78ab96804f2e979676d601c71e45 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:53.479074 20025 sys_catalog.cc:564] T 00000000000000000000000000000000 P 826c78ab96804f2e979676d601c71e45 [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:53.480791 15812 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 1
W20250411 13:57:53.486060 20049 catalog_manager.cc:1560] T 00000000000000000000000000000000 P 004c9e041df9400aa8cb5040cff98be0: loading cluster ID for follower catalog manager: Not found: cluster ID entry not found
W20250411 13:57:53.486430 20049 catalog_manager.cc:875] Not found: cluster ID entry not found: failed to prepare follower catalog manager, will retry
I20250411 13:57:53.495697 15812 internal_mini_cluster.cc:184] Waiting to initialize catalog manager on master 2
I20250411 13:57:53.497540 15812 file_cache.cc:492] Constructed file cache with capacity 419430
W20250411 13:57:53.497781 20067 catalog_manager.cc:1560] T 00000000000000000000000000000000 P 826c78ab96804f2e979676d601c71e45: loading cluster ID for follower catalog manager: Not found: cluster ID entry not found
W20250411 13:57:53.498064 20067 catalog_manager.cc:875] Not found: cluster ID entry not found: failed to prepare follower catalog manager, will retry
W20250411 13:57:53.498526 20069 catalog_manager.cc:1560] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e: loading cluster ID for follower catalog manager: Not found: cluster ID entry not found
W20250411 13:57:53.498788 20069 catalog_manager.cc:875] Not found: cluster ID entry not found: failed to prepare follower catalog manager, will retry
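
These "cluster ID entry not found ... will retry" warnings are benign during startup: only the elected leader generates and writes the cluster ID to the sys catalog (it appears further down as "Generated new cluster ID"), while follower catalog managers simply poll until the entry replicates to them. A sketch of that wait, with LoadClusterId standing in for the sys-catalog read:

    #include <chrono>
    #include <cstdio>
    #include <optional>
    #include <string>
    #include <thread>

    // Stand-in: the read fails until the leader has written the entry.
    std::optional<std::string> LoadClusterId() {
      static int calls = 0;
      if (++calls < 3) return std::nullopt;  // "cluster ID entry not found"
      return std::string("e93e825db2204ab4bf0903150363c8eb");  // value from this log
    }

    std::string WaitForClusterId() {
      for (;;) {
        if (auto id = LoadClusterId()) return *id;
        std::puts("failed to prepare follower catalog manager, will retry");
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
      }
    }

    int main() { std::printf("cluster ID: %s\n", WaitForClusterId().c_str()); }
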
W20250411 13:57:53.503628 20070 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:53.504204 20071 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:53.505662 20073 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:53.506376 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:53.507175 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:53.507349 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:53.507524 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379873507498 us; error 0 us; skew 500 ppm
I20250411 13:57:53.507733 20034 raft_consensus.cc:491] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:57:53.508203 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:53.508193 20034 raft_consensus.cc:513] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "004c9e041df9400aa8cb5040cff98be0" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 36853 } } peers { permanent_uuid: "09f6d09acd7248b283def53ddac78e3e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } } peers { permanent_uuid: "826c78ab96804f2e979676d601c71e45" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 42571 } }
I20250411 13:57:53.510402 20034 leader_election.cc:290] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers 004c9e041df9400aa8cb5040cff98be0 (127.15.113.62:36853), 826c78ab96804f2e979676d601c71e45 (127.15.113.60:42571)
I20250411 13:57:53.511250 15812 webserver.cc:466] Webserver started at http://127.15.113.1:39909/ using document root <none> and password file <none>
I20250411 13:57:53.511031 19870 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "09f6d09acd7248b283def53ddac78e3e" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "004c9e041df9400aa8cb5040cff98be0" is_pre_election: true
I20250411 13:57:53.511767 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:53.511736 19870 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 004c9e041df9400aa8cb5040cff98be0 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate 09f6d09acd7248b283def53ddac78e3e in term 0.
I20250411 13:57:53.512063 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:53.512347 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:53.512737 20000 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "09f6d09acd7248b283def53ddac78e3e" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "826c78ab96804f2e979676d601c71e45" is_pre_election: true
I20250411 13:57:53.512970 19911 leader_election.cc:304] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 004c9e041df9400aa8cb5040cff98be0, 09f6d09acd7248b283def53ddac78e3e; no voters: 
I20250411 13:57:53.513209 20000 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 826c78ab96804f2e979676d601c71e45 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate 09f6d09acd7248b283def53ddac78e3e in term 0.
I20250411 13:57:53.513777 20034 raft_consensus.cc:2802] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [term 0 FOLLOWER]: Leader pre-election won for term 1
I20250411 13:57:53.513849 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/ts-0-root/instance:
uuid: "283a8632eb9841acac461410d89ec98d"
format_stamp: "Formatted at 2025-04-11 13:57:53 on dist-test-slave-jcj2"
I20250411 13:57:53.514330 20034 raft_consensus.cc:491] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [term 0 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
I20250411 13:57:53.514684 20034 raft_consensus.cc:3058] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:53.518621 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.004s	sys 0.000s
I20250411 13:57:53.519150 20034 raft_consensus.cc:513] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [term 1 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "004c9e041df9400aa8cb5040cff98be0" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 36853 } } peers { permanent_uuid: "09f6d09acd7248b283def53ddac78e3e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } } peers { permanent_uuid: "826c78ab96804f2e979676d601c71e45" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 42571 } }
I20250411 13:57:53.520453 20034 leader_election.cc:290] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [CANDIDATE]: Term 1 election: Requested vote from peers 004c9e041df9400aa8cb5040cff98be0 (127.15.113.62:36853), 826c78ab96804f2e979676d601c71e45 (127.15.113.60:42571)
I20250411 13:57:53.521273 19870 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "09f6d09acd7248b283def53ddac78e3e" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "004c9e041df9400aa8cb5040cff98be0"
I20250411 13:57:53.521425 20000 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "09f6d09acd7248b283def53ddac78e3e" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "826c78ab96804f2e979676d601c71e45"
I20250411 13:57:53.521824 19870 raft_consensus.cc:3058] T 00000000000000000000000000000000 P 004c9e041df9400aa8cb5040cff98be0 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:53.521893 20000 raft_consensus.cc:3058] T 00000000000000000000000000000000 P 826c78ab96804f2e979676d601c71e45 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:53.522702 20078 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:53.523679 15812 fs_manager.cc:730] Time spent opening block manager: real 0.003s	user 0.003s	sys 0.000s
I20250411 13:57:53.524036 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/ts-0-root
uuid: "283a8632eb9841acac461410d89ec98d"
format_stamp: "Formatted at 2025-04-11 13:57:53 on dist-test-slave-jcj2"
I20250411 13:57:53.524389 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/ts-0-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/ts-0-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/ts-0-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:53.527228 20000 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 826c78ab96804f2e979676d601c71e45 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate 09f6d09acd7248b283def53ddac78e3e in term 1.
I20250411 13:57:53.528262 19910 leader_election.cc:304] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [CANDIDATE]: Term 1 election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 09f6d09acd7248b283def53ddac78e3e, 826c78ab96804f2e979676d601c71e45; no voters: 
I20250411 13:57:53.529142 20034 raft_consensus.cc:2802] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [term 1 FOLLOWER]: Leader election won for term 1
I20250411 13:57:53.529290 19870 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 004c9e041df9400aa8cb5040cff98be0 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate 09f6d09acd7248b283def53ddac78e3e in term 1.
I20250411 13:57:53.530584 20034 raft_consensus.cc:695] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [term 1 LEADER]: Becoming Leader. State: Replica: 09f6d09acd7248b283def53ddac78e3e, State: Running, Role: LEADER
I20250411 13:57:53.531431 20034 consensus_queue.cc:237] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [LEADER]: Queue going to LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 1, Majority size: 2, State: 0, Mode: LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "004c9e041df9400aa8cb5040cff98be0" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 36853 } } peers { permanent_uuid: "09f6d09acd7248b283def53ddac78e3e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } } peers { permanent_uuid: "826c78ab96804f2e979676d601c71e45" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 42571 } }
I20250411 13:57:53.535007 20083 sys_catalog.cc:455] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [sys.catalog]: SysCatalogTable state changed. Reason: New leader 09f6d09acd7248b283def53ddac78e3e. Latest consensus state: current_term: 1 leader_uuid: "09f6d09acd7248b283def53ddac78e3e" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "004c9e041df9400aa8cb5040cff98be0" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 36853 } } peers { permanent_uuid: "09f6d09acd7248b283def53ddac78e3e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } } peers { permanent_uuid: "826c78ab96804f2e979676d601c71e45" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 42571 } } }
I20250411 13:57:53.535818 20083 sys_catalog.cc:458] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [sys.catalog]: This master's current role is: LEADER
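
Both the pre-election and the real election above are decided as soon as a majority of the three voters answers yes; the candidate's own vote counts, so a single granted peer vote suffices ("received 2 responses out of 3 voters: 2 yes votes"). A minimal tally in the spirit of those leader_election.cc lines, not the actual implementation:

    #include <cstdio>

    struct VoteCounter {
      int num_voters;
      int yes = 0;
      int no = 0;

      int MajoritySize() const { return num_voters / 2 + 1; }

      // +1 once won, -1 once lost, 0 while still undecided.
      int RecordVote(bool granted) {
        (granted ? yes : no)++;
        if (yes >= MajoritySize()) return +1;
        if (no >= MajoritySize()) return -1;
        return 0;
      }
    };

    int main() {
      VoteCounter election{3};            // three master replicas
      election.RecordVote(true);          // candidate votes for itself
      if (election.RecordVote(true) > 0)  // first peer grants: 2 of 3, decided
        std::printf("Election decided. Result: candidate won.\n");
    }
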
I20250411 13:57:53.537609 20085 catalog_manager.cc:1477] Loading table and tablet metadata into memory...
I20250411 13:57:53.543805 20085 catalog_manager.cc:1486] Initializing Kudu cluster ID...
I20250411 13:57:53.552870 19870 raft_consensus.cc:1273] T 00000000000000000000000000000000 P 004c9e041df9400aa8cb5040cff98be0 [term 1 FOLLOWER]: Refusing update from remote peer 09f6d09acd7248b283def53ddac78e3e: Log matching property violated. Preceding OpId in replica: term: 0 index: 0. Preceding OpId from leader: term: 1 index: 2. (index mismatch)
I20250411 13:57:53.553463 20000 raft_consensus.cc:1273] T 00000000000000000000000000000000 P 826c78ab96804f2e979676d601c71e45 [term 1 FOLLOWER]: Refusing update from remote peer 09f6d09acd7248b283def53ddac78e3e: Log matching property violated. Preceding OpId in replica: term: 0 index: 0. Preceding OpId from leader: term: 1 index: 2. (index mismatch)
I20250411 13:57:53.554339 20034 consensus_queue.cc:1035] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [LEADER]: Connected to new peer: Peer: permanent_uuid: "004c9e041df9400aa8cb5040cff98be0" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 36853 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:57:53.555017 20083 consensus_queue.cc:1035] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [LEADER]: Connected to new peer: Peer: permanent_uuid: "826c78ab96804f2e979676d601c71e45" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 42571 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
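
The two "Refusing update ... Log matching property violated ... (index mismatch)" lines are the normal first exchange with a brand-new leader, not an error: a follower accepts an append batch only if it already holds the leader's stated preceding (term, index); on mismatch it answers LMP_MISMATCH and the leader rewinds that peer's next index (visible above as "Next index: 1"). The check itself, sketched:

    #include <cstdint>
    #include <cstdio>

    struct OpId { int64_t term; int64_t index; };

    // Follower side of the Raft log-matching check.
    bool PrecedingOpIdMatches(const OpId& in_replica, const OpId& from_leader) {
      return in_replica.term == from_leader.term &&
             in_replica.index == from_leader.index;
    }

    int main() {
      OpId in_replica{0, 0};    // "Preceding OpId in replica: term: 0 index: 0"
      OpId from_leader{1, 2};   // "Preceding OpId from leader: term: 1 index: 2"
      if (!PrecedingOpIdMatches(in_replica, from_leader))
        std::puts("LMP_MISMATCH: leader must retry from an earlier index");
    }
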
I20250411 13:57:53.563072 20033 sys_catalog.cc:455] T 00000000000000000000000000000000 P 004c9e041df9400aa8cb5040cff98be0 [sys.catalog]: SysCatalogTable state changed. Reason: New leader 09f6d09acd7248b283def53ddac78e3e. Latest consensus state: current_term: 1 leader_uuid: "09f6d09acd7248b283def53ddac78e3e" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "004c9e041df9400aa8cb5040cff98be0" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 36853 } } peers { permanent_uuid: "09f6d09acd7248b283def53ddac78e3e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } } peers { permanent_uuid: "826c78ab96804f2e979676d601c71e45" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 42571 } } }
I20250411 13:57:53.563783 20033 sys_catalog.cc:458] T 00000000000000000000000000000000 P 004c9e041df9400aa8cb5040cff98be0 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:53.565928 20045 sys_catalog.cc:455] T 00000000000000000000000000000000 P 826c78ab96804f2e979676d601c71e45 [sys.catalog]: SysCatalogTable state changed. Reason: New leader 09f6d09acd7248b283def53ddac78e3e. Latest consensus state: current_term: 1 leader_uuid: "09f6d09acd7248b283def53ddac78e3e" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "004c9e041df9400aa8cb5040cff98be0" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 36853 } } peers { permanent_uuid: "09f6d09acd7248b283def53ddac78e3e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } } peers { permanent_uuid: "826c78ab96804f2e979676d601c71e45" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 42571 } } }
I20250411 13:57:53.566620 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:53.566643 20045 sys_catalog.cc:458] T 00000000000000000000000000000000 P 826c78ab96804f2e979676d601c71e45 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:53.568114 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:53.579200 20033 sys_catalog.cc:455] T 00000000000000000000000000000000 P 004c9e041df9400aa8cb5040cff98be0 [sys.catalog]: SysCatalogTable state changed. Reason: Replicated consensus-only round. Latest consensus state: current_term: 1 leader_uuid: "09f6d09acd7248b283def53ddac78e3e" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "004c9e041df9400aa8cb5040cff98be0" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 36853 } } peers { permanent_uuid: "09f6d09acd7248b283def53ddac78e3e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } } peers { permanent_uuid: "826c78ab96804f2e979676d601c71e45" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 42571 } } }
I20250411 13:57:53.579982 20033 sys_catalog.cc:458] T 00000000000000000000000000000000 P 004c9e041df9400aa8cb5040cff98be0 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:53.583281 20083 sys_catalog.cc:455] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [sys.catalog]: SysCatalogTable state changed. Reason: Peer health change. Latest consensus state: current_term: 1 leader_uuid: "09f6d09acd7248b283def53ddac78e3e" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "004c9e041df9400aa8cb5040cff98be0" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 36853 } } peers { permanent_uuid: "09f6d09acd7248b283def53ddac78e3e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } } peers { permanent_uuid: "826c78ab96804f2e979676d601c71e45" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 42571 } } }
I20250411 13:57:53.583958 20083 sys_catalog.cc:458] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [sys.catalog]: This master's current role is: LEADER
I20250411 13:57:53.584831 20085 catalog_manager.cc:1349] Generated new cluster ID: e93e825db2204ab4bf0903150363c8eb
I20250411 13:57:53.584568 20083 sys_catalog.cc:455] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [sys.catalog]: SysCatalogTable state changed. Reason: Peer health change. Latest consensus state: current_term: 1 leader_uuid: "09f6d09acd7248b283def53ddac78e3e" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "004c9e041df9400aa8cb5040cff98be0" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 36853 } } peers { permanent_uuid: "09f6d09acd7248b283def53ddac78e3e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } } peers { permanent_uuid: "826c78ab96804f2e979676d601c71e45" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 42571 } } }
I20250411 13:57:53.585109 20085 catalog_manager.cc:1497] Initializing Kudu internal certificate authority...
I20250411 13:57:53.585218 20083 sys_catalog.cc:458] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [sys.catalog]: This master's current role is: LEADER
I20250411 13:57:53.588131 20045 sys_catalog.cc:455] T 00000000000000000000000000000000 P 826c78ab96804f2e979676d601c71e45 [sys.catalog]: SysCatalogTable state changed. Reason: Replicated consensus-only round. Latest consensus state: current_term: 1 leader_uuid: "09f6d09acd7248b283def53ddac78e3e" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "004c9e041df9400aa8cb5040cff98be0" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 36853 } } peers { permanent_uuid: "09f6d09acd7248b283def53ddac78e3e" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 37369 } } peers { permanent_uuid: "826c78ab96804f2e979676d601c71e45" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 42571 } } }
I20250411 13:57:53.589074 20045 sys_catalog.cc:458] T 00000000000000000000000000000000 P 826c78ab96804f2e979676d601c71e45 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:53.589781 15812 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:53.592464 15812 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:53.592656 15812 ts_tablet_manager.cc:525] Time spent load tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:53.592895 15812 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:53.593063 15812 ts_tablet_manager.cc:589] Time spent register tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:53.630034 20085 catalog_manager.cc:1372] Generated new certificate authority record
I20250411 13:57:53.631704 20085 catalog_manager.cc:1506] Loading token signing keys...
I20250411 13:57:53.653337 20085 catalog_manager.cc:5954] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e: Generated new TSK 0
I20250411 13:57:53.656414 20085 catalog_manager.cc:1516] Initializing in-progress tserver states...
I20250411 13:57:53.663321 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.1:42635
I20250411 13:57:53.663471 20151 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.1:42635 every 8 connection(s)
I20250411 13:57:53.675694 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
I20250411 13:57:53.687984 20152 heartbeater.cc:344] Connected to a master server at 127.15.113.60:42571
I20250411 13:57:53.688057 20153 heartbeater.cc:344] Connected to a master server at 127.15.113.61:37369
I20250411 13:57:53.688628 20152 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:53.688704 20153 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:53.689601 20152 heartbeater.cc:507] Master 127.15.113.60:42571 requested a full tablet report, sending...
I20250411 13:57:53.689657 20153 heartbeater.cc:507] Master 127.15.113.61:37369 requested a full tablet report, sending...
W20250411 13:57:53.690375 20159 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:53.693275 19990 ts_manager.cc:194] Registered new tserver with Master: 283a8632eb9841acac461410d89ec98d (127.15.113.1:42635)
W20250411 13:57:53.697068 20160 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:53.698472 19925 ts_manager.cc:194] Registered new tserver with Master: 283a8632eb9841acac461410d89ec98d (127.15.113.1:42635)
I20250411 13:57:53.700029 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:53.701057 19925 master_service.cc:496] Signed X509 certificate for tserver {username='slave'} at 127.0.0.1:35998
W20250411 13:57:53.701244 20163 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:53.702627 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:53.702958 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:53.703042 20154 heartbeater.cc:344] Connected to a master server at 127.15.113.62:36853
I20250411 13:57:53.703198 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379873703176 us; error 0 us; skew 500 ppm
I20250411 13:57:53.703431 20154 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:53.703907 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:53.703963 20154 heartbeater.cc:507] Master 127.15.113.62:36853 requested a full tablet report, sending...
I20250411 13:57:53.706450 19860 ts_manager.cc:194] Registered new tserver with Master: 283a8632eb9841acac461410d89ec98d (127.15.113.1:42635)
I20250411 13:57:53.706666 15812 webserver.cc:466] Webserver started at http://127.15.113.2:34785/ using document root <none> and password file <none>
I20250411 13:57:53.707314 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:53.707540 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:53.707804 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:53.708808 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/ts-1-root/instance:
uuid: "168c1b97162c441a90f507537f023e4c"
format_stamp: "Formatted at 2025-04-11 13:57:53 on dist-test-slave-jcj2"
I20250411 13:57:53.713121 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.006s	sys 0.000s
I20250411 13:57:53.716337 20167 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:53.717087 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.001s	sys 0.000s
I20250411 13:57:53.717357 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/ts-1-root
uuid: "168c1b97162c441a90f507537f023e4c"
format_stamp: "Formatted at 2025-04-11 13:57:53 on dist-test-slave-jcj2"
I20250411 13:57:53.717624 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/ts-1-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/ts-1-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/ts-1-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:53.730561 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:53.731712 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:53.733052 15812 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:53.735353 15812 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:53.735549 15812 ts_tablet_manager.cc:525] Time spent load tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:53.735781 15812 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:53.735935 15812 ts_tablet_manager.cc:589] Time spent register tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:53.773240 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.2:45899
I20250411 13:57:53.773331 20229 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.2:45899 every 8 connection(s)
I20250411 13:57:53.798511 20230 heartbeater.cc:344] Connected to a master server at 127.15.113.60:42571
I20250411 13:57:53.798965 20230 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:53.799832 20230 heartbeater.cc:507] Master 127.15.113.60:42571 requested a full tablet report, sending...
I20250411 13:57:53.802472 19990 ts_manager.cc:194] Registered new tserver with Master: 168c1b97162c441a90f507537f023e4c (127.15.113.2:45899)
I20250411 13:57:53.805178 20231 heartbeater.cc:344] Connected to a master server at 127.15.113.61:37369
I20250411 13:57:53.805605 20231 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:53.805552 15812 file_cache.cc:492] Constructed file cache file cache with capacity 419430
I20250411 13:57:53.806543 20231 heartbeater.cc:507] Master 127.15.113.61:37369 requested a full tablet report, sending...
I20250411 13:57:53.808115 20232 heartbeater.cc:344] Connected to a master server at 127.15.113.62:36853
I20250411 13:57:53.808460 20232 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:53.808786 19925 ts_manager.cc:194] Registered new tserver with Master: 168c1b97162c441a90f507537f023e4c (127.15.113.2:45899)
I20250411 13:57:53.809254 20232 heartbeater.cc:507] Master 127.15.113.62:36853 requested a full tablet report, sending...
I20250411 13:57:53.810420 19925 master_service.cc:496] Signed X509 certificate for tserver {username='slave'} at 127.0.0.1:36012
I20250411 13:57:53.812157 19860 ts_manager.cc:194] Registered new tserver with Master: 168c1b97162c441a90f507537f023e4c (127.15.113.2:45899)
W20250411 13:57:53.813994 20238 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:53.814636 20237 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:53.817308 20240 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:53.817806 15812 server_base.cc:1034] running on GCE node
I20250411 13:57:53.818563 15812 hybrid_clock.cc:584] initializing the hybrid clock with 'system_unsync' time source
W20250411 13:57:53.818778 15812 system_unsync_time.cc:38] NTP support is disabled. Clock error bounds will not be accurate. This configuration is not suitable for distributed clusters.
I20250411 13:57:53.818974 15812 hybrid_clock.cc:648] HybridClock initialized: now 1744379873818957 us; error 0 us; skew 500 ppm
I20250411 13:57:53.819505 15812 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:53.821633 15812 webserver.cc:466] Webserver started at http://127.15.113.3:38173/ using document root <none> and password file <none>
I20250411 13:57:53.822100 15812 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:53.822284 15812 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:53.822521 15812 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:53.823572 15812 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/ts-2-root/instance:
uuid: "4454b321e8d14b85b304b41af6c25ce6"
format_stamp: "Formatted at 2025-04-11 13:57:53 on dist-test-slave-jcj2"
I20250411 13:57:53.827683 15812 fs_manager.cc:696] Time spent creating directory manager: real 0.004s	user 0.000s	sys 0.004s
I20250411 13:57:53.830670 20245 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:53.831377 15812 fs_manager.cc:730] Time spent opening block manager: real 0.002s	user 0.002s	sys 0.001s
I20250411 13:57:53.831640 15812 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/ts-2-root
uuid: "4454b321e8d14b85b304b41af6c25ce6"
format_stamp: "Formatted at 2025-04-11 13:57:53 on dist-test-slave-jcj2"
I20250411 13:57:53.831887 15812 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/ts-2-root
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/ts-2-root
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationTest.TestConnectToFollowerMasterOnly.1744379822517842-15812-0/minicluster-data/ts-2-root/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:53.850513 15812 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:53.851713 15812 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:53.852870 15812 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:57:53.855080 15812 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:57:53.855299 15812 ts_tablet_manager.cc:525] Time spent load tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:53.855515 15812 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:57:53.855681 15812 ts_tablet_manager.cc:589] Time spent register tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:53.894261 15812 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.3:38627
I20250411 13:57:53.894341 20307 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.3:38627 every 8 connection(s)
I20250411 13:57:53.910686 20308 heartbeater.cc:344] Connected to a master server at 127.15.113.60:42571
I20250411 13:57:53.911187 20308 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:53.912077 20308 heartbeater.cc:507] Master 127.15.113.60:42571 requested a full tablet report, sending...
I20250411 13:57:53.914801 19990 ts_manager.cc:194] Registered new tserver with Master: 4454b321e8d14b85b304b41af6c25ce6 (127.15.113.3:38627)
I20250411 13:57:53.915910 20309 heartbeater.cc:344] Connected to a master server at 127.15.113.61:37369
I20250411 13:57:53.916314 20309 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:53.917151 20309 heartbeater.cc:507] Master 127.15.113.61:37369 requested a full tablet report, sending...
I20250411 13:57:53.919286 19925 ts_manager.cc:194] Registered new tserver with Master: 4454b321e8d14b85b304b41af6c25ce6 (127.15.113.3:38627)
I20250411 13:57:53.920694 19925 master_service.cc:496] Signed X509 certificate for tserver {username='slave'} at 127.0.0.1:36018
I20250411 13:57:53.921991 20310 heartbeater.cc:344] Connected to a master server at 127.15.113.62:36853
I20250411 13:57:53.922371 20310 heartbeater.cc:461] Registering TS with master...
I20250411 13:57:53.923031 20310 heartbeater.cc:507] Master 127.15.113.62:36853 requested a full tablet report, sending...
I20250411 13:57:53.925104 19860 ts_manager.cc:194] Registered new tserver with Master: 4454b321e8d14b85b304b41af6c25ce6 (127.15.113.3:38627)
I20250411 13:57:53.926223 15812 internal_mini_cluster.cc:371] 3 TS(s) registered with all masters after 0.015461007s
I20250411 13:57:53.979773 15812 tablet_server.cc:178] TabletServer@127.15.113.1:0 shutting down...
I20250411 13:57:53.994956 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:54.010630 15812 tablet_server.cc:195] TabletServer@127.15.113.1:0 shutdown complete.
I20250411 13:57:54.018035 15812 tablet_server.cc:178] TabletServer@127.15.113.2:0 shutting down...
I20250411 13:57:54.032373 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:54.048579 15812 tablet_server.cc:195] TabletServer@127.15.113.2:0 shutdown complete.
I20250411 13:57:54.056128 15812 tablet_server.cc:178] TabletServer@127.15.113.3:0 shutting down...
I20250411 13:57:54.071763 15812 ts_tablet_manager.cc:1500] Shutting down tablet manager...
I20250411 13:57:54.088321 15812 tablet_server.cc:195] TabletServer@127.15.113.3:0 shutdown complete.
I20250411 13:57:54.095382 15812 master.cc:561] Master@127.15.113.62:36853 shutting down...
I20250411 13:57:54.108598 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P 004c9e041df9400aa8cb5040cff98be0 [term 1 FOLLOWER]: Raft consensus shutting down.
I20250411 13:57:54.109102 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P 004c9e041df9400aa8cb5040cff98be0 [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:54.109400 15812 tablet_replica.cc:331] T 00000000000000000000000000000000 P 004c9e041df9400aa8cb5040cff98be0: stopping tablet replica
I20250411 13:57:54.127727 15812 master.cc:583] Master@127.15.113.62:36853 shutdown complete.
I20250411 13:57:54.137308 15812 master.cc:561] Master@127.15.113.61:37369 shutting down...
W20250411 13:57:54.150374 19911 consensus_peers.cc:487] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e -> Peer 004c9e041df9400aa8cb5040cff98be0 (127.15.113.62:36853): Couldn't send request to peer 004c9e041df9400aa8cb5040cff98be0. Status: Network error: Client connection negotiation failed: client connection to 127.15.113.62:36853: connect: Connection refused (error 111). This is attempt 1: this message will repeat every 5th retry.
I20250411 13:57:54.152796 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [term 1 LEADER]: Raft consensus shutting down.
I20250411 13:57:54.154150 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:54.154606 15812 tablet_replica.cc:331] T 00000000000000000000000000000000 P 09f6d09acd7248b283def53ddac78e3e: stopping tablet replica
I20250411 13:57:54.173147 15812 master.cc:583] Master@127.15.113.61:37369 shutdown complete.
I20250411 13:57:54.183375 15812 master.cc:561] Master@127.15.113.60:42571 shutting down...
I20250411 13:57:54.194972 15812 raft_consensus.cc:2241] T 00000000000000000000000000000000 P 826c78ab96804f2e979676d601c71e45 [term 1 FOLLOWER]: Raft consensus shutting down.
I20250411 13:57:54.195430 15812 raft_consensus.cc:2270] T 00000000000000000000000000000000 P 826c78ab96804f2e979676d601c71e45 [term 1 FOLLOWER]: Raft consensus is shut down!
I20250411 13:57:54.195683 15812 tablet_replica.cc:331] T 00000000000000000000000000000000 P 826c78ab96804f2e979676d601c71e45: stopping tablet replica
I20250411 13:57:54.212973 15812 master.cc:583] Master@127.15.113.60:42571 shutdown complete.
[       OK ] MasterReplicationTest.TestConnectToFollowerMasterOnly (1087 ms)
[----------] 7 tests from MasterReplicationTest (51567 ms total)

[----------] 2 tests from MasterReplicationAndRpcSizeLimitTest
[ RUN      ] MasterReplicationAndRpcSizeLimitTest.AlterTable
2025-04-11T13:57:54Z chronyd version 4.6.1 starting (+CMDMON +NTP +REFCLOCK +RTC -PRIVDROP -SCFILTER -SIGND +ASYNCDNS -NTS -SECHASH -IPV6 +DEBUG)
2025-04-11T13:57:54Z Disabled control of system clock
I20250411 13:57:54.321338 15812 external_mini_cluster.cc:1351] Running /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu
/tmp/dist-test-taskQtg33F/build/tsan/bin/kudu
--fs_wal_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-0/wal
--fs_data_dirs=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-0/data
--block_manager=log
--webserver_interface=localhost
--never_fsync
--enable_minidumps=false
--redact=none
--metrics_log_interval_ms=1000
--log_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-0/logs
--server_dump_info_path=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-0/data/info.pb
--server_dump_info_format=pb
--rpc_server_allow_ephemeral_ports
--unlock_experimental_flags
--unlock_unsafe_flags
--logtostderr
--logbuflevel=-1
--ipki_server_key_size=768
--openssl_security_level_override=0
master
run
--ipki_ca_key_size=768
--tsk_num_rsa_bits=512
--rpc_bind_addresses=127.15.113.62:39525
--webserver_interface=127.15.113.62
--webserver_port=0
--builtin_ntp_servers=127.15.113.20:43143
--builtin_ntp_poll_interval_ms=100
--ntp_initial_sync_wait_secs=10
--time_source=builtin
--rpc_reuseport=true
--master_addresses=127.15.113.62:39525,127.15.113.61:38061,127.15.113.60:39549
--raft_heartbeat_interval_ms=500
--leader_failure_max_missed_heartbeat_periods=2
--rpc_max_message_size_enable_validation=false
--rpc_max_message_size=65536
--consensus_max_batch_size_bytes=64512
--catalog_manager_enable_chunked_tablet_reports=false with env {}
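
Note on the argv dump above: it is the exact invocation the external minicluster builds for the master binary, in three parts: global flags first, then the "master run" subcommand, then master-specific flags. Also note that --consensus_max_batch_size_bytes=64512 sits exactly 1024 bytes under --rpc_max_message_size=65536, presumably leaving headroom so a full consensus batch still fits inside one RPC at this artificially small limit. As a hedged sketch only (the binary path and flag names are copied from the log; the helper name start_master is hypothetical), a harness could reproduce the same invocation shape like this:

    import subprocess

    def start_master(kudu_bin, wal_dir, data_dir, rpc_addr, master_addrs):
        # Same argv layout as the log: global flags, the "master run"
        # subcommand, then the master-specific flags.
        argv = [
            kudu_bin,
            f"--fs_wal_dir={wal_dir}",
            f"--fs_data_dirs={data_dir}",
            "--block_manager=log",
            "--logtostderr",
            "master", "run",
            f"--rpc_bind_addresses={rpc_addr}",
            f"--master_addresses={master_addrs}",
            "--time_source=builtin",
        ]
        # Popen rather than run(): the master is a long-lived daemon
        # that the harness shuts down later.
        return subprocess.Popen(argv)
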
W20250411 13:57:54.633328 20336 flags.cc:425] Enabled unsafe flag: --rpc_max_message_size_enable_validation=false
W20250411 13:57:54.633888 20336 flags.cc:425] Enabled unsafe flag: --openssl_security_level_override=0
W20250411 13:57:54.634229 20336 flags.cc:425] Enabled unsafe flag: --rpc_server_allow_ephemeral_ports=true
W20250411 13:57:54.634676 20336 flags.cc:425] Enabled unsafe flag: --never_fsync=true
W20250411 13:57:54.664577 20336 flags.cc:425] Enabled experimental flag: --ipki_ca_key_size=768
W20250411 13:57:54.664958 20336 flags.cc:425] Enabled experimental flag: --ipki_server_key_size=768
W20250411 13:57:54.665216 20336 flags.cc:425] Enabled experimental flag: --tsk_num_rsa_bits=512
W20250411 13:57:54.665470 20336 flags.cc:425] Enabled experimental flag: --rpc_reuseport=true
I20250411 13:57:54.699612 20336 master_runner.cc:386] Master server non-default flags:
--builtin_ntp_poll_interval_ms=100
--builtin_ntp_servers=127.15.113.20:43143
--ntp_initial_sync_wait_secs=10
--time_source=builtin
--consensus_max_batch_size_bytes=64512
--leader_failure_max_missed_heartbeat_periods=2
--fs_data_dirs=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-0/data
--fs_wal_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-0/wal
--catalog_manager_enable_chunked_tablet_reports=false
--ipki_ca_key_size=768
--master_addresses=127.15.113.62:39525,127.15.113.61:38061,127.15.113.60:39549
--rpc_max_message_size=65536
--rpc_max_message_size_enable_validation=false
--ipki_server_key_size=768
--openssl_security_level_override=0
--tsk_num_rsa_bits=512
--rpc_bind_addresses=127.15.113.62:39525
--rpc_reuseport=true
--rpc_server_allow_ephemeral_ports=true
--metrics_log_interval_ms=1000
--server_dump_info_format=pb
--server_dump_info_path=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-0/data/info.pb
--webserver_interface=127.15.113.62
--webserver_port=0
--never_fsync=true
--redact=none
--unlock_experimental_flags=true
--unlock_unsafe_flags=true
--enable_minidumps=false
--log_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-0/logs
--logbuflevel=-1
--logtostderr=true

Master server version:
kudu 1.18.0-SNAPSHOT
revision 586cfc20905df35f49541b53af4442736790b0c1
build type FASTDEBUG
built by None at 11 Apr 2025 13:43:20 UTC on 5fd53c4cbb9d
build id 5571
TSAN enabled
I20250411 13:57:54.701017 20336 env_posix.cc:2264] Not raising this process' open files per process limit of 1048576; it is already as high as it can go
I20250411 13:57:54.702612 20336 file_cache.cc:492] Constructed file cache file cache with capacity 419430
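
Worth noting: the file cache capacity of 419430 reported here appears to be 40% of the open files limit of 1048576 reported on the previous line. The 40% fraction is an observation from these two lines, not a documented contract:

    # Observed relationship between the two preceding log lines; the
    # 40% fraction is inferred from the numbers, not quoted from docs.
    print(int(1048576 * 0.4))  # 419430
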
W20250411 13:57:54.716353 20342 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:54.717303 20343 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:54.717128 20345 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:54.718925 20336 server_base.cc:1034] running on GCE node
I20250411 13:57:55.826128 20336 hybrid_clock.cc:584] initializing the hybrid clock with 'builtin' time source
I20250411 13:57:55.828683 20336 hybrid_clock.cc:630] waiting up to --ntp_initial_sync_wait_secs=10 seconds for the clock to synchronize
I20250411 13:57:55.830008 20336 hybrid_clock.cc:648] HybridClock initialized: now 1744379875829968 us; error 49 us; skew 500 ppm
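
The "error 49 us; skew 500 ppm" pair describes the clock's uncertainty: the bound starts at 49 us and may grow at up to 500 microseconds per second of wall time until the next synchronization. A rough sketch of that standard drift arithmetic (an illustration of what the units mean, not Kudu's exact bookkeeping):

    def max_clock_error_us(initial_error_us, elapsed_s, skew_ppm=500):
        # 500 ppm means the clock may drift up to 500 us per second of
        # wall time, so the uncertainty bound grows linearly between
        # synchronizations.
        return initial_error_us + elapsed_s * skew_ppm

    # Starting from the 49 us reported above, two seconds without a
    # sync gives 49 + 2 * 500 = 1049 us.
    print(max_clock_error_us(49, 2))
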
I20250411 13:57:55.830729 20336 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:55.836802 20336 webserver.cc:466] Webserver started at http://127.15.113.62:44565/ using document root <none> and password file <none>
I20250411 13:57:55.837641 20336 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:55.837847 20336 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:55.838310 20336 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:55.842672 20336 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-0/data/instance:
uuid: "9ab79444e1c949e29c1a31cae1bf6a89"
format_stamp: "Formatted at 2025-04-11 13:57:55 on dist-test-slave-jcj2"
I20250411 13:57:55.843716 20336 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-0/wal/instance:
uuid: "9ab79444e1c949e29c1a31cae1bf6a89"
format_stamp: "Formatted at 2025-04-11 13:57:55 on dist-test-slave-jcj2"
I20250411 13:57:55.850625 20336 fs_manager.cc:696] Time spent creating directory manager: real 0.006s	user 0.006s	sys 0.001s
I20250411 13:57:55.855793 20352 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:55.856786 20336 fs_manager.cc:730] Time spent opening block manager: real 0.003s	user 0.003s	sys 0.002s
I20250411 13:57:55.857082 20336 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-0/data,/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-0/wal
uuid: "9ab79444e1c949e29c1a31cae1bf6a89"
format_stamp: "Formatted at 2025-04-11 13:57:55 on dist-test-slave-jcj2"
I20250411 13:57:55.857383 20336 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-0/wal
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-0/wal
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-0/data/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:55.909934 20336 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:55.911306 20336 env_posix.cc:2264] Not raising this process' running threads per effective uid limit of 18446744073709551615; it is already as high as it can go
I20250411 13:57:55.911712 20336 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:55.976701 20336 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.62:39525
I20250411 13:57:55.976794 20403 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.62:39525 every 8 connection(s)
I20250411 13:57:55.979230 20336 server_base.cc:1166] Dumped server information to /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-0/data/info.pb
I20250411 13:57:55.983680 15812 external_mini_cluster.cc:1413] Started /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu as pid 20336
I20250411 13:57:55.984251 15812 external_mini_cluster.cc:1427] Reading /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-0/wal/instance
I20250411 13:57:55.984684 20404 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 00000000000000000000000000000000. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:57:55.988072 15812 external_mini_cluster.cc:1351] Running /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu
/tmp/dist-test-taskQtg33F/build/tsan/bin/kudu
--fs_wal_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-1/wal
--fs_data_dirs=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-1/data
--block_manager=log
--webserver_interface=localhost
--never_fsync
--enable_minidumps=false
--redact=none
--metrics_log_interval_ms=1000
--log_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-1/logs
--server_dump_info_path=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-1/data/info.pb
--server_dump_info_format=pb
--rpc_server_allow_ephemeral_ports
--unlock_experimental_flags
--unlock_unsafe_flags
--logtostderr
--logbuflevel=-1
--ipki_server_key_size=768
--openssl_security_level_override=0
master
run
--ipki_ca_key_size=768
--tsk_num_rsa_bits=512
--rpc_bind_addresses=127.15.113.61:38061
--webserver_interface=127.15.113.61
--webserver_port=0
--builtin_ntp_servers=127.15.113.20:43143
--builtin_ntp_poll_interval_ms=100
--ntp_initial_sync_wait_secs=10
--time_source=builtin
--rpc_reuseport=true
--master_addresses=127.15.113.62:39525,127.15.113.61:38061,127.15.113.60:39549
--raft_heartbeat_interval_ms=500
--leader_failure_max_missed_heartbeat_periods=2
--rpc_max_message_size_enable_validation=false
--rpc_max_message_size=65536
--consensus_max_batch_size_bytes=64512
--catalog_manager_enable_chunked_tablet_reports=false with env {}
I20250411 13:57:56.000913 20404 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39525 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:56.023897 20404 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } has no permanent_uuid. Determining permanent_uuid...
W20250411 13:57:56.026161 20354 proxy.cc:239] Call had error, refreshing address and retrying: Network error: Client connection negotiation failed: client connection to 127.15.113.61:38061: connect: Connection refused (error 111)
W20250411 13:57:56.029803 20404 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:38061: Network error: Client connection negotiation failed: client connection to 127.15.113.61:38061: connect: Connection refused (error 111)
I20250411 13:57:56.079234 20404 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } attempt: 1
W20250411 13:57:56.082840 20404 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:38061: Network error: Client connection negotiation failed: client connection to 127.15.113.61:38061: connect: Connection refused (error 111)
I20250411 13:57:56.151217 20404 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } attempt: 2
W20250411 13:57:56.154898 20404 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:38061: Network error: Client connection negotiation failed: client connection to 127.15.113.61:38061: connect: Connection refused (error 111)
I20250411 13:57:56.246279 20404 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } attempt: 3
W20250411 13:57:56.249984 20404 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:38061: Network error: Client connection negotiation failed: client connection to 127.15.113.61:38061: connect: Connection refused (error 111)
W20250411 13:57:56.301313 20408 flags.cc:425] Enabled unsafe flag: --rpc_max_message_size_enable_validation=false
W20250411 13:57:56.301775 20408 flags.cc:425] Enabled unsafe flag: --openssl_security_level_override=0
W20250411 13:57:56.302096 20408 flags.cc:425] Enabled unsafe flag: --rpc_server_allow_ephemeral_ports=true
W20250411 13:57:56.302490 20408 flags.cc:425] Enabled unsafe flag: --never_fsync=true
W20250411 13:57:56.331459 20408 flags.cc:425] Enabled experimental flag: --ipki_ca_key_size=768
W20250411 13:57:56.331853 20408 flags.cc:425] Enabled experimental flag: --ipki_server_key_size=768
W20250411 13:57:56.332123 20408 flags.cc:425] Enabled experimental flag: --tsk_num_rsa_bits=512
W20250411 13:57:56.332353 20408 flags.cc:425] Enabled experimental flag: --rpc_reuseport=true
I20250411 13:57:56.366006 20408 master_runner.cc:386] Master server non-default flags:
--builtin_ntp_poll_interval_ms=100
--builtin_ntp_servers=127.15.113.20:43143
--ntp_initial_sync_wait_secs=10
--time_source=builtin
--consensus_max_batch_size_bytes=64512
--leader_failure_max_missed_heartbeat_periods=2
--fs_data_dirs=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-1/data
--fs_wal_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-1/wal
--catalog_manager_enable_chunked_tablet_reports=false
--ipki_ca_key_size=768
--master_addresses=127.15.113.62:39525,127.15.113.61:38061,127.15.113.60:39549
--rpc_max_message_size=65536
--rpc_max_message_size_enable_validation=false
--ipki_server_key_size=768
--openssl_security_level_override=0
--tsk_num_rsa_bits=512
--rpc_bind_addresses=127.15.113.61:38061
--rpc_reuseport=true
--rpc_server_allow_ephemeral_ports=true
--metrics_log_interval_ms=1000
--server_dump_info_format=pb
--server_dump_info_path=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-1/data/info.pb
--webserver_interface=127.15.113.61
--webserver_port=0
--never_fsync=true
--redact=none
--unlock_experimental_flags=true
--unlock_unsafe_flags=true
--enable_minidumps=false
--log_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-1/logs
--logbuflevel=-1
--logtostderr=true

Master server version:
kudu 1.18.0-SNAPSHOT
revision 586cfc20905df35f49541b53af4442736790b0c1
build type FASTDEBUG
built by None at 11 Apr 2025 13:43:20 UTC on 5fd53c4cbb9d
build id 5571
TSAN enabled
I20250411 13:57:56.367358 20408 env_posix.cc:2264] Not raising this process' open files per process limit of 1048576; it is already as high as it can go
I20250411 13:57:56.368880 20408 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:57:56.383138 20416 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:57:56.393304 20404 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } attempt: 4
W20250411 13:57:56.396916 20404 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:38061: Network error: Client connection negotiation failed: client connection to 127.15.113.61:38061: connect: Connection refused (error 111)
I20250411 13:57:56.696288 20404 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } attempt: 5
W20250411 13:57:56.700794 20404 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:38061: Network error: Client connection negotiation failed: client connection to 127.15.113.61:38061: connect: Connection refused (error 111)
I20250411 13:57:57.248154 20404 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } attempt: 6
W20250411 13:57:57.255950 20404 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:38061: Network error: Client connection negotiation failed: client connection to 127.15.113.61:38061: connect: Connection refused (error 111)
W20250411 13:57:56.383373 20418 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:56.383848 20415 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:57.508525 20417 instance_detector.cc:116] could not retrieve GCE instance metadata: Timed out: curl timeout: Timeout was reached: Connection time-out
I20250411 13:57:57.508620 20408 server_base.cc:1029] Not found: could not retrieve instance metadata: unable to detect cloud type of this node, probably running in non-cloud environment
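
The four instance_detector lines above (AWS, Azure, and OpenStack 404s, then a GCE timeout) come from different threads with overlapping timestamps, and the server only concludes "non-cloud environment" once every probe has failed; earlier in this log the same probing succeeded and reported "running on GCE node". A hedged fan-out sketch of that pattern, with hypothetical names rather than Kudu's actual classes:

    from concurrent.futures import ThreadPoolExecutor, as_completed

    def detect_cloud(detectors):
        # detectors: name -> callable that returns instance metadata or
        # raises on failure (404, timeout, ...). Probe every provider
        # in parallel; the first successful answer decides the type.
        with ThreadPoolExecutor(max_workers=len(detectors)) as pool:
            futures = {pool.submit(fn): name for name, fn in detectors.items()}
            for fut in as_completed(futures):
                try:
                    fut.result()
                    return futures[fut]
                except Exception:
                    continue  # this provider's endpoint was unreachable
        return None  # every probe failed: likely not running in a cloud
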
I20250411 13:57:57.512434 20408 hybrid_clock.cc:584] initializing the hybrid clock with 'builtin' time source
I20250411 13:57:57.515056 20408 hybrid_clock.cc:630] waiting up to --ntp_initial_sync_wait_secs=10 seconds for the clock to synchronize
I20250411 13:57:57.516361 20408 hybrid_clock.cc:648] HybridClock initialized: now 1744379877516327 us; error 52 us; skew 500 ppm
I20250411 13:57:57.517145 20408 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:57.523036 20408 webserver.cc:466] Webserver started at http://127.15.113.61:40691/ using document root <none> and password file <none>
I20250411 13:57:57.523907 20408 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:57.524106 20408 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:57.524534 20408 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:57.528810 20408 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-1/data/instance:
uuid: "11afb5e727404182a1e9ba1cf176f555"
format_stamp: "Formatted at 2025-04-11 13:57:57 on dist-test-slave-jcj2"
I20250411 13:57:57.529824 20408 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-1/wal/instance:
uuid: "11afb5e727404182a1e9ba1cf176f555"
format_stamp: "Formatted at 2025-04-11 13:57:57 on dist-test-slave-jcj2"
I20250411 13:57:57.536428 20408 fs_manager.cc:696] Time spent creating directory manager: real 0.006s	user 0.004s	sys 0.004s
I20250411 13:57:57.541567 20427 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:57.542534 20408 fs_manager.cc:730] Time spent opening block manager: real 0.003s	user 0.002s	sys 0.000s
I20250411 13:57:57.542821 20408 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-1/data,/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-1/wal
uuid: "11afb5e727404182a1e9ba1cf176f555"
format_stamp: "Formatted at 2025-04-11 13:57:57 on dist-test-slave-jcj2"
I20250411 13:57:57.543195 20408 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-1/wal
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-1/wal
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-1/data/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:57.597873 20408 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:57.599287 20408 env_posix.cc:2264] Not raising this process' running threads per effective uid limit of 18446744073709551615; it is already as high as it can go
I20250411 13:57:57.599737 20408 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:57.666309 20408 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.61:38061
I20250411 13:57:57.666400 20478 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.61:38061 every 8 connection(s)
I20250411 13:57:57.668972 20408 server_base.cc:1166] Dumped server information to /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-1/data/info.pb
I20250411 13:57:57.673508 15812 external_mini_cluster.cc:1413] Started /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu as pid 20408
I20250411 13:57:57.673988 15812 external_mini_cluster.cc:1427] Reading /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-1/wal/instance
I20250411 13:57:57.674180 20479 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 00000000000000000000000000000000. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:57:57.676690 15812 external_mini_cluster.cc:1351] Running /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu
/tmp/dist-test-taskQtg33F/build/tsan/bin/kudu
--fs_wal_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-2/wal
--fs_data_dirs=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-2/data
--block_manager=log
--webserver_interface=localhost
--never_fsync
--enable_minidumps=false
--redact=none
--metrics_log_interval_ms=1000
--log_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-2/logs
--server_dump_info_path=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-2/data/info.pb
--server_dump_info_format=pb
--rpc_server_allow_ephemeral_ports
--unlock_experimental_flags
--unlock_unsafe_flags
--logtostderr
--logbuflevel=-1
--ipki_server_key_size=768
--openssl_security_level_override=0
master
run
--ipki_ca_key_size=768
--tsk_num_rsa_bits=512
--rpc_bind_addresses=127.15.113.60:39549
--webserver_interface=127.15.113.60
--webserver_port=0
--builtin_ntp_servers=127.15.113.20:43143
--builtin_ntp_poll_interval_ms=100
--ntp_initial_sync_wait_secs=10
--time_source=builtin
--rpc_reuseport=true
--master_addresses=127.15.113.62:39525,127.15.113.61:38061,127.15.113.60:39549
--raft_heartbeat_interval_ms=500
--leader_failure_max_missed_heartbeat_periods=2
--rpc_max_message_size_enable_validation=false
--rpc_max_message_size=65536
--consensus_max_batch_size_bytes=64512
--catalog_manager_enable_chunked_tablet_reports=false with env {}
I20250411 13:57:57.684226 20479 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39525 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:57.703936 20479 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:57.718420 20479 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } has no permanent_uuid. Determining permanent_uuid...
W20250411 13:57:57.720604 20431 proxy.cc:239] Call had error, refreshing address and retrying: Network error: Client connection negotiation failed: client connection to 127.15.113.60:39549: connect: Connection refused (error 111)
W20250411 13:57:57.724426 20479 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:39549: Network error: Client connection negotiation failed: client connection to 127.15.113.60:39549: connect: Connection refused (error 111)
I20250411 13:57:57.774024 20479 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } attempt: 1
W20250411 13:57:57.778168 20479 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:39549: Network error: Client connection negotiation failed: client connection to 127.15.113.60:39549: connect: Connection refused (error 111)
I20250411 13:57:57.846558 20479 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } attempt: 2
W20250411 13:57:57.850778 20479 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:39549: Network error: Client connection negotiation failed: client connection to 127.15.113.60:39549: connect: Connection refused (error 111)
I20250411 13:57:57.942283 20479 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } attempt: 3
W20250411 13:57:57.946926 20479 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:39549: Network error: Client connection negotiation failed: client connection to 127.15.113.60:39549: connect: Connection refused (error 111)
W20250411 13:57:58.014004 20483 flags.cc:425] Enabled unsafe flag: --rpc_max_message_size_enable_validation=false
W20250411 13:57:58.014532 20483 flags.cc:425] Enabled unsafe flag: --openssl_security_level_override=0
W20250411 13:57:58.014874 20483 flags.cc:425] Enabled unsafe flag: --rpc_server_allow_ephemeral_ports=true
W20250411 13:57:58.015290 20483 flags.cc:425] Enabled unsafe flag: --never_fsync=true
W20250411 13:57:58.044178 20483 flags.cc:425] Enabled experimental flag: --ipki_ca_key_size=768
W20250411 13:57:58.044544 20483 flags.cc:425] Enabled experimental flag: --ipki_server_key_size=768
W20250411 13:57:58.044819 20483 flags.cc:425] Enabled experimental flag: --tsk_num_rsa_bits=512
W20250411 13:57:58.045037 20483 flags.cc:425] Enabled experimental flag: --rpc_reuseport=true
I20250411 13:57:58.077284 20483 master_runner.cc:386] Master server non-default flags:
--builtin_ntp_poll_interval_ms=100
--builtin_ntp_servers=127.15.113.20:43143
--ntp_initial_sync_wait_secs=10
--time_source=builtin
--consensus_max_batch_size_bytes=64512
--leader_failure_max_missed_heartbeat_periods=2
--fs_data_dirs=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-2/data
--fs_wal_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-2/wal
--catalog_manager_enable_chunked_tablet_reports=false
--ipki_ca_key_size=768
--master_addresses=127.15.113.62:39525,127.15.113.61:38061,127.15.113.60:39549
--rpc_max_message_size=65536
--rpc_max_message_size_enable_validation=false
--ipki_server_key_size=768
--openssl_security_level_override=0
--tsk_num_rsa_bits=512
--rpc_bind_addresses=127.15.113.60:39549
--rpc_reuseport=true
--rpc_server_allow_ephemeral_ports=true
--metrics_log_interval_ms=1000
--server_dump_info_format=pb
--server_dump_info_path=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-2/data/info.pb
--webserver_interface=127.15.113.60
--webserver_port=0
--never_fsync=true
--redact=none
--unlock_experimental_flags=true
--unlock_unsafe_flags=true
--enable_minidumps=false
--log_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-2/logs
--logbuflevel=-1
--logtostderr=true

Master server version:
kudu 1.18.0-SNAPSHOT
revision 586cfc20905df35f49541b53af4442736790b0c1
build type FASTDEBUG
built by None at 11 Apr 2025 13:43:20 UTC on 5fd53c4cbb9d
build id 5571
TSAN enabled
I20250411 13:57:58.078599 20483 env_posix.cc:2264] Not raising this process' open files per process limit of 1048576; it is already as high as it can go
I20250411 13:57:58.080139 20483 file_cache.cc:492] Constructed file cache file cache with capacity 419430
I20250411 13:57:58.090303 20479 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } attempt: 4
W20250411 13:57:58.095695 20492 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:58.096817 20479 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:39549: Network error: Client connection negotiation failed: client connection to 127.15.113.60:39549: connect: Connection refused (error 111)
I20250411 13:57:58.316453 20404 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } attempt: 7
I20250411 13:57:58.351790 20404 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } has no permanent_uuid. Determining permanent_uuid...
W20250411 13:57:58.360899 20404 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:39549: Network error: Client connection negotiation failed: client connection to 127.15.113.60:39549: connect: Connection refused (error 111)
I20250411 13:57:58.396260 20479 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } attempt: 5
W20250411 13:57:58.403082 20479 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:39549: Network error: Client connection negotiation failed: client connection to 127.15.113.60:39549: connect: Connection refused (error 111)
I20250411 13:57:58.419454 20404 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } attempt: 1
W20250411 13:57:58.425599 20404 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:39549: Network error: Client connection negotiation failed: client connection to 127.15.113.60:39549: connect: Connection refused (error 111)
I20250411 13:57:58.507196 20404 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } attempt: 2
W20250411 13:57:58.513654 20404 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:39549: Network error: Client connection negotiation failed: client connection to 127.15.113.60:39549: connect: Connection refused (error 111)
I20250411 13:57:58.599244 20404 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } attempt: 3
W20250411 13:57:58.605525 20404 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:39549: Network error: Client connection negotiation failed: client connection to 127.15.113.60:39549: connect: Connection refused (error 111)
I20250411 13:57:58.746191 20404 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } attempt: 4
W20250411 13:57:58.752477 20404 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:39549: Network error: Client connection negotiation failed: client connection to 127.15.113.60:39549: connect: Connection refused (error 111)
I20250411 13:57:58.950675 20479 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } attempt: 6
W20250411 13:57:58.962697 20479 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:39549: Network error: Client connection negotiation failed: client connection to 127.15.113.60:39549: connect: Connection refused (error 111)
I20250411 13:57:59.036033 20404 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } attempt: 5
W20250411 13:57:59.042196 20404 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:39549: Network error: Client connection negotiation failed: client connection to 127.15.113.60:39549: connect: Connection refused (error 111)
W20250411 13:57:58.095847 20491 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:58.097251 20494 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:57:59.322321 20493 instance_detector.cc:116] could not retrieve GCE instance metadata: Timed out: curl timeout: Timeout was reached: Resolving timed out after 1225 milliseconds
I20250411 13:57:59.322438 20483 server_base.cc:1029] Not found: could not retrieve instance metadata: unable to detect cloud type of this node, probably running in non-cloud environment
I20250411 13:57:59.323642 20483 hybrid_clock.cc:584] initializing the hybrid clock with 'builtin' time source
I20250411 13:57:59.326076 20483 hybrid_clock.cc:630] waiting up to --ntp_initial_sync_wait_secs=10 seconds for the clock to synchronize
I20250411 13:57:59.327519 20483 hybrid_clock.cc:648] HybridClock initialized: now 1744379879327450 us; error 89 us; skew 500 ppm
I20250411 13:57:59.328298 20483 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:57:59.334601 20483 webserver.cc:466] Webserver started at http://127.15.113.60:39919/ using document root <none> and password file <none>
I20250411 13:57:59.335510 20483 fs_manager.cc:362] Metadata directory not provided
I20250411 13:57:59.335722 20483 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:57:59.336223 20483 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:57:59.342051 20483 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-2/data/instance:
uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a"
format_stamp: "Formatted at 2025-04-11 13:57:59 on dist-test-slave-jcj2"
I20250411 13:57:59.343125 20483 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-2/wal/instance:
uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a"
format_stamp: "Formatted at 2025-04-11 13:57:59 on dist-test-slave-jcj2"
I20250411 13:57:59.350273 20483 fs_manager.cc:696] Time spent creating directory manager: real 0.007s	user 0.006s	sys 0.001s
I20250411 13:57:59.355430 20506 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:57:59.356527 20483 fs_manager.cc:730] Time spent opening block manager: real 0.003s	user 0.002s	sys 0.002s
I20250411 13:57:59.356845 20483 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-2/data,/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-2/wal
uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a"
format_stamp: "Formatted at 2025-04-11 13:57:59 on dist-test-slave-jcj2"
I20250411 13:57:59.357154 20483 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-2/wal
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-2/wal
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-2/data/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:57:59.405092 20483 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:57:59.406503 20483 env_posix.cc:2264] Not raising this process' running threads per effective uid limit of 18446744073709551615; it is already as high as it can go
I20250411 13:57:59.406978 20483 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:57:59.482897 20483 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.60:39549
I20250411 13:57:59.482980 20557 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.60:39549 every 8 connection(s)
I20250411 13:57:59.485345 20483 server_base.cc:1166] Dumped server information to /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-2/data/info.pb
I20250411 13:57:59.491063 20558 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 00000000000000000000000000000000. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:57:59.491171 15812 external_mini_cluster.cc:1413] Started /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu as pid 20483
I20250411 13:57:59.491703 15812 external_mini_cluster.cc:1427] Reading /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/master-2/wal/instance
I20250411 13:57:59.508291 20558 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39525 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:59.531893 20558 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:59.540725 20558 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:57:59.556303 20558 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a: Bootstrap starting.
I20250411 13:57:59.562641 20558 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:59.564882 20558 log.cc:826] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a: Log is configured to *not* fsync() on all Append() calls
I20250411 13:57:59.569705 20558 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a: No bootstrap required, opened a new log
I20250411 13:57:59.587213 20558 raft_consensus.cc:357] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "9ab79444e1c949e29c1a31cae1bf6a89" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39525 } } peers { permanent_uuid: "11afb5e727404182a1e9ba1cf176f555" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } } peers { permanent_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } }
I20250411 13:57:59.587848 20558 raft_consensus.cc:383] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:59.588038 20558 raft_consensus.cc:738] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: fcc88b7b5f5446e6b92c18e8a9e2f74a, State: Initialized, Role: FOLLOWER
I20250411 13:57:59.588716 20558 consensus_queue.cc:260] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "9ab79444e1c949e29c1a31cae1bf6a89" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39525 } } peers { permanent_uuid: "11afb5e727404182a1e9ba1cf176f555" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } } peers { permanent_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } }
I20250411 13:57:59.591879 20566 sys_catalog.cc:455] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "9ab79444e1c949e29c1a31cae1bf6a89" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39525 } } peers { permanent_uuid: "11afb5e727404182a1e9ba1cf176f555" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } } peers { permanent_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } } }
I20250411 13:57:59.593046 20566 sys_catalog.cc:458] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:59.593647 20558 sys_catalog.cc:564] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:57:59.594722 20404 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } attempt: 6
I20250411 13:57:59.619102 20404 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 9ab79444e1c949e29c1a31cae1bf6a89: Bootstrap starting.
I20250411 13:57:59.628253 20404 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P 9ab79444e1c949e29c1a31cae1bf6a89: Neither blocks nor log segments found. Creating new log.
I20250411 13:57:59.631043 20404 log.cc:826] T 00000000000000000000000000000000 P 9ab79444e1c949e29c1a31cae1bf6a89: Log is configured to *not* fsync() on all Append() calls
W20250411 13:57:59.631134 20578 catalog_manager.cc:1560] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a: loading cluster ID for follower catalog manager: Not found: cluster ID entry not found
W20250411 13:57:59.631518 20578 catalog_manager.cc:875] Not found: cluster ID entry not found: failed to prepare follower catalog manager, will retry
I20250411 13:57:59.642563 20404 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 9ab79444e1c949e29c1a31cae1bf6a89: No bootstrap required, opened a new log
I20250411 13:57:59.619776 20483 master_runner.cc:186] Error getting master registration for 127.15.113.62:39525: OK, instance_id { permanent_uuid: "9ab79444e1c949e29c1a31cae1bf6a89" instance_seqno: 1744379875958747 } error { code: CATALOG_MANAGER_NOT_INITIALIZED status { code: SERVICE_UNAVAILABLE message: "Catalog manager is not initialized. State: Starting" } }
I20250411 13:57:59.650283 20483 master_runner.cc:418] Couldn't verify the masters in the cluster. Trying again...
W20250411 13:57:59.662180 20512 tablet.cc:2367] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a: Can't schedule compaction. Clean time has not been advanced past its initial value.
I20250411 13:57:59.662782 20404 raft_consensus.cc:357] T 00000000000000000000000000000000 P 9ab79444e1c949e29c1a31cae1bf6a89 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "9ab79444e1c949e29c1a31cae1bf6a89" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39525 } } peers { permanent_uuid: "11afb5e727404182a1e9ba1cf176f555" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } } peers { permanent_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } }
I20250411 13:57:59.663422 20404 raft_consensus.cc:383] T 00000000000000000000000000000000 P 9ab79444e1c949e29c1a31cae1bf6a89 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:57:59.663641 20404 raft_consensus.cc:738] T 00000000000000000000000000000000 P 9ab79444e1c949e29c1a31cae1bf6a89 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 9ab79444e1c949e29c1a31cae1bf6a89, State: Initialized, Role: FOLLOWER
I20250411 13:57:59.664445 20404 consensus_queue.cc:260] T 00000000000000000000000000000000 P 9ab79444e1c949e29c1a31cae1bf6a89 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "9ab79444e1c949e29c1a31cae1bf6a89" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39525 } } peers { permanent_uuid: "11afb5e727404182a1e9ba1cf176f555" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } } peers { permanent_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } }
I20250411 13:57:59.667110 20581 sys_catalog.cc:455] T 00000000000000000000000000000000 P 9ab79444e1c949e29c1a31cae1bf6a89 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "9ab79444e1c949e29c1a31cae1bf6a89" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39525 } } peers { permanent_uuid: "11afb5e727404182a1e9ba1cf176f555" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } } peers { permanent_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } } }
I20250411 13:57:59.668423 20581 sys_catalog.cc:458] T 00000000000000000000000000000000 P 9ab79444e1c949e29c1a31cae1bf6a89 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:59.669162 20404 sys_catalog.cc:564] T 00000000000000000000000000000000 P 9ab79444e1c949e29c1a31cae1bf6a89 [sys.catalog]: configured and running, proceeding with master startup.
W20250411 13:57:59.670991 20358 tablet.cc:2367] T 00000000000000000000000000000000 P 9ab79444e1c949e29c1a31cae1bf6a89: Can't schedule compaction. Clean time has not been advanced past its initial value.
I20250411 13:57:59.707319 20566 raft_consensus.cc:491] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:57:59.707891 20566 raft_consensus.cc:513] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "9ab79444e1c949e29c1a31cae1bf6a89" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39525 } } peers { permanent_uuid: "11afb5e727404182a1e9ba1cf176f555" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } } peers { permanent_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } }
I20250411 13:57:59.711553 20379 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "9ab79444e1c949e29c1a31cae1bf6a89" is_pre_election: true
I20250411 13:57:59.711959 20566 leader_election.cc:290] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers 9ab79444e1c949e29c1a31cae1bf6a89 (127.15.113.62:39525), 11afb5e727404182a1e9ba1cf176f555 (127.15.113.61:38061)
I20250411 13:57:59.712373 20379 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 9ab79444e1c949e29c1a31cae1bf6a89 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate fcc88b7b5f5446e6b92c18e8a9e2f74a in term 0.
I20250411 13:57:59.714430 20510 leader_election.cc:304] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 9ab79444e1c949e29c1a31cae1bf6a89, fcc88b7b5f5446e6b92c18e8a9e2f74a; no voters: 
I20250411 13:57:59.715507 20566 raft_consensus.cc:2802] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [term 0 FOLLOWER]: Leader pre-election won for term 1
I20250411 13:57:59.715854 20566 raft_consensus.cc:491] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [term 0 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
I20250411 13:57:59.716202 20566 raft_consensus.cc:3058] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [term 0 FOLLOWER]: Advancing to term 1
W20250411 13:57:59.718971 20592 catalog_manager.cc:1560] T 00000000000000000000000000000000 P 9ab79444e1c949e29c1a31cae1bf6a89: loading cluster ID for follower catalog manager: Not found: cluster ID entry not found
W20250411 13:57:59.719313 20592 catalog_manager.cc:875] Not found: cluster ID entry not found: failed to prepare follower catalog manager, will retry
I20250411 13:57:59.730077 20566 raft_consensus.cc:513] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [term 1 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "9ab79444e1c949e29c1a31cae1bf6a89" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39525 } } peers { permanent_uuid: "11afb5e727404182a1e9ba1cf176f555" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } } peers { permanent_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } }
I20250411 13:57:59.743916 20566 leader_election.cc:290] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [CANDIDATE]: Term 1 election: Requested vote from peers 9ab79444e1c949e29c1a31cae1bf6a89 (127.15.113.62:39525), 11afb5e727404182a1e9ba1cf176f555 (127.15.113.61:38061)
I20250411 13:57:59.744046 20379 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "9ab79444e1c949e29c1a31cae1bf6a89"
I20250411 13:57:59.744606 20379 raft_consensus.cc:3058] T 00000000000000000000000000000000 P 9ab79444e1c949e29c1a31cae1bf6a89 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:57:59.753554 20379 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 9ab79444e1c949e29c1a31cae1bf6a89 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate fcc88b7b5f5446e6b92c18e8a9e2f74a in term 1.
I20250411 13:57:59.755314 20510 leader_election.cc:304] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [CANDIDATE]: Term 1 election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 9ab79444e1c949e29c1a31cae1bf6a89, fcc88b7b5f5446e6b92c18e8a9e2f74a; no voters: 
I20250411 13:57:59.756192 20566 raft_consensus.cc:2802] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [term 1 FOLLOWER]: Leader election won for term 1
I20250411 13:57:59.769515 20566 raft_consensus.cc:695] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [term 1 LEADER]: Becoming Leader. State: Replica: fcc88b7b5f5446e6b92c18e8a9e2f74a, State: Running, Role: LEADER
I20250411 13:57:59.770825 20566 consensus_queue.cc:237] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [LEADER]: Queue going to LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 1, Majority size: 2, State: 0, Mode: LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "9ab79444e1c949e29c1a31cae1bf6a89" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39525 } } peers { permanent_uuid: "11afb5e727404182a1e9ba1cf176f555" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } } peers { permanent_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } }
I20250411 13:57:59.724023 20336 master_runner.cc:186] Error getting master registration for 127.15.113.61:38061: OK, instance_id { permanent_uuid: "11afb5e727404182a1e9ba1cf176f555" instance_seqno: 1744379877647976 } error { code: CATALOG_MANAGER_NOT_INITIALIZED status { code: SERVICE_UNAVAILABLE message: "Catalog manager is not initialized. State: Starting" } }
I20250411 13:57:59.781476 20336 master_runner.cc:418] Couldn't verify the masters in the cluster. Trying again...
I20250411 13:57:59.784515 20594 sys_catalog.cc:455] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [sys.catalog]: SysCatalogTable state changed. Reason: New leader fcc88b7b5f5446e6b92c18e8a9e2f74a. Latest consensus state: current_term: 1 leader_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "9ab79444e1c949e29c1a31cae1bf6a89" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39525 } } peers { permanent_uuid: "11afb5e727404182a1e9ba1cf176f555" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } } peers { permanent_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } } }
I20250411 13:57:59.785204 20594 sys_catalog.cc:458] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [sys.catalog]: This master's current role is: LEADER
I20250411 13:57:59.797804 20596 catalog_manager.cc:1477] Loading table and tablet metadata into memory...
I20250411 13:57:59.804975 20596 catalog_manager.cc:1486] Initializing Kudu cluster ID...
I20250411 13:57:59.823323 20379 raft_consensus.cc:1273] T 00000000000000000000000000000000 P 9ab79444e1c949e29c1a31cae1bf6a89 [term 1 FOLLOWER]: Refusing update from remote peer fcc88b7b5f5446e6b92c18e8a9e2f74a: Log matching property violated. Preceding OpId in replica: term: 0 index: 0. Preceding OpId from leader: term: 1 index: 2. (index mismatch)
I20250411 13:57:59.824877 20566 consensus_queue.cc:1035] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [LEADER]: Connected to new peer: Peer: permanent_uuid: "9ab79444e1c949e29c1a31cae1bf6a89" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39525 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:57:59.856132 20597 mvcc.cc:204] Tried to move back new op lower bound from 7144979987737034752 to 7144979987553288192. Current Snapshot: MvccSnapshot[applied={T|T < 7144979987737034752}]
I20250411 13:57:59.859843 20581 sys_catalog.cc:455] T 00000000000000000000000000000000 P 9ab79444e1c949e29c1a31cae1bf6a89 [sys.catalog]: SysCatalogTable state changed. Reason: New leader fcc88b7b5f5446e6b92c18e8a9e2f74a. Latest consensus state: current_term: 1 leader_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "9ab79444e1c949e29c1a31cae1bf6a89" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39525 } } peers { permanent_uuid: "11afb5e727404182a1e9ba1cf176f555" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } } peers { permanent_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } } }
I20250411 13:57:59.860769 20581 sys_catalog.cc:458] T 00000000000000000000000000000000 P 9ab79444e1c949e29c1a31cae1bf6a89 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:59.889022 20598 mvcc.cc:204] Tried to move back new op lower bound from 7144979987737034752 to 7144979987553288192. Current Snapshot: MvccSnapshot[applied={T|T < 7144979987737034752}]
I20250411 13:57:59.907693 20581 sys_catalog.cc:455] T 00000000000000000000000000000000 P 9ab79444e1c949e29c1a31cae1bf6a89 [sys.catalog]: SysCatalogTable state changed. Reason: Replicated consensus-only round. Latest consensus state: current_term: 1 leader_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "9ab79444e1c949e29c1a31cae1bf6a89" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39525 } } peers { permanent_uuid: "11afb5e727404182a1e9ba1cf176f555" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } } peers { permanent_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } } }
I20250411 13:57:59.908618 20581 sys_catalog.cc:458] T 00000000000000000000000000000000 P 9ab79444e1c949e29c1a31cae1bf6a89 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:57:59.911746 20594 sys_catalog.cc:455] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [sys.catalog]: SysCatalogTable state changed. Reason: Peer health change. Latest consensus state: current_term: 1 leader_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "9ab79444e1c949e29c1a31cae1bf6a89" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39525 } } peers { permanent_uuid: "11afb5e727404182a1e9ba1cf176f555" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } } peers { permanent_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } } }
I20250411 13:57:59.912362 20594 sys_catalog.cc:458] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [sys.catalog]: This master's current role is: LEADER
I20250411 13:57:59.920194 20596 catalog_manager.cc:1349] Generated new cluster ID: 67c81319d10943ea8b83c8c3fb3da8e4
I20250411 13:57:59.920513 20596 catalog_manager.cc:1497] Initializing Kudu internal certificate authority...
I20250411 13:57:59.955775 20596 catalog_manager.cc:1372] Generated new certificate authority record
I20250411 13:57:59.960294 20596 catalog_manager.cc:1506] Loading token signing keys...
I20250411 13:58:00.022199 20596 catalog_manager.cc:5954] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a: Generated new TSK 0
I20250411 13:58:00.023326 20596 catalog_manager.cc:1516] Initializing in-progress tserver states...
I20250411 13:58:00.026937 20479 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } attempt: 7
I20250411 13:58:00.046269 15812 external_mini_cluster.cc:1351] Running /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu
/tmp/dist-test-taskQtg33F/build/tsan/bin/kudu
--fs_wal_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-0/wal
--fs_data_dirs=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-0/data
--block_manager=log
--webserver_interface=localhost
--never_fsync
--enable_minidumps=false
--redact=none
--metrics_log_interval_ms=1000
--log_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-0/logs
--server_dump_info_path=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-0/data/info.pb
--server_dump_info_format=pb
--rpc_server_allow_ephemeral_ports
--unlock_experimental_flags
--unlock_unsafe_flags
--logtostderr
--logbuflevel=-1
--ipki_server_key_size=768
--openssl_security_level_override=0
tserver
run
--rpc_bind_addresses=127.15.113.1:0
--local_ip_for_outbound_sockets=127.15.113.1
--webserver_interface=127.15.113.1
--webserver_port=0
--tserver_master_addrs=127.15.113.62:39525,127.15.113.61:38061,127.15.113.60:39549
--builtin_ntp_servers=127.15.113.20:43143
--builtin_ntp_poll_interval_ms=100
--ntp_initial_sync_wait_secs=10
--time_source=builtin
--raft_heartbeat_interval_ms=500
--leader_failure_max_missed_heartbeat_periods=2
--rpc_service_queue_length=200 with env {}
I20250411 13:58:00.078827 20479 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 11afb5e727404182a1e9ba1cf176f555: Bootstrap starting.
I20250411 13:58:00.090353 20479 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P 11afb5e727404182a1e9ba1cf176f555: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:00.092643 20479 log.cc:826] T 00000000000000000000000000000000 P 11afb5e727404182a1e9ba1cf176f555: Log is configured to *not* fsync() on all Append() calls
I20250411 13:58:00.118980 20479 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 11afb5e727404182a1e9ba1cf176f555: No bootstrap required, opened a new log
I20250411 13:58:00.173249 20479 raft_consensus.cc:357] T 00000000000000000000000000000000 P 11afb5e727404182a1e9ba1cf176f555 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "9ab79444e1c949e29c1a31cae1bf6a89" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39525 } } peers { permanent_uuid: "11afb5e727404182a1e9ba1cf176f555" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } } peers { permanent_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } }
I20250411 13:58:00.175441 20479 raft_consensus.cc:383] T 00000000000000000000000000000000 P 11afb5e727404182a1e9ba1cf176f555 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:00.176208 20479 raft_consensus.cc:738] T 00000000000000000000000000000000 P 11afb5e727404182a1e9ba1cf176f555 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 11afb5e727404182a1e9ba1cf176f555, State: Initialized, Role: FOLLOWER
I20250411 13:58:00.178279 20479 consensus_queue.cc:260] T 00000000000000000000000000000000 P 11afb5e727404182a1e9ba1cf176f555 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "9ab79444e1c949e29c1a31cae1bf6a89" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39525 } } peers { permanent_uuid: "11afb5e727404182a1e9ba1cf176f555" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } } peers { permanent_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } }
I20250411 13:58:00.195287 20606 sys_catalog.cc:455] T 00000000000000000000000000000000 P 11afb5e727404182a1e9ba1cf176f555 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "9ab79444e1c949e29c1a31cae1bf6a89" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39525 } } peers { permanent_uuid: "11afb5e727404182a1e9ba1cf176f555" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } } peers { permanent_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } } }
I20250411 13:58:00.196153 20606 sys_catalog.cc:458] T 00000000000000000000000000000000 P 11afb5e727404182a1e9ba1cf176f555 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:58:00.228818 20479 sys_catalog.cc:564] T 00000000000000000000000000000000 P 11afb5e727404182a1e9ba1cf176f555 [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:58:00.229444 20454 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "11afb5e727404182a1e9ba1cf176f555" is_pre_election: true
I20250411 13:58:00.230121 20453 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "11afb5e727404182a1e9ba1cf176f555"
I20250411 13:58:00.231060 20452 raft_consensus.cc:3058] T 00000000000000000000000000000000 P 11afb5e727404182a1e9ba1cf176f555 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:00.245910 20452 raft_consensus.cc:1273] T 00000000000000000000000000000000 P 11afb5e727404182a1e9ba1cf176f555 [term 1 FOLLOWER]: Refusing update from remote peer fcc88b7b5f5446e6b92c18e8a9e2f74a: Log matching property violated. Preceding OpId in replica: term: 0 index: 0. Preceding OpId from leader: term: 1 index: 2. (index mismatch)
I20250411 13:58:00.250602 20602 consensus_queue.cc:1035] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [LEADER]: Connected to new peer: Peer: permanent_uuid: "11afb5e727404182a1e9ba1cf176f555" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.001s
I20250411 13:58:00.324039 20606 sys_catalog.cc:455] T 00000000000000000000000000000000 P 11afb5e727404182a1e9ba1cf176f555 [sys.catalog]: SysCatalogTable state changed. Reason: New leader fcc88b7b5f5446e6b92c18e8a9e2f74a. Latest consensus state: current_term: 1 leader_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "9ab79444e1c949e29c1a31cae1bf6a89" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39525 } } peers { permanent_uuid: "11afb5e727404182a1e9ba1cf176f555" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } } peers { permanent_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } } }
I20250411 13:58:00.324847 20606 sys_catalog.cc:458] T 00000000000000000000000000000000 P 11afb5e727404182a1e9ba1cf176f555 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:58:00.324137 20614 sys_catalog.cc:455] T 00000000000000000000000000000000 P 11afb5e727404182a1e9ba1cf176f555 [sys.catalog]: SysCatalogTable state changed. Reason: Replicated consensus-only round. Latest consensus state: current_term: 1 leader_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "9ab79444e1c949e29c1a31cae1bf6a89" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39525 } } peers { permanent_uuid: "11afb5e727404182a1e9ba1cf176f555" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } } peers { permanent_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } } }
I20250411 13:58:00.325609 20614 sys_catalog.cc:458] T 00000000000000000000000000000000 P 11afb5e727404182a1e9ba1cf176f555 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:58:00.346625 20622 catalog_manager.cc:797] Waiting for catalog manager background task thread to start: Service unavailable: Catalog manager is not initialized. State: Starting
W20250411 13:58:00.562294 20605 flags.cc:425] Enabled unsafe flag: --openssl_security_level_override=0
W20250411 13:58:00.562958 20605 flags.cc:425] Enabled unsafe flag: --rpc_server_allow_ephemeral_ports=true
W20250411 13:58:00.564222 20605 flags.cc:425] Enabled unsafe flag: --never_fsync=true
W20250411 13:58:00.605854 20605 flags.cc:425] Enabled experimental flag: --ipki_server_key_size=768
W20250411 13:58:00.606791 20605 flags.cc:425] Enabled experimental flag: --local_ip_for_outbound_sockets=127.15.113.1
I20250411 13:58:00.640975 20605 tablet_server_runner.cc:78] Tablet server non-default flags:
--builtin_ntp_poll_interval_ms=100
--builtin_ntp_servers=127.15.113.20:43143
--ntp_initial_sync_wait_secs=10
--time_source=builtin
--leader_failure_max_missed_heartbeat_periods=2
--fs_data_dirs=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-0/data
--fs_wal_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-0/wal
--ipki_server_key_size=768
--openssl_security_level_override=0
--rpc_bind_addresses=127.15.113.1:0
--rpc_server_allow_ephemeral_ports=true
--rpc_service_queue_length=200
--metrics_log_interval_ms=1000
--server_dump_info_format=pb
--server_dump_info_path=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-0/data/info.pb
--webserver_interface=127.15.113.1
--webserver_port=0
--tserver_master_addrs=127.15.113.62:39525,127.15.113.61:38061,127.15.113.60:39549
--never_fsync=true
--redact=none
--unlock_experimental_flags=true
--unlock_unsafe_flags=true
--enable_minidumps=false
--local_ip_for_outbound_sockets=127.15.113.1
--log_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-0/logs
--logbuflevel=-1
--logtostderr=true

Tablet server version:
kudu 1.18.0-SNAPSHOT
revision 586cfc20905df35f49541b53af4442736790b0c1
build type FASTDEBUG
built by None at 11 Apr 2025 13:43:20 UTC on 5fd53c4cbb9d
build id 5571
TSAN enabled
I20250411 13:58:00.642390 20605 env_posix.cc:2264] Not raising this process' open files per process limit of 1048576; it is already as high as it can go
I20250411 13:58:00.644119 20605 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:58:00.663946 20628 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:58:00.726195 20592 catalog_manager.cc:1261] Loaded cluster ID: 67c81319d10943ea8b83c8c3fb3da8e4
I20250411 13:58:00.726737 20592 catalog_manager.cc:1554] T 00000000000000000000000000000000 P 9ab79444e1c949e29c1a31cae1bf6a89: loading cluster ID for follower catalog manager: success
I20250411 13:58:00.747025 20592 catalog_manager.cc:1576] T 00000000000000000000000000000000 P 9ab79444e1c949e29c1a31cae1bf6a89: acquiring CA information for follower catalog manager: success
I20250411 13:58:00.754489 20592 catalog_manager.cc:1604] T 00000000000000000000000000000000 P 9ab79444e1c949e29c1a31cae1bf6a89: importing token verification keys for follower catalog manager: success; most recent TSK sequence number 0
I20250411 13:58:00.778604 20602 sys_catalog.cc:455] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [sys.catalog]: SysCatalogTable state changed. Reason: Peer health change. Latest consensus state: current_term: 1 leader_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "9ab79444e1c949e29c1a31cae1bf6a89" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 39525 } } peers { permanent_uuid: "11afb5e727404182a1e9ba1cf176f555" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 38061 } } peers { permanent_uuid: "fcc88b7b5f5446e6b92c18e8a9e2f74a" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 39549 } } }
I20250411 13:58:00.779707 20602 sys_catalog.cc:458] T 00000000000000000000000000000000 P fcc88b7b5f5446e6b92c18e8a9e2f74a [sys.catalog]: This master's current role is: LEADER
I20250411 13:58:01.367734 20622 catalog_manager.cc:1261] Loaded cluster ID: 67c81319d10943ea8b83c8c3fb3da8e4
I20250411 13:58:01.368139 20622 catalog_manager.cc:1554] T 00000000000000000000000000000000 P 11afb5e727404182a1e9ba1cf176f555: loading cluster ID for follower catalog manager: success
I20250411 13:58:01.381233 20622 catalog_manager.cc:1576] T 00000000000000000000000000000000 P 11afb5e727404182a1e9ba1cf176f555: acquiring CA information for follower catalog manager: success
I20250411 13:58:01.396791 20622 catalog_manager.cc:1604] T 00000000000000000000000000000000 P 11afb5e727404182a1e9ba1cf176f555: importing token verification keys for follower catalog manager: success; most recent TSK sequence number 0
W20250411 13:58:02.062889 20627 debug-util.cc:398] Leaking SignalData structure 0x7b080001b040 after lost signal to thread 20605
W20250411 13:58:02.431828 20605 thread.cc:640] GCE (cloud detector) Time spent creating pthread: real 1.766s	user 0.474s	sys 0.979s
W20250411 13:58:00.668133 20629 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:58:02.432199 20605 thread.cc:606] GCE (cloud detector) Time spent starting thread: real 1.767s	user 0.474s	sys 0.979s
I20250411 13:58:02.446123 20605 server_base.cc:1034] running on GCE node
W20250411 13:58:02.446702 20638 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:58:02.448101 20605 hybrid_clock.cc:584] initializing the hybrid clock with 'builtin' time source
I20250411 13:58:02.462715 20605 hybrid_clock.cc:630] waiting up to --ntp_initial_sync_wait_secs=10 seconds for the clock to synchronize
I20250411 13:58:02.464227 20605 hybrid_clock.cc:648] HybridClock initialized: now 1744379882464190 us; error 49 us; skew 500 ppm
I20250411 13:58:02.465320 20605 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:58:02.490106 20605 webserver.cc:466] Webserver started at http://127.15.113.1:45227/ using document root <none> and password file <none>
I20250411 13:58:02.491395 20605 fs_manager.cc:362] Metadata directory not provided
I20250411 13:58:02.491670 20605 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:58:02.492206 20605 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:58:02.498783 20605 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-0/data/instance:
uuid: "122f34c0ee774ec9a21f147299a21168"
format_stamp: "Formatted at 2025-04-11 13:58:02 on dist-test-slave-jcj2"
I20250411 13:58:02.500344 20605 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-0/wal/instance:
uuid: "122f34c0ee774ec9a21f147299a21168"
format_stamp: "Formatted at 2025-04-11 13:58:02 on dist-test-slave-jcj2"
I20250411 13:58:02.516813 20605 fs_manager.cc:696] Time spent creating directory manager: real 0.016s	user 0.013s	sys 0.001s
I20250411 13:58:02.530431 20643 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:58:02.531786 20605 fs_manager.cc:730] Time spent opening block manager: real 0.010s	user 0.002s	sys 0.009s
I20250411 13:58:02.532176 20605 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-0/data,/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-0/wal
uuid: "122f34c0ee774ec9a21f147299a21168"
format_stamp: "Formatted at 2025-04-11 13:58:02 on dist-test-slave-jcj2"
I20250411 13:58:02.532584 20605 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-0/wal
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-0/wal
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-0/data/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:58:02.636387 20605 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:58:02.638278 20605 env_posix.cc:2264] Not raising this process' running threads per effective uid limit of 18446744073709551615; it is already as high as it can go
I20250411 13:58:02.638824 20605 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:58:02.648020 20605 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:58:02.659982 20605 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:58:02.660301 20605 ts_tablet_manager.cc:525] Time spent load tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:58:02.660578 20605 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:58:02.660771 20605 ts_tablet_manager.cc:589] Time spent register tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:58:03.228080 20605 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.1:33621
I20250411 13:58:03.228170 20756 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.1:33621 every 8 connection(s)
I20250411 13:58:03.230594 20605 server_base.cc:1166] Dumped server information to /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-0/data/info.pb
I20250411 13:58:03.241209 15812 external_mini_cluster.cc:1413] Started /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu as pid 20605
I20250411 13:58:03.242008 15812 external_mini_cluster.cc:1427] Reading /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-0/wal/instance
I20250411 13:58:03.266485 15812 external_mini_cluster.cc:1351] Running /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu
/tmp/dist-test-taskQtg33F/build/tsan/bin/kudu
--fs_wal_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-1/wal
--fs_data_dirs=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-1/data
--block_manager=log
--webserver_interface=localhost
--never_fsync
--enable_minidumps=false
--redact=none
--metrics_log_interval_ms=1000
--log_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-1/logs
--server_dump_info_path=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-1/data/info.pb
--server_dump_info_format=pb
--rpc_server_allow_ephemeral_ports
--unlock_experimental_flags
--unlock_unsafe_flags
--logtostderr
--logbuflevel=-1
--ipki_server_key_size=768
--openssl_security_level_override=0
tserver
run
--rpc_bind_addresses=127.15.113.2:0
--local_ip_for_outbound_sockets=127.15.113.2
--webserver_interface=127.15.113.2
--webserver_port=0
--tserver_master_addrs=127.15.113.62:39525,127.15.113.61:38061,127.15.113.60:39549
--builtin_ntp_servers=127.15.113.20:43143
--builtin_ntp_poll_interval_ms=100
--ntp_initial_sync_wait_secs=10
--time_source=builtin
--raft_heartbeat_interval_ms=500
--leader_failure_max_missed_heartbeat_periods=2
--rpc_service_queue_length=200 with env {}
I20250411 13:58:03.322820 20759 heartbeater.cc:344] Connected to a master server at 127.15.113.62:39525
I20250411 13:58:03.323359 20759 heartbeater.cc:461] Registering TS with master...
I20250411 13:58:03.324610 20759 heartbeater.cc:507] Master 127.15.113.62:39525 requested a full tablet report, sending...
I20250411 13:58:03.326846 20758 heartbeater.cc:344] Connected to a master server at 127.15.113.61:38061
I20250411 13:58:03.327199 20758 heartbeater.cc:461] Registering TS with master...
I20250411 13:58:03.327229 20757 heartbeater.cc:344] Connected to a master server at 127.15.113.60:39549
I20250411 13:58:03.327629 20757 heartbeater.cc:461] Registering TS with master...
I20250411 13:58:03.327889 20369 ts_manager.cc:194] Registered new tserver with Master: 122f34c0ee774ec9a21f147299a21168 (127.15.113.1:33621)
I20250411 13:58:03.328085 20758 heartbeater.cc:507] Master 127.15.113.61:38061 requested a full tablet report, sending...
I20250411 13:58:03.328379 20757 heartbeater.cc:507] Master 127.15.113.60:39549 requested a full tablet report, sending...
I20250411 13:58:03.331463 20523 ts_manager.cc:194] Registered new tserver with Master: 122f34c0ee774ec9a21f147299a21168 (127.15.113.1:33621)
I20250411 13:58:03.331751 20444 ts_manager.cc:194] Registered new tserver with Master: 122f34c0ee774ec9a21f147299a21168 (127.15.113.1:33621)
I20250411 13:58:03.334513 20523 master_service.cc:496] Signed X509 certificate for tserver {username='slave'} at 127.15.113.1:32775
W20250411 13:58:03.629961 20767 flags.cc:425] Enabled unsafe flag: --openssl_security_level_override=0
W20250411 13:58:03.630474 20767 flags.cc:425] Enabled unsafe flag: --rpc_server_allow_ephemeral_ports=true
W20250411 13:58:03.631038 20767 flags.cc:425] Enabled unsafe flag: --never_fsync=true
W20250411 13:58:03.660897 20767 flags.cc:425] Enabled experimental flag: --ipki_server_key_size=768
W20250411 13:58:03.661765 20767 flags.cc:425] Enabled experimental flag: --local_ip_for_outbound_sockets=127.15.113.2
I20250411 13:58:03.696801 20767 tablet_server_runner.cc:78] Tablet server non-default flags:
--builtin_ntp_poll_interval_ms=100
--builtin_ntp_servers=127.15.113.20:43143
--ntp_initial_sync_wait_secs=10
--time_source=builtin
--leader_failure_max_missed_heartbeat_periods=2
--fs_data_dirs=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-1/data
--fs_wal_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-1/wal
--ipki_server_key_size=768
--openssl_security_level_override=0
--rpc_bind_addresses=127.15.113.2:0
--rpc_server_allow_ephemeral_ports=true
--rpc_service_queue_length=200
--metrics_log_interval_ms=1000
--server_dump_info_format=pb
--server_dump_info_path=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-1/data/info.pb
--webserver_interface=127.15.113.2
--webserver_port=0
--tserver_master_addrs=127.15.113.62:39525,127.15.113.61:38061,127.15.113.60:39549
--never_fsync=true
--redact=none
--unlock_experimental_flags=true
--unlock_unsafe_flags=true
--enable_minidumps=false
--local_ip_for_outbound_sockets=127.15.113.2
--log_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-1/logs
--logbuflevel=-1
--logtostderr=true

Tablet server version:
kudu 1.18.0-SNAPSHOT
revision 586cfc20905df35f49541b53af4442736790b0c1
build type FASTDEBUG
built by None at 11 Apr 2025 13:43:20 UTC on 5fd53c4cbb9d
build id 5571
TSAN enabled
I20250411 13:58:03.698170 20767 env_posix.cc:2264] Not raising this process' open files per process limit of 1048576; it is already as high as it can go
I20250411 13:58:03.699859 20767 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:58:03.720650 20774 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:58:04.338358 20757 heartbeater.cc:499] Master 127.15.113.60:39549 was elected leader, sending a full tablet report...
W20250411 13:58:03.721275 20773 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:58:03.725184 20776 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:58:04.853700 20775 instance_detector.cc:116] could not retrieve GCE instance metadata: Timed out: curl timeout: Timeout was reached: Connection time-out
I20250411 13:58:04.853821 20767 server_base.cc:1029] Not found: could not retrieve instance metadata: unable to detect cloud type of this node, probably running in non-cloud environment
I20250411 13:58:04.858222 20767 hybrid_clock.cc:584] initializing the hybrid clock with 'builtin' time source
I20250411 13:58:04.860875 20767 hybrid_clock.cc:630] waiting up to --ntp_initial_sync_wait_secs=10 seconds for the clock to synchronize
I20250411 13:58:04.862277 20767 hybrid_clock.cc:648] HybridClock initialized: now 1744379884862236 us; error 65 us; skew 500 ppm
I20250411 13:58:04.863091 20767 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:58:04.873708 20767 webserver.cc:466] Webserver started at http://127.15.113.2:41189/ using document root <none> and password file <none>
I20250411 13:58:04.874692 20767 fs_manager.cc:362] Metadata directory not provided
I20250411 13:58:04.874909 20767 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:58:04.875377 20767 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:58:04.879715 20767 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-1/data/instance:
uuid: "f5f00e6f80124adf9c59bc9893aa28d2"
format_stamp: "Formatted at 2025-04-11 13:58:04 on dist-test-slave-jcj2"
I20250411 13:58:04.880774 20767 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-1/wal/instance:
uuid: "f5f00e6f80124adf9c59bc9893aa28d2"
format_stamp: "Formatted at 2025-04-11 13:58:04 on dist-test-slave-jcj2"
I20250411 13:58:04.887490 20767 fs_manager.cc:696] Time spent creating directory manager: real 0.006s	user 0.007s	sys 0.001s
I20250411 13:58:04.893157 20784 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:58:04.894210 20767 fs_manager.cc:730] Time spent opening block manager: real 0.003s	user 0.004s	sys 0.000s
I20250411 13:58:04.894542 20767 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-1/data,/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-1/wal
uuid: "f5f00e6f80124adf9c59bc9893aa28d2"
format_stamp: "Formatted at 2025-04-11 13:58:04 on dist-test-slave-jcj2"
I20250411 13:58:04.894922 20767 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-1/wal
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-1/wal
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-1/data/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:58:04.950444 20767 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:58:04.951894 20767 env_posix.cc:2264] Not raising this process' running threads per effective uid limit of 18446744073709551615; it is already as high as it can go
I20250411 13:58:04.952303 20767 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:58:04.954663 20767 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:58:04.958676 20767 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:58:04.958896 20767 ts_tablet_manager.cc:525] Time spent loading tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:58:04.959146 20767 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:58:04.959299 20767 ts_tablet_manager.cc:589] Time spent registering tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:58:05.115134 20767 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.2:39531
I20250411 13:58:05.115720 20896 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.2:39531 every 8 connection(s)
I20250411 13:58:05.117569 20767 server_base.cc:1166] Dumped server information to /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-1/data/info.pb
I20250411 13:58:05.126845 15812 external_mini_cluster.cc:1413] Started /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu as pid 20767
I20250411 13:58:05.127548 15812 external_mini_cluster.cc:1427] Reading /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-1/wal/instance
I20250411 13:58:05.167016 15812 external_mini_cluster.cc:1351] Running /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu
/tmp/dist-test-taskQtg33F/build/tsan/bin/kudu
--fs_wal_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-2/wal
--fs_data_dirs=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-2/data
--block_manager=log
--webserver_interface=localhost
--never_fsync
--enable_minidumps=false
--redact=none
--metrics_log_interval_ms=1000
--log_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-2/logs
--server_dump_info_path=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-2/data/info.pb
--server_dump_info_format=pb
--rpc_server_allow_ephemeral_ports
--unlock_experimental_flags
--unlock_unsafe_flags
--logtostderr
--logbuflevel=-1
--ipki_server_key_size=768
--openssl_security_level_override=0
tserver
run
--rpc_bind_addresses=127.15.113.3:0
--local_ip_for_outbound_sockets=127.15.113.3
--webserver_interface=127.15.113.3
--webserver_port=0
--tserver_master_addrs=127.15.113.62:39525,127.15.113.61:38061,127.15.113.60:39549
--builtin_ntp_servers=127.15.113.20:43143
--builtin_ntp_poll_interval_ms=100
--ntp_initial_sync_wait_secs=10
--time_source=builtin
--raft_heartbeat_interval_ms=500
--leader_failure_max_missed_heartbeat_periods=2
--rpc_service_queue_length=200 with env {}
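The ts-2 invocation above differs from the ts-1 flag dump only in its per-server paths and bind addresses. A throwaway helper (illustrative only, not part of the Kudu test framework) makes that easy to verify when reading logs like this:

    # Parse gflags-style argument dumps and report which flags differ
    # between two server invocations.
    def parse_flags(args):
        flags = {}
        for arg in args:
            if arg.startswith("--"):
                key, _, value = arg[2:].partition("=")
                flags[key] = value or "true"   # bare flags such as --never_fsync
        return flags

    ts1 = parse_flags(["--rpc_bind_addresses=127.15.113.2:0", "--never_fsync"])
    ts2 = parse_flags(["--rpc_bind_addresses=127.15.113.3:0", "--never_fsync"])
    print({k: (ts1.get(k), ts2.get(k))
           for k in ts1.keys() | ts2.keys()
           if ts1.get(k) != ts2.get(k)})   # only per-server values differ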
I20250411 13:58:05.179081 20898 heartbeater.cc:344] Connected to a master server at 127.15.113.61:38061
I20250411 13:58:05.179553 20898 heartbeater.cc:461] Registering TS with master...
I20250411 13:58:05.180693 20898 heartbeater.cc:507] Master 127.15.113.61:38061 requested a full tablet report, sending...
I20250411 13:58:05.181816 20897 heartbeater.cc:344] Connected to a master server at 127.15.113.60:39549
I20250411 13:58:05.182170 20897 heartbeater.cc:461] Registering TS with master...
I20250411 13:58:05.183296 20444 ts_manager.cc:194] Registered new tserver with Master: f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531)
I20250411 13:58:05.183825 20897 heartbeater.cc:507] Master 127.15.113.60:39549 requested a full tablet report, sending...
I20250411 13:58:05.185884 20899 heartbeater.cc:344] Connected to a master server at 127.15.113.62:39525
I20250411 13:58:05.186223 20899 heartbeater.cc:461] Registering TS with master...
I20250411 13:58:05.186798 20523 ts_manager.cc:194] Registered new tserver with Master: f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531)
I20250411 13:58:05.187218 20899 heartbeater.cc:507] Master 127.15.113.62:39525 requested a full tablet report, sending...
I20250411 13:58:05.188477 20523 master_service.cc:496] Signed X509 certificate for tserver {username='slave'} at 127.15.113.2:54269
I20250411 13:58:05.190699 20369 ts_manager.cc:194] Registered new tserver with Master: f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531)
W20250411 13:58:05.518627 20907 flags.cc:425] Enabled unsafe flag: --openssl_security_level_override=0
W20250411 13:58:05.519189 20907 flags.cc:425] Enabled unsafe flag: --rpc_server_allow_ephemeral_ports=true
W20250411 13:58:05.519709 20907 flags.cc:425] Enabled unsafe flag: --never_fsync=true
W20250411 13:58:05.550650 20907 flags.cc:425] Enabled experimental flag: --ipki_server_key_size=768
W20250411 13:58:05.551600 20907 flags.cc:425] Enabled experimental flag: --local_ip_for_outbound_sockets=127.15.113.3
I20250411 13:58:05.587049 20907 tablet_server_runner.cc:78] Tablet server non-default flags:
--builtin_ntp_poll_interval_ms=100
--builtin_ntp_servers=127.15.113.20:43143
--ntp_initial_sync_wait_secs=10
--time_source=builtin
--leader_failure_max_missed_heartbeat_periods=2
--fs_data_dirs=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-2/data
--fs_wal_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-2/wal
--ipki_server_key_size=768
--openssl_security_level_override=0
--rpc_bind_addresses=127.15.113.3:0
--rpc_server_allow_ephemeral_ports=true
--rpc_service_queue_length=200
--metrics_log_interval_ms=1000
--server_dump_info_format=pb
--server_dump_info_path=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-2/data/info.pb
--webserver_interface=127.15.113.3
--webserver_port=0
--tserver_master_addrs=127.15.113.62:39525,127.15.113.61:38061,127.15.113.60:39549
--never_fsync=true
--redact=none
--unlock_experimental_flags=true
--unlock_unsafe_flags=true
--enable_minidumps=false
--local_ip_for_outbound_sockets=127.15.113.3
--log_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-2/logs
--logbuflevel=-1
--logtostderr=true

Tablet server version:
kudu 1.18.0-SNAPSHOT
revision 586cfc20905df35f49541b53af4442736790b0c1
build type FASTDEBUG
built by None at 11 Apr 2025 13:43:20 UTC on 5fd53c4cbb9d
build id 5571
TSAN enabled
I20250411 13:58:05.588380 20907 env_posix.cc:2264] Not raising this process' open files per process limit of 1048576; it is already as high as it can go
I20250411 13:58:05.590119 20907 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:58:05.607905 20916 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:58:05.608737 20914 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:58:05.609004 20913 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:58:05.609656 20907 server_base.cc:1034] running on GCE node
I20250411 13:58:06.192422 20897 heartbeater.cc:499] Master 127.15.113.60:39549 was elected leader, sending a full tablet report...
I20250411 13:58:06.723873 20907 hybrid_clock.cc:584] initializing the hybrid clock with 'builtin' time source
I20250411 13:58:06.725944 20907 hybrid_clock.cc:630] waiting up to --ntp_initial_sync_wait_secs=10 seconds for the clock to synchronize
I20250411 13:58:06.727265 20907 hybrid_clock.cc:648] HybridClock initialized: now 1744379886727246 us; error 45 us; skew 500 ppm
I20250411 13:58:06.728116 20907 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:58:06.734263 20907 webserver.cc:466] Webserver started at http://127.15.113.3:37393/ using document root <none> and password file <none>
I20250411 13:58:06.735241 20907 fs_manager.cc:362] Metadata directory not provided
I20250411 13:58:06.735455 20907 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:58:06.735890 20907 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:58:06.740376 20907 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-2/data/instance:
uuid: "22912eefbeb34010bb4bb7c0acaddec2"
format_stamp: "Formatted at 2025-04-11 13:58:06 on dist-test-slave-jcj2"
I20250411 13:58:06.741459 20907 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-2/wal/instance:
uuid: "22912eefbeb34010bb4bb7c0acaddec2"
format_stamp: "Formatted at 2025-04-11 13:58:06 on dist-test-slave-jcj2"
I20250411 13:58:06.748397 20907 fs_manager.cc:696] Time spent creating directory manager: real 0.006s	user 0.000s	sys 0.008s
I20250411 13:58:06.753919 20924 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:58:06.755005 20907 fs_manager.cc:730] Time spent opening block manager: real 0.003s	user 0.003s	sys 0.000s
I20250411 13:58:06.755326 20907 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-2/data,/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-2/wal
uuid: "22912eefbeb34010bb4bb7c0acaddec2"
format_stamp: "Formatted at 2025-04-11 13:58:06 on dist-test-slave-jcj2"
I20250411 13:58:06.755673 20907 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-2/wal
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-2/wal
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-2/data/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:58:06.824638 20907 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:58:06.826058 20907 env_posix.cc:2264] Not raising this process' running threads per effective uid limit of 18446744073709551615; it is already as high as it can go
I20250411 13:58:06.826510 20907 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:58:06.828992 20907 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:58:06.833145 20907 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:58:06.833352 20907 ts_tablet_manager.cc:525] Time spent loading tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:58:06.833629 20907 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:58:06.833787 20907 ts_tablet_manager.cc:589] Time spent registering tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:58:06.980119 20907 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.3:33391
I20250411 13:58:06.980209 21036 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.3:33391 every 8 connection(s)
I20250411 13:58:06.982486 20907 server_base.cc:1166] Dumped server information to /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-2/data/info.pb
I20250411 13:58:06.984205 15812 external_mini_cluster.cc:1413] Started /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu as pid 20907
I20250411 13:58:06.984767 15812 external_mini_cluster.cc:1427] Reading /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.AlterTable.1744379822517842-15812-0/minicluster-data/ts-2/wal/instance
I20250411 13:58:07.032464 21038 heartbeater.cc:344] Connected to a master server at 127.15.113.61:38061
I20250411 13:58:07.033952 21038 heartbeater.cc:461] Registering TS with master...
I20250411 13:58:07.035236 21038 heartbeater.cc:507] Master 127.15.113.61:38061 requested a full tablet report, sending...
I20250411 13:58:07.037015 21043 heartbeater.cc:344] Connected to a master server at 127.15.113.62:39525
I20250411 13:58:07.037307 21043 heartbeater.cc:461] Registering TS with master...
I20250411 13:58:07.038040 21043 heartbeater.cc:507] Master 127.15.113.62:39525 requested a full tablet report, sending...
I20250411 13:58:07.038673 20444 ts_manager.cc:194] Registered new tserver with Master: 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391)
I20250411 13:58:07.040234 20369 ts_manager.cc:194] Registered new tserver with Master: 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391)
I20250411 13:58:07.043150 21037 heartbeater.cc:344] Connected to a master server at 127.15.113.60:39549
I20250411 13:58:07.043367 21037 heartbeater.cc:461] Registering TS with master...
I20250411 13:58:07.043901 21037 heartbeater.cc:507] Master 127.15.113.60:39549 requested a full tablet report, sending...
I20250411 13:58:07.045308 20523 ts_manager.cc:194] Registered new tserver with Master: 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391)
I20250411 13:58:07.046526 20523 master_service.cc:496] Signed X509 certificate for tserver {username='slave'} at 127.15.113.3:60731
I20250411 13:58:07.057482 15812 external_mini_cluster.cc:934] 3 TS(s) registered with all masters
I20250411 13:58:07.100596 20523 catalog_manager.cc:2232] Servicing CreateTable request from {username='slave'} at 127.0.0.1:43002:
name: "table_to_alter"
schema {
  columns {
    name: "key"
    type: INT64
    is_key: true
    is_nullable: false
    encoding: AUTO_ENCODING
    compression: DEFAULT_COMPRESSION
    cfile_block_size: 0
    immutable: false
  }
  columns {
    name: "string_column"
    type: STRING
    is_key: false
    is_nullable: true
    encoding: AUTO_ENCODING
    compression: DEFAULT_COMPRESSION
    cfile_block_size: 0
    immutable: false
  }
}
num_replicas: 3
split_rows_range_bounds {
}
partition_schema {
  hash_schema {
    columns {
      name: "key"
    }
    num_buckets: 10
    seed: 0
  }
  range_schema {
    columns {
      name: "key"
    }
  }
}
dimension_label: "table_to_alter_very_loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooonooooog_label_suffix"
W20250411 13:58:07.103579 20523 catalog_manager.cc:6943] The number of live tablet servers is not enough to re-replicate a tablet replica of the newly created table table_to_alter in case of a server failure: 4 tablet servers would be needed, 3 are available. Consider bringing up more tablet servers.
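The request above asks for 10 hash buckets over a single unbounded range with 3 replicas each, so the burst of CreateTablet messages that follows is expected: 10 tablets times 3 replicas is 30 replica creations spread over the 3 tablet servers. The warning's "4 tablet servers would be needed" is simply num_replicas + 1, the minimum required to re-replicate after losing one server. The arithmetic, for reference:

    # Fan-out implied by the CreateTable request above.
    num_hash_buckets = 10    # hash_schema num_buckets
    num_range_parts = 1      # single unbounded range partition
    num_replicas = 3
    tablets = num_hash_buckets * num_range_parts     # 10 tablets
    replicas = tablets * num_replicas                # 30 CreateTablet calls
    min_servers_to_rereplicate = num_replicas + 1    # 4, matching the warning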
I20250411 13:58:07.233201 20690 tablet_service.cc:1467] Processing CreateTablet for tablet 163e127ff588462996fe89e47afb9ee3 (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 2, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.233436 20830 tablet_service.cc:1467] Processing CreateTablet for tablet 163e127ff588462996fe89e47afb9ee3 (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 2, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.235989 20826 tablet_service.cc:1467] Processing CreateTablet for tablet 46a1673aabd54e5f83f6b401b54bf2dc (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 6, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.237329 20688 tablet_service.cc:1467] Processing CreateTablet for tablet e05ab5d0d86c4259ae74d3dca627589c (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 4, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.238577 20690 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 163e127ff588462996fe89e47afb9ee3. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.229139 20968 tablet_service.cc:1467] Processing CreateTablet for tablet e05ab5d0d86c4259ae74d3dca627589c (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 4, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.238909 20683 tablet_service.cc:1467] Processing CreateTablet for tablet 82ee2b302bbb436ab32dd61acbf16229 (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 9, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.223937 20967 tablet_service.cc:1467] Processing CreateTablet for tablet 05333cbabc6e492d9d8bb10b6394a9ad (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 5, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.239959 20688 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet e05ab5d0d86c4259ae74d3dca627589c. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.240542 20828 tablet_service.cc:1467] Processing CreateTablet for tablet e05ab5d0d86c4259ae74d3dca627589c (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 4, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.240990 20685 tablet_service.cc:1467] Processing CreateTablet for tablet 7a5ac5676226458ead4471475edfb6f7 (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 7, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.241797 20967 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 05333cbabc6e492d9d8bb10b6394a9ad. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.222644 20971 tablet_service.cc:1467] Processing CreateTablet for tablet a8eb3b2828c745c591f3b57222005ae4 (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 1, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.234344 20684 tablet_service.cc:1467] Processing CreateTablet for tablet f06a30b6a1ae4fc98270958c903ee84e (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 8, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.242480 20831 tablet_service.cc:1467] Processing CreateTablet for tablet a8eb3b2828c745c591f3b57222005ae4 (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 1, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.235772 20691 tablet_service.cc:1467] Processing CreateTablet for tablet a8eb3b2828c745c591f3b57222005ae4 (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 1, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.235805 20689 tablet_service.cc:1467] Processing CreateTablet for tablet 92dee3062e3e4cebb49c41c569889f43 (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 3, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.243801 20685 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 7a5ac5676226458ead4471475edfb6f7. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.225804 20970 tablet_service.cc:1467] Processing CreateTablet for tablet 163e127ff588462996fe89e47afb9ee3 (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 2, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.245458 20684 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet f06a30b6a1ae4fc98270958c903ee84e. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.245723 20970 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 163e127ff588462996fe89e47afb9ee3. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.246186 20689 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 92dee3062e3e4cebb49c41c569889f43. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.247445 20691 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet a8eb3b2828c745c591f3b57222005ae4. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.247874 20827 tablet_service.cc:1467] Processing CreateTablet for tablet 05333cbabc6e492d9d8bb10b6394a9ad (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 5, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.248481 20692 tablet_service.cc:1467] Processing CreateTablet for tablet b0a853b842e54d5d993fc9199fd7dc0f (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 0, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.248775 20683 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 82ee2b302bbb436ab32dd61acbf16229. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.248735 20829 tablet_service.cc:1467] Processing CreateTablet for tablet 92dee3062e3e4cebb49c41c569889f43 (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 3, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.249828 20692 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet b0a853b842e54d5d993fc9199fd7dc0f. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.227696 20966 tablet_service.cc:1467] Processing CreateTablet for tablet 46a1673aabd54e5f83f6b401b54bf2dc (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 6, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.250456 20832 tablet_service.cc:1467] Processing CreateTablet for tablet b0a853b842e54d5d993fc9199fd7dc0f (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 0, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.250933 20687 tablet_service.cc:1467] Processing CreateTablet for tablet 05333cbabc6e492d9d8bb10b6394a9ad (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 5, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.251935 20830 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 163e127ff588462996fe89e47afb9ee3. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.252056 20687 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 05333cbabc6e492d9d8bb10b6394a9ad. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.253115 20831 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet a8eb3b2828c745c591f3b57222005ae4. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.251027 20969 tablet_service.cc:1467] Processing CreateTablet for tablet 92dee3062e3e4cebb49c41c569889f43 (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 3, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.253497 20686 tablet_service.cc:1467] Processing CreateTablet for tablet 46a1673aabd54e5f83f6b401b54bf2dc (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 6, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.253865 20832 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet b0a853b842e54d5d993fc9199fd7dc0f. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.254510 20686 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 46a1673aabd54e5f83f6b401b54bf2dc. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.254696 20828 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet e05ab5d0d86c4259ae74d3dca627589c. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.266925 20972 tablet_service.cc:1467] Processing CreateTablet for tablet b0a853b842e54d5d993fc9199fd7dc0f (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 0, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.270483 20971 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet a8eb3b2828c745c591f3b57222005ae4. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.272083 20965 tablet_service.cc:1467] Processing CreateTablet for tablet 7a5ac5676226458ead4471475edfb6f7 (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 7, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.274047 20972 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet b0a853b842e54d5d993fc9199fd7dc0f. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.275177 20968 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet e05ab5d0d86c4259ae74d3dca627589c. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.275146 20964 tablet_service.cc:1467] Processing CreateTablet for tablet f06a30b6a1ae4fc98270958c903ee84e (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 8, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.275758 20969 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 92dee3062e3e4cebb49c41c569889f43. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.276360 20964 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet f06a30b6a1ae4fc98270958c903ee84e. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.277853 20965 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 7a5ac5676226458ead4471475edfb6f7. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.278790 20963 tablet_service.cc:1467] Processing CreateTablet for tablet 82ee2b302bbb436ab32dd61acbf16229 (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 9, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.279903 20963 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 82ee2b302bbb436ab32dd61acbf16229. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.299306 21070 tablet_bootstrap.cc:492] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168: Bootstrap starting.
I20250411 13:58:07.307976 21070 tablet_bootstrap.cc:654] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:07.310598 21070 log.cc:826] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168: Log is configured to *not* fsync() on all Append() calls
I20250411 13:58:07.315352 20825 tablet_service.cc:1467] Processing CreateTablet for tablet 7a5ac5676226458ead4471475edfb6f7 (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 7, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.318022 20826 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 46a1673aabd54e5f83f6b401b54bf2dc. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.319497 20823 tablet_service.cc:1467] Processing CreateTablet for tablet 82ee2b302bbb436ab32dd61acbf16229 (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 9, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.320618 20823 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 82ee2b302bbb436ab32dd61acbf16229. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.322111 20829 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 92dee3062e3e4cebb49c41c569889f43. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.318212 20824 tablet_service.cc:1467] Processing CreateTablet for tablet f06a30b6a1ae4fc98270958c903ee84e (DEFAULT_TABLE table=table_to_alter [id=79df1daf41cf4e3d846884eae0bfe60d]), partition=HASH (key) PARTITION 8, RANGE (key) PARTITION UNBOUNDED
I20250411 13:58:07.324575 20824 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet f06a30b6a1ae4fc98270958c903ee84e. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.325584 20827 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 05333cbabc6e492d9d8bb10b6394a9ad. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.326598 20825 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 7a5ac5676226458ead4471475edfb6f7. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.328943 20966 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 46a1673aabd54e5f83f6b401b54bf2dc. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:07.405112 21072 tablet_bootstrap.cc:492] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2: Bootstrap starting.
I20250411 13:58:07.424291 21072 tablet_bootstrap.cc:654] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:07.426748 21070 tablet_bootstrap.cc:492] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168: No bootstrap required, opened a new log
I20250411 13:58:07.428802 21072 log.cc:826] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2: Log is configured to *not* fsync() on all Append() calls
I20250411 13:58:07.430560 21070 ts_tablet_manager.cc:1397] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168: Time spent bootstrapping tablet: real 0.132s	user 0.015s	sys 0.004s
I20250411 13:58:07.458667 21070 raft_consensus.cc:357] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.459810 21070 raft_consensus.cc:383] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:07.460175 21070 raft_consensus.cc:738] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 122f34c0ee774ec9a21f147299a21168, State: Initialized, Role: FOLLOWER
I20250411 13:58:07.461242 21070 consensus_queue.cc:260] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.470208 21074 tablet_bootstrap.cc:492] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2: Bootstrap starting.
I20250411 13:58:07.471535 21070 ts_tablet_manager.cc:1428] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168: Time spent starting tablet: real 0.040s	user 0.029s	sys 0.007s
I20250411 13:58:07.474941 21070 tablet_bootstrap.cc:492] T 46a1673aabd54e5f83f6b401b54bf2dc P 122f34c0ee774ec9a21f147299a21168: Bootstrap starting.
I20250411 13:58:07.476436 21072 tablet_bootstrap.cc:492] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2: No bootstrap required, opened a new log
I20250411 13:58:07.476974 21072 ts_tablet_manager.cc:1397] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2: Time spent bootstrapping tablet: real 0.073s	user 0.027s	sys 0.029s
I20250411 13:58:07.483776 21070 tablet_bootstrap.cc:654] T 46a1673aabd54e5f83f6b401b54bf2dc P 122f34c0ee774ec9a21f147299a21168: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:07.496076 21074 tablet_bootstrap.cc:654] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:07.498818 21074 log.cc:826] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2: Log is configured to *not* fsync() on all Append() calls
I20250411 13:58:07.503682 21070 tablet_bootstrap.cc:492] T 46a1673aabd54e5f83f6b401b54bf2dc P 122f34c0ee774ec9a21f147299a21168: No bootstrap required, opened a new log
I20250411 13:58:07.504177 21070 ts_tablet_manager.cc:1397] T 46a1673aabd54e5f83f6b401b54bf2dc P 122f34c0ee774ec9a21f147299a21168: Time spent bootstrapping tablet: real 0.030s	user 0.011s	sys 0.015s
I20250411 13:58:07.505357 21074 tablet_bootstrap.cc:492] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2: No bootstrap required, opened a new log
I20250411 13:58:07.505860 21074 ts_tablet_manager.cc:1397] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2: Time spent bootstrapping tablet: real 0.036s	user 0.015s	sys 0.000s
I20250411 13:58:07.507241 21070 raft_consensus.cc:357] T 46a1673aabd54e5f83f6b401b54bf2dc P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.507954 21070 raft_consensus.cc:383] T 46a1673aabd54e5f83f6b401b54bf2dc P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:07.508226 21070 raft_consensus.cc:738] T 46a1673aabd54e5f83f6b401b54bf2dc P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 122f34c0ee774ec9a21f147299a21168, State: Initialized, Role: FOLLOWER
I20250411 13:58:07.508265 21072 raft_consensus.cc:357] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.509148 21072 raft_consensus.cc:383] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:07.508906 21070 consensus_queue.cc:260] T 46a1673aabd54e5f83f6b401b54bf2dc P 122f34c0ee774ec9a21f147299a21168 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.509430 21072 raft_consensus.cc:738] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 22912eefbeb34010bb4bb7c0acaddec2, State: Initialized, Role: FOLLOWER
I20250411 13:58:07.510231 21072 consensus_queue.cc:260] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.513500 21070 ts_tablet_manager.cc:1428] T 46a1673aabd54e5f83f6b401b54bf2dc P 122f34c0ee774ec9a21f147299a21168: Time spent starting tablet: real 0.009s	user 0.005s	sys 0.000s
I20250411 13:58:07.514367 21070 tablet_bootstrap.cc:492] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168: Bootstrap starting.
I20250411 13:58:07.520553 21070 tablet_bootstrap.cc:654] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:07.532821 21070 tablet_bootstrap.cc:492] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168: No bootstrap required, opened a new log
I20250411 13:58:07.533397 21070 ts_tablet_manager.cc:1397] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168: Time spent bootstrapping tablet: real 0.019s	user 0.013s	sys 0.004s
I20250411 13:58:07.535866 21070 raft_consensus.cc:357] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.536761 21070 raft_consensus.cc:383] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:07.537091 21070 raft_consensus.cc:738] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 122f34c0ee774ec9a21f147299a21168, State: Initialized, Role: FOLLOWER
I20250411 13:58:07.537742 21070 consensus_queue.cc:260] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.540671 21070 ts_tablet_manager.cc:1428] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168: Time spent starting tablet: real 0.007s	user 0.005s	sys 0.000s
I20250411 13:58:07.541520 21070 tablet_bootstrap.cc:492] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168: Bootstrap starting.
I20250411 13:58:07.541051 21074 raft_consensus.cc:357] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.543954 21074 raft_consensus.cc:383] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:07.544353 21074 raft_consensus.cc:738] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: f5f00e6f80124adf9c59bc9893aa28d2, State: Initialized, Role: FOLLOWER
I20250411 13:58:07.545550 21074 consensus_queue.cc:260] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.547887 21070 tablet_bootstrap.cc:654] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168: Neither blocks nor log segments found. Creating new log.
W20250411 13:58:07.553913 20764 tablet.cc:2367] T 46a1673aabd54e5f83f6b401b54bf2dc P 122f34c0ee774ec9a21f147299a21168: Can't schedule compaction. Clean time has not been advanced past its initial value.
I20250411 13:58:07.557366 21074 ts_tablet_manager.cc:1428] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2: Time spent starting tablet: real 0.051s	user 0.032s	sys 0.013s
I20250411 13:58:07.563262 21070 tablet_bootstrap.cc:492] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168: No bootstrap required, opened a new log
I20250411 13:58:07.563769 21070 ts_tablet_manager.cc:1397] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168: Time spent bootstrapping tablet: real 0.022s	user 0.008s	sys 0.008s
I20250411 13:58:07.566143 21070 raft_consensus.cc:357] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } } peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } }
I20250411 13:58:07.566952 21070 raft_consensus.cc:383] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:07.567255 21070 raft_consensus.cc:738] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 122f34c0ee774ec9a21f147299a21168, State: Initialized, Role: FOLLOWER
I20250411 13:58:07.568038 21070 consensus_queue.cc:260] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } } peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } }
I20250411 13:58:07.570747 21070 ts_tablet_manager.cc:1428] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168: Time spent starting tablet: real 0.007s	user 0.006s	sys 0.000s
I20250411 13:58:07.571907 21074 tablet_bootstrap.cc:492] T a8eb3b2828c745c591f3b57222005ae4 P f5f00e6f80124adf9c59bc9893aa28d2: Bootstrap starting.
I20250411 13:58:07.583591 21070 tablet_bootstrap.cc:492] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168: Bootstrap starting.
I20250411 13:58:07.590406 21070 tablet_bootstrap.cc:654] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:07.590461 21074 tablet_bootstrap.cc:654] T a8eb3b2828c745c591f3b57222005ae4 P f5f00e6f80124adf9c59bc9893aa28d2: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:07.605389 21037 heartbeater.cc:499] Master 127.15.113.60:39549 was elected leader, sending a full tablet report...
I20250411 13:58:07.606364 21072 ts_tablet_manager.cc:1428] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2: Time spent starting tablet: real 0.129s	user 0.070s	sys 0.055s
I20250411 13:58:07.607337 21072 tablet_bootstrap.cc:492] T 92dee3062e3e4cebb49c41c569889f43 P 22912eefbeb34010bb4bb7c0acaddec2: Bootstrap starting.
I20250411 13:58:07.610015 21074 tablet_bootstrap.cc:492] T a8eb3b2828c745c591f3b57222005ae4 P f5f00e6f80124adf9c59bc9893aa28d2: No bootstrap required, opened a new log
I20250411 13:58:07.610412 21074 ts_tablet_manager.cc:1397] T a8eb3b2828c745c591f3b57222005ae4 P f5f00e6f80124adf9c59bc9893aa28d2: Time spent bootstrapping tablet: real 0.039s	user 0.015s	sys 0.005s
I20250411 13:58:07.612046 21070 tablet_bootstrap.cc:492] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168: No bootstrap required, opened a new log
I20250411 13:58:07.612502 21070 ts_tablet_manager.cc:1397] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168: Time spent bootstrapping tablet: real 0.029s	user 0.008s	sys 0.003s
I20250411 13:58:07.612949 21074 raft_consensus.cc:357] T a8eb3b2828c745c591f3b57222005ae4 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.613637 21074 raft_consensus.cc:383] T a8eb3b2828c745c591f3b57222005ae4 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:07.613907 21074 raft_consensus.cc:738] T a8eb3b2828c745c591f3b57222005ae4 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: f5f00e6f80124adf9c59bc9893aa28d2, State: Initialized, Role: FOLLOWER
I20250411 13:58:07.614645 21074 consensus_queue.cc:260] T a8eb3b2828c745c591f3b57222005ae4 P f5f00e6f80124adf9c59bc9893aa28d2 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.615391 21072 tablet_bootstrap.cc:654] T 92dee3062e3e4cebb49c41c569889f43 P 22912eefbeb34010bb4bb7c0acaddec2: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:07.616083 21070 raft_consensus.cc:357] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.622999 21070 raft_consensus.cc:383] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:07.623368 21070 raft_consensus.cc:738] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 122f34c0ee774ec9a21f147299a21168, State: Initialized, Role: FOLLOWER
I20250411 13:58:07.624111 21070 consensus_queue.cc:260] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.625965 21074 ts_tablet_manager.cc:1428] T a8eb3b2828c745c591f3b57222005ae4 P f5f00e6f80124adf9c59bc9893aa28d2: Time spent starting tablet: real 0.015s	user 0.003s	sys 0.003s
I20250411 13:58:07.627251 21070 ts_tablet_manager.cc:1428] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168: Time spent starting tablet: real 0.014s	user 0.004s	sys 0.003s
I20250411 13:58:07.628203 21070 tablet_bootstrap.cc:492] T 82ee2b302bbb436ab32dd61acbf16229 P 122f34c0ee774ec9a21f147299a21168: Bootstrap starting.
I20250411 13:58:07.635011 21074 tablet_bootstrap.cc:492] T b0a853b842e54d5d993fc9199fd7dc0f P f5f00e6f80124adf9c59bc9893aa28d2: Bootstrap starting.
I20250411 13:58:07.639266 21070 tablet_bootstrap.cc:654] T 82ee2b302bbb436ab32dd61acbf16229 P 122f34c0ee774ec9a21f147299a21168: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:07.642021 21074 tablet_bootstrap.cc:654] T b0a853b842e54d5d993fc9199fd7dc0f P f5f00e6f80124adf9c59bc9893aa28d2: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:07.643709 21072 tablet_bootstrap.cc:492] T 92dee3062e3e4cebb49c41c569889f43 P 22912eefbeb34010bb4bb7c0acaddec2: No bootstrap required, opened a new log
I20250411 13:58:07.647246 21072 ts_tablet_manager.cc:1397] T 92dee3062e3e4cebb49c41c569889f43 P 22912eefbeb34010bb4bb7c0acaddec2: Time spent bootstrapping tablet: real 0.040s	user 0.012s	sys 0.016s
I20250411 13:58:07.652973 21075 raft_consensus.cc:491] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:58:07.653514 21075 raft_consensus.cc:513] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.660979 21075 leader_election.cc:290] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531), 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391)
I20250411 13:58:07.650508 21072 raft_consensus.cc:357] T 92dee3062e3e4cebb49c41c569889f43 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
W20250411 13:58:07.662155 20904 tablet.cc:2367] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2: Can't schedule compaction. Clean time has not been advanced past its initial value.
I20250411 13:58:07.662201 21072 raft_consensus.cc:383] T 92dee3062e3e4cebb49c41c569889f43 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:07.662547 21072 raft_consensus.cc:738] T 92dee3062e3e4cebb49c41c569889f43 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 22912eefbeb34010bb4bb7c0acaddec2, State: Initialized, Role: FOLLOWER
I20250411 13:58:07.663370 21072 consensus_queue.cc:260] T 92dee3062e3e4cebb49c41c569889f43 P 22912eefbeb34010bb4bb7c0acaddec2 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.675284 21072 ts_tablet_manager.cc:1428] T 92dee3062e3e4cebb49c41c569889f43 P 22912eefbeb34010bb4bb7c0acaddec2: Time spent starting tablet: real 0.028s	user 0.007s	sys 0.001s
I20250411 13:58:07.676473 21070 tablet_bootstrap.cc:492] T 82ee2b302bbb436ab32dd61acbf16229 P 122f34c0ee774ec9a21f147299a21168: No bootstrap required, opened a new log
I20250411 13:58:07.677008 21070 ts_tablet_manager.cc:1397] T 82ee2b302bbb436ab32dd61acbf16229 P 122f34c0ee774ec9a21f147299a21168: Time spent bootstrapping tablet: real 0.049s	user 0.016s	sys 0.001s
I20250411 13:58:07.680178 21070 raft_consensus.cc:357] T 82ee2b302bbb436ab32dd61acbf16229 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.680994 21070 raft_consensus.cc:383] T 82ee2b302bbb436ab32dd61acbf16229 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:07.681293 21070 raft_consensus.cc:738] T 82ee2b302bbb436ab32dd61acbf16229 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 122f34c0ee774ec9a21f147299a21168, State: Initialized, Role: FOLLOWER
I20250411 13:58:07.682124 21070 consensus_queue.cc:260] T 82ee2b302bbb436ab32dd61acbf16229 P 122f34c0ee774ec9a21f147299a21168 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.686751 21070 ts_tablet_manager.cc:1428] T 82ee2b302bbb436ab32dd61acbf16229 P 122f34c0ee774ec9a21f147299a21168: Time spent starting tablet: real 0.009s	user 0.007s	sys 0.000s
I20250411 13:58:07.687875 21070 tablet_bootstrap.cc:492] T 7a5ac5676226458ead4471475edfb6f7 P 122f34c0ee774ec9a21f147299a21168: Bootstrap starting.
I20250411 13:58:07.688494 21072 tablet_bootstrap.cc:492] T a8eb3b2828c745c591f3b57222005ae4 P 22912eefbeb34010bb4bb7c0acaddec2: Bootstrap starting.
I20250411 13:58:07.693094 21075 raft_consensus.cc:491] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:58:07.693538 21075 raft_consensus.cc:513] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } } peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } }
I20250411 13:58:07.698815 21072 tablet_bootstrap.cc:654] T a8eb3b2828c745c591f3b57222005ae4 P 22912eefbeb34010bb4bb7c0acaddec2: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:07.690699 21074 tablet_bootstrap.cc:492] T b0a853b842e54d5d993fc9199fd7dc0f P f5f00e6f80124adf9c59bc9893aa28d2: No bootstrap required, opened a new log
I20250411 13:58:07.699409 21074 ts_tablet_manager.cc:1397] T b0a853b842e54d5d993fc9199fd7dc0f P f5f00e6f80124adf9c59bc9893aa28d2: Time spent bootstrapping tablet: real 0.065s	user 0.019s	sys 0.020s
I20250411 13:58:07.701890 21074 raft_consensus.cc:357] T b0a853b842e54d5d993fc9199fd7dc0f P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } } peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } }
I20250411 13:58:07.702530 21074 raft_consensus.cc:383] T b0a853b842e54d5d993fc9199fd7dc0f P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:07.702827 21074 raft_consensus.cc:738] T b0a853b842e54d5d993fc9199fd7dc0f P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: f5f00e6f80124adf9c59bc9893aa28d2, State: Initialized, Role: FOLLOWER
I20250411 13:58:07.703603 21074 consensus_queue.cc:260] T b0a853b842e54d5d993fc9199fd7dc0f P f5f00e6f80124adf9c59bc9893aa28d2 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } } peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } }
I20250411 13:58:07.713527 21075 leader_election.cc:290] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391), f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531)
I20250411 13:58:07.719528 21074 ts_tablet_manager.cc:1428] T b0a853b842e54d5d993fc9199fd7dc0f P f5f00e6f80124adf9c59bc9893aa28d2: Time spent starting tablet: real 0.020s	user 0.005s	sys 0.002s
I20250411 13:58:07.722208 20852 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "a8eb3b2828c745c591f3b57222005ae4" candidate_uuid: "122f34c0ee774ec9a21f147299a21168" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" is_pre_election: true
I20250411 13:58:07.723076 20852 raft_consensus.cc:2466] T a8eb3b2828c745c591f3b57222005ae4 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate 122f34c0ee774ec9a21f147299a21168 in term 0.
I20250411 13:58:07.724924 20851 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "b0a853b842e54d5d993fc9199fd7dc0f" candidate_uuid: "122f34c0ee774ec9a21f147299a21168" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" is_pre_election: true
I20250411 13:58:07.725541 20851 raft_consensus.cc:2466] T b0a853b842e54d5d993fc9199fd7dc0f P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate 122f34c0ee774ec9a21f147299a21168 in term 0.
I20250411 13:58:07.726406 21080 raft_consensus.cc:491] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:58:07.726855 21080 raft_consensus.cc:513] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.727854 21070 tablet_bootstrap.cc:654] T 7a5ac5676226458ead4471475edfb6f7 P 122f34c0ee774ec9a21f147299a21168: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:07.728629 21072 tablet_bootstrap.cc:492] T a8eb3b2828c745c591f3b57222005ae4 P 22912eefbeb34010bb4bb7c0acaddec2: No bootstrap required, opened a new log
I20250411 13:58:07.729043 21072 ts_tablet_manager.cc:1397] T a8eb3b2828c745c591f3b57222005ae4 P 22912eefbeb34010bb4bb7c0acaddec2: Time spent bootstrapping tablet: real 0.041s	user 0.012s	sys 0.004s
I20250411 13:58:07.731462 21072 raft_consensus.cc:357] T a8eb3b2828c745c591f3b57222005ae4 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.732138 21072 raft_consensus.cc:383] T a8eb3b2828c745c591f3b57222005ae4 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:07.732401 21072 raft_consensus.cc:738] T a8eb3b2828c745c591f3b57222005ae4 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 22912eefbeb34010bb4bb7c0acaddec2, State: Initialized, Role: FOLLOWER
I20250411 13:58:07.733088 21072 consensus_queue.cc:260] T a8eb3b2828c745c591f3b57222005ae4 P 22912eefbeb34010bb4bb7c0acaddec2 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.737555 20992 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "a8eb3b2828c745c591f3b57222005ae4" candidate_uuid: "122f34c0ee774ec9a21f147299a21168" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "22912eefbeb34010bb4bb7c0acaddec2" is_pre_election: true
I20250411 13:58:07.738549 20992 raft_consensus.cc:2466] T a8eb3b2828c745c591f3b57222005ae4 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate 122f34c0ee774ec9a21f147299a21168 in term 0.
I20250411 13:58:07.740236 20991 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "b0a853b842e54d5d993fc9199fd7dc0f" candidate_uuid: "122f34c0ee774ec9a21f147299a21168" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "22912eefbeb34010bb4bb7c0acaddec2" is_pre_election: true
I20250411 13:58:07.745441 20645 leader_election.cc:304] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 122f34c0ee774ec9a21f147299a21168, 22912eefbeb34010bb4bb7c0acaddec2; no voters: 
W20250411 13:58:07.746724 20645 leader_election.cc:343] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 pre-election: Tablet error from VoteRequest() call to peer 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391): Illegal state: must be running to vote when last-logged opid is not known
I20250411 13:58:07.747324 21075 raft_consensus.cc:2802] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Leader pre-election won for term 1
I20250411 13:58:07.747655 21075 raft_consensus.cc:491] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
I20250411 13:58:07.747987 21075 raft_consensus.cc:3058] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:07.750330 21072 ts_tablet_manager.cc:1428] T a8eb3b2828c745c591f3b57222005ae4 P 22912eefbeb34010bb4bb7c0acaddec2: Time spent starting tablet: real 0.021s	user 0.005s	sys 0.001s
I20250411 13:58:07.742452 21080 leader_election.cc:290] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2 [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers 122f34c0ee774ec9a21f147299a21168 (127.15.113.1:33621), 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391)
I20250411 13:58:07.753098 20644 leader_election.cc:304] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate won. Election summary: received 3 responses out of 3 voters: 2 yes votes; 1 no votes. yes voters: 122f34c0ee774ec9a21f147299a21168, f5f00e6f80124adf9c59bc9893aa28d2; no voters: 22912eefbeb34010bb4bb7c0acaddec2
I20250411 13:58:07.756547 21072 tablet_bootstrap.cc:492] T 82ee2b302bbb436ab32dd61acbf16229 P 22912eefbeb34010bb4bb7c0acaddec2: Bootstrap starting.
I20250411 13:58:07.763499 21072 tablet_bootstrap.cc:654] T 82ee2b302bbb436ab32dd61acbf16229 P 22912eefbeb34010bb4bb7c0acaddec2: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:07.766070 21075 raft_consensus.cc:513] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168 [term 1 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
W20250411 13:58:07.771804 21044 tablet.cc:2367] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2: Can't schedule compaction. Clean time has not been advanced past its initial value.
I20250411 13:58:07.774686 21074 tablet_bootstrap.cc:492] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2: Bootstrap starting.
I20250411 13:58:07.775444 21094 raft_consensus.cc:2802] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Leader pre-election won for term 1
I20250411 13:58:07.775872 21094 raft_consensus.cc:491] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
I20250411 13:58:07.776234 21094 raft_consensus.cc:3058] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:07.779820 21072 tablet_bootstrap.cc:492] T 82ee2b302bbb436ab32dd61acbf16229 P 22912eefbeb34010bb4bb7c0acaddec2: No bootstrap required, opened a new log
I20250411 13:58:07.780233 21072 ts_tablet_manager.cc:1397] T 82ee2b302bbb436ab32dd61acbf16229 P 22912eefbeb34010bb4bb7c0acaddec2: Time spent bootstrapping tablet: real 0.024s	user 0.013s	sys 0.004s
I20250411 13:58:07.781083 20991 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "a8eb3b2828c745c591f3b57222005ae4" candidate_uuid: "122f34c0ee774ec9a21f147299a21168" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "22912eefbeb34010bb4bb7c0acaddec2"
I20250411 13:58:07.781622 20991 raft_consensus.cc:3058] T a8eb3b2828c745c591f3b57222005ae4 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:07.783735 21072 raft_consensus.cc:357] T 82ee2b302bbb436ab32dd61acbf16229 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.784430 21072 raft_consensus.cc:383] T 82ee2b302bbb436ab32dd61acbf16229 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:07.784693 21072 raft_consensus.cc:738] T 82ee2b302bbb436ab32dd61acbf16229 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 22912eefbeb34010bb4bb7c0acaddec2, State: Initialized, Role: FOLLOWER
I20250411 13:58:07.785017 21094 raft_consensus.cc:513] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168 [term 1 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } } peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } }
I20250411 13:58:07.785322 21072 consensus_queue.cc:260] T 82ee2b302bbb436ab32dd61acbf16229 P 22912eefbeb34010bb4bb7c0acaddec2 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.788075 20992 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "b0a853b842e54d5d993fc9199fd7dc0f" candidate_uuid: "122f34c0ee774ec9a21f147299a21168" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "22912eefbeb34010bb4bb7c0acaddec2"
I20250411 13:58:07.791252 21072 ts_tablet_manager.cc:1428] T 82ee2b302bbb436ab32dd61acbf16229 P 22912eefbeb34010bb4bb7c0acaddec2: Time spent starting tablet: real 0.011s	user 0.004s	sys 0.003s
I20250411 13:58:07.791366 20991 raft_consensus.cc:2466] T a8eb3b2828c745c591f3b57222005ae4 P 22912eefbeb34010bb4bb7c0acaddec2 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate 122f34c0ee774ec9a21f147299a21168 in term 1.
I20250411 13:58:07.792173 21072 tablet_bootstrap.cc:492] T f06a30b6a1ae4fc98270958c903ee84e P 22912eefbeb34010bb4bb7c0acaddec2: Bootstrap starting.
I20250411 13:58:07.794507 21075 leader_election.cc:290] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 election: Requested vote from peers f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531), 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391)
I20250411 13:58:07.802548 21072 tablet_bootstrap.cc:654] T f06a30b6a1ae4fc98270958c903ee84e P 22912eefbeb34010bb4bb7c0acaddec2: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:07.803144 20851 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "a8eb3b2828c745c591f3b57222005ae4" candidate_uuid: "122f34c0ee774ec9a21f147299a21168" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "f5f00e6f80124adf9c59bc9893aa28d2"
I20250411 13:58:07.803759 20851 raft_consensus.cc:3058] T a8eb3b2828c745c591f3b57222005ae4 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Advancing to term 1
W20250411 13:58:07.806564 20645 leader_election.cc:343] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 election: Tablet error from VoteRequest() call to peer 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391): Illegal state: must be running to vote when last-logged opid is not known
I20250411 13:58:07.807241 20645 leader_election.cc:304] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 122f34c0ee774ec9a21f147299a21168, 22912eefbeb34010bb4bb7c0acaddec2; no voters: 
I20250411 13:58:07.787973 21094 leader_election.cc:290] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 election: Requested vote from peers 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391), f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531)
I20250411 13:58:07.812323 20851 raft_consensus.cc:2466] T a8eb3b2828c745c591f3b57222005ae4 P f5f00e6f80124adf9c59bc9893aa28d2 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate 122f34c0ee774ec9a21f147299a21168 in term 1.
I20250411 13:58:07.812497 21094 raft_consensus.cc:2802] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168 [term 1 FOLLOWER]: Leader election won for term 1
I20250411 13:58:07.813037 21094 raft_consensus.cc:695] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168 [term 1 LEADER]: Becoming Leader. State: Replica: 122f34c0ee774ec9a21f147299a21168, State: Running, Role: LEADER
I20250411 13:58:07.814404 21094 consensus_queue.cc:237] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168 [LEADER]: Queue going to LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 1, Majority size: 2, State: 0, Mode: LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.819947 21074 tablet_bootstrap.cc:654] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:07.823238 20851 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "b0a853b842e54d5d993fc9199fd7dc0f" candidate_uuid: "122f34c0ee774ec9a21f147299a21168" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "f5f00e6f80124adf9c59bc9893aa28d2"
I20250411 13:58:07.823649 21072 tablet_bootstrap.cc:492] T f06a30b6a1ae4fc98270958c903ee84e P 22912eefbeb34010bb4bb7c0acaddec2: No bootstrap required, opened a new log
I20250411 13:58:07.823937 20851 raft_consensus.cc:3058] T b0a853b842e54d5d993fc9199fd7dc0f P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:07.824381 21072 ts_tablet_manager.cc:1397] T f06a30b6a1ae4fc98270958c903ee84e P 22912eefbeb34010bb4bb7c0acaddec2: Time spent bootstrapping tablet: real 0.032s	user 0.006s	sys 0.011s
I20250411 13:58:07.827365 21072 raft_consensus.cc:357] T f06a30b6a1ae4fc98270958c903ee84e P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.828145 21072 raft_consensus.cc:383] T f06a30b6a1ae4fc98270958c903ee84e P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:07.828438 21072 raft_consensus.cc:738] T f06a30b6a1ae4fc98270958c903ee84e P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 22912eefbeb34010bb4bb7c0acaddec2, State: Initialized, Role: FOLLOWER
I20250411 13:58:07.829209 21072 consensus_queue.cc:260] T f06a30b6a1ae4fc98270958c903ee84e P 22912eefbeb34010bb4bb7c0acaddec2 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.831432 21072 ts_tablet_manager.cc:1428] T f06a30b6a1ae4fc98270958c903ee84e P 22912eefbeb34010bb4bb7c0acaddec2: Time spent starting tablet: real 0.006s	user 0.005s	sys 0.001s
I20250411 13:58:07.833575 21070 tablet_bootstrap.cc:492] T 7a5ac5676226458ead4471475edfb6f7 P 122f34c0ee774ec9a21f147299a21168: No bootstrap required, opened a new log
I20250411 13:58:07.833953 21070 ts_tablet_manager.cc:1397] T 7a5ac5676226458ead4471475edfb6f7 P 122f34c0ee774ec9a21f147299a21168: Time spent bootstrapping tablet: real 0.146s	user 0.029s	sys 0.041s
I20250411 13:58:07.836265 21070 raft_consensus.cc:357] T 7a5ac5676226458ead4471475edfb6f7 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.836926 21070 raft_consensus.cc:383] T 7a5ac5676226458ead4471475edfb6f7 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:07.837186 21070 raft_consensus.cc:738] T 7a5ac5676226458ead4471475edfb6f7 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 122f34c0ee774ec9a21f147299a21168, State: Initialized, Role: FOLLOWER
I20250411 13:58:07.838033 21072 tablet_bootstrap.cc:492] T 163e127ff588462996fe89e47afb9ee3 P 22912eefbeb34010bb4bb7c0acaddec2: Bootstrap starting.
I20250411 13:58:07.837765 21070 consensus_queue.cc:260] T 7a5ac5676226458ead4471475edfb6f7 P 122f34c0ee774ec9a21f147299a21168 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.839624 21070 ts_tablet_manager.cc:1428] T 7a5ac5676226458ead4471475edfb6f7 P 122f34c0ee774ec9a21f147299a21168: Time spent starting tablet: real 0.005s	user 0.006s	sys 0.001s
I20250411 13:58:07.849395 20522 catalog_manager.cc:5581] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168 reported cstate change: term changed from 0 to 1, leader changed from <none> to 122f34c0ee774ec9a21f147299a21168 (127.15.113.1). New cstate: current_term: 1 leader_uuid: "122f34c0ee774ec9a21f147299a21168" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } health_report { overall_health: HEALTHY } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } health_report { overall_health: UNKNOWN } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } health_report { overall_health: UNKNOWN } } }
I20250411 13:58:07.853171 20991 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "82ee2b302bbb436ab32dd61acbf16229" candidate_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "22912eefbeb34010bb4bb7c0acaddec2" is_pre_election: true
I20250411 13:58:07.853819 20991 raft_consensus.cc:2466] T 82ee2b302bbb436ab32dd61acbf16229 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate f5f00e6f80124adf9c59bc9893aa28d2 in term 0.
I20250411 13:58:07.855192 21072 tablet_bootstrap.cc:654] T 163e127ff588462996fe89e47afb9ee3 P 22912eefbeb34010bb4bb7c0acaddec2: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:07.855051 20786 leader_election.cc:304] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2 [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 22912eefbeb34010bb4bb7c0acaddec2, f5f00e6f80124adf9c59bc9893aa28d2; no voters: 
I20250411 13:58:07.855855 21080 raft_consensus.cc:2802] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Leader pre-election won for term 1
I20250411 13:58:07.856170 21080 raft_consensus.cc:491] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
I20250411 13:58:07.857007 21080 raft_consensus.cc:3058] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:07.860680 20851 raft_consensus.cc:2466] T b0a853b842e54d5d993fc9199fd7dc0f P f5f00e6f80124adf9c59bc9893aa28d2 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate 122f34c0ee774ec9a21f147299a21168 in term 1.
I20250411 13:58:07.861702 20644 leader_election.cc:304] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 election: Election decided. Result: candidate won. Election summary: received 3 responses out of 3 voters: 2 yes votes; 1 no votes. yes voters: 122f34c0ee774ec9a21f147299a21168, f5f00e6f80124adf9c59bc9893aa28d2; no voters: 22912eefbeb34010bb4bb7c0acaddec2
I20250411 13:58:07.863243 21070 tablet_bootstrap.cc:492] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168: Bootstrap starting.
I20250411 13:58:07.863873 20712 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "82ee2b302bbb436ab32dd61acbf16229" candidate_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "122f34c0ee774ec9a21f147299a21168" is_pre_election: true
I20250411 13:58:07.864573 20712 raft_consensus.cc:2466] T 82ee2b302bbb436ab32dd61acbf16229 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate f5f00e6f80124adf9c59bc9893aa28d2 in term 0.
I20250411 13:58:07.869205 21075 raft_consensus.cc:2802] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168 [term 1 FOLLOWER]: Leader election won for term 1
I20250411 13:58:07.872866 21080 raft_consensus.cc:513] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2 [term 1 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.876569 21074 tablet_bootstrap.cc:492] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2: No bootstrap required, opened a new log
I20250411 13:58:07.877102 21074 ts_tablet_manager.cc:1397] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2: Time spent bootstrapping tablet: real 0.103s	user 0.008s	sys 0.003s
I20250411 13:58:07.877351 20991 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "82ee2b302bbb436ab32dd61acbf16229" candidate_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "22912eefbeb34010bb4bb7c0acaddec2"
I20250411 13:58:07.878166 20991 raft_consensus.cc:3058] T 82ee2b302bbb436ab32dd61acbf16229 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:07.880460 21074 raft_consensus.cc:357] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.881193 21074 raft_consensus.cc:383] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:07.882169 21074 raft_consensus.cc:738] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: f5f00e6f80124adf9c59bc9893aa28d2, State: Initialized, Role: FOLLOWER
I20250411 13:58:07.882860 21074 consensus_queue.cc:260] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.884611 20712 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "82ee2b302bbb436ab32dd61acbf16229" candidate_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "122f34c0ee774ec9a21f147299a21168"
I20250411 13:58:07.885159 20712 raft_consensus.cc:3058] T 82ee2b302bbb436ab32dd61acbf16229 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:07.889721 21094 raft_consensus.cc:491] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:58:07.890591 21094 raft_consensus.cc:513] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.893644 21074 ts_tablet_manager.cc:1428] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2: Time spent starting tablet: real 0.016s	user 0.010s	sys 0.005s
I20250411 13:58:07.896543 21074 tablet_bootstrap.cc:492] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2: Bootstrap starting.
I20250411 13:58:07.896817 20992 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "92dee3062e3e4cebb49c41c569889f43" candidate_uuid: "122f34c0ee774ec9a21f147299a21168" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "22912eefbeb34010bb4bb7c0acaddec2" is_pre_election: true
I20250411 13:58:07.897395 20992 raft_consensus.cc:2466] T 92dee3062e3e4cebb49c41c569889f43 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate 122f34c0ee774ec9a21f147299a21168 in term 0.
I20250411 13:58:07.898970 20645 leader_election.cc:304] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 122f34c0ee774ec9a21f147299a21168, 22912eefbeb34010bb4bb7c0acaddec2; no voters: 
I20250411 13:58:07.903913 21074 tablet_bootstrap.cc:654] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:07.907752 20712 raft_consensus.cc:2466] T 82ee2b302bbb436ab32dd61acbf16229 P 122f34c0ee774ec9a21f147299a21168 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate f5f00e6f80124adf9c59bc9893aa28d2 in term 1.
I20250411 13:58:07.909415 20785 leader_election.cc:304] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2 [CANDIDATE]: Term 1 election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 122f34c0ee774ec9a21f147299a21168, f5f00e6f80124adf9c59bc9893aa28d2; no voters: 
I20250411 13:58:07.910389 21070 tablet_bootstrap.cc:654] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:07.910698 21105 raft_consensus.cc:2802] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2 [term 1 FOLLOWER]: Leader election won for term 1
I20250411 13:58:07.915984 21074 tablet_bootstrap.cc:492] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2: No bootstrap required, opened a new log
I20250411 13:58:07.916409 21074 ts_tablet_manager.cc:1397] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2: Time spent bootstrapping tablet: real 0.020s	user 0.007s	sys 0.010s
I20250411 13:58:07.919703 21074 raft_consensus.cc:357] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.920349 21074 raft_consensus.cc:383] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:07.920612 21074 raft_consensus.cc:738] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: f5f00e6f80124adf9c59bc9893aa28d2, State: Initialized, Role: FOLLOWER
I20250411 13:58:07.921013 21080 leader_election.cc:290] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2 [CANDIDATE]: Term 1 election: Requested vote from peers 122f34c0ee774ec9a21f147299a21168 (127.15.113.1:33621), 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391)
I20250411 13:58:07.921380 21074 consensus_queue.cc:260] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.924016 20991 raft_consensus.cc:2466] T 82ee2b302bbb436ab32dd61acbf16229 P 22912eefbeb34010bb4bb7c0acaddec2 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate f5f00e6f80124adf9c59bc9893aa28d2 in term 1.
I20250411 13:58:07.924489 21105 raft_consensus.cc:695] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2 [term 1 LEADER]: Becoming Leader. State: Replica: f5f00e6f80124adf9c59bc9893aa28d2, State: Running, Role: LEADER
I20250411 13:58:07.925395 21105 consensus_queue.cc:237] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2 [LEADER]: Queue going to LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 1, Majority size: 2, State: 0, Mode: LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.930588 21074 ts_tablet_manager.cc:1428] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2: Time spent starting tablet: real 0.014s	user 0.003s	sys 0.011s
I20250411 13:58:07.931486 21074 tablet_bootstrap.cc:492] T 92dee3062e3e4cebb49c41c569889f43 P f5f00e6f80124adf9c59bc9893aa28d2: Bootstrap starting.
I20250411 13:58:07.908063 20851 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "92dee3062e3e4cebb49c41c569889f43" candidate_uuid: "122f34c0ee774ec9a21f147299a21168" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" is_pre_election: true
W20250411 13:58:07.932976 20644 leader_election.cc:343] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 pre-election: Tablet error from VoteRequest() call to peer f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531): Illegal state: must be running to vote when last-logged opid is not known
I20250411 13:58:07.940240 21074 tablet_bootstrap.cc:654] T 92dee3062e3e4cebb49c41c569889f43 P f5f00e6f80124adf9c59bc9893aa28d2: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:07.935923 21094 leader_election.cc:290] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531), 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391)
I20250411 13:58:07.946731 21094 raft_consensus.cc:2802] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Leader pre-election won for term 1
I20250411 13:58:07.947181 21094 raft_consensus.cc:491] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
I20250411 13:58:07.947544 21094 raft_consensus.cc:3058] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:07.930725 21072 tablet_bootstrap.cc:492] T 163e127ff588462996fe89e47afb9ee3 P 22912eefbeb34010bb4bb7c0acaddec2: No bootstrap required, opened a new log
I20250411 13:58:07.954007 21072 ts_tablet_manager.cc:1397] T 163e127ff588462996fe89e47afb9ee3 P 22912eefbeb34010bb4bb7c0acaddec2: Time spent bootstrapping tablet: real 0.116s	user 0.008s	sys 0.015s
I20250411 13:58:07.956974 21072 raft_consensus.cc:357] T 163e127ff588462996fe89e47afb9ee3 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.957909 21072 raft_consensus.cc:383] T 163e127ff588462996fe89e47afb9ee3 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:07.958220 21072 raft_consensus.cc:738] T 163e127ff588462996fe89e47afb9ee3 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 22912eefbeb34010bb4bb7c0acaddec2, State: Initialized, Role: FOLLOWER
I20250411 13:58:07.958527 21112 raft_consensus.cc:491] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:58:07.959069 21112 raft_consensus.cc:513] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.959178 21072 consensus_queue.cc:260] T 163e127ff588462996fe89e47afb9ee3 P 22912eefbeb34010bb4bb7c0acaddec2 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.962530 21112 leader_election.cc:290] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2 [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers 122f34c0ee774ec9a21f147299a21168 (127.15.113.1:33621), 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391)
I20250411 13:58:07.963573 20991 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "163e127ff588462996fe89e47afb9ee3" candidate_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "22912eefbeb34010bb4bb7c0acaddec2" is_pre_election: true
I20250411 13:58:07.964131 20991 raft_consensus.cc:2466] T 163e127ff588462996fe89e47afb9ee3 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate f5f00e6f80124adf9c59bc9893aa28d2 in term 0.
I20250411 13:58:07.966215 20786 leader_election.cc:304] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2 [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 22912eefbeb34010bb4bb7c0acaddec2, f5f00e6f80124adf9c59bc9893aa28d2; no voters: 
I20250411 13:58:07.967059 21112 raft_consensus.cc:2802] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Leader pre-election won for term 1
I20250411 13:58:07.967409 21112 raft_consensus.cc:491] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
I20250411 13:58:07.967981 21112 raft_consensus.cc:3058] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:07.976827 21072 ts_tablet_manager.cc:1428] T 163e127ff588462996fe89e47afb9ee3 P 22912eefbeb34010bb4bb7c0acaddec2: Time spent starting tablet: real 0.023s	user 0.005s	sys 0.001s
I20250411 13:58:07.974939 21112 raft_consensus.cc:513] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2 [term 1 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:07.991575 21072 tablet_bootstrap.cc:492] T e05ab5d0d86c4259ae74d3dca627589c P 22912eefbeb34010bb4bb7c0acaddec2: Bootstrap starting.
I20250411 13:58:07.996079 21112 leader_election.cc:290] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2 [CANDIDATE]: Term 1 election: Requested vote from peers 122f34c0ee774ec9a21f147299a21168 (127.15.113.1:33621), 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391)
I20250411 13:58:07.997330 20991 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "163e127ff588462996fe89e47afb9ee3" candidate_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "22912eefbeb34010bb4bb7c0acaddec2"
I20250411 13:58:07.997902 20991 raft_consensus.cc:3058] T 163e127ff588462996fe89e47afb9ee3 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:07.998008 21112 raft_consensus.cc:491] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:58:07.997594 20521 catalog_manager.cc:5581] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2 reported cstate change: term changed from 0 to 1, leader changed from <none> to f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2). New cstate: current_term: 1 leader_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } health_report { overall_health: UNKNOWN } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } health_report { overall_health: HEALTHY } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } health_report { overall_health: UNKNOWN } } }
I20250411 13:58:07.998843 21074 tablet_bootstrap.cc:492] T 92dee3062e3e4cebb49c41c569889f43 P f5f00e6f80124adf9c59bc9893aa28d2: No bootstrap required, opened a new log
I20250411 13:58:07.999642 21074 ts_tablet_manager.cc:1397] T 92dee3062e3e4cebb49c41c569889f43 P f5f00e6f80124adf9c59bc9893aa28d2: Time spent bootstrapping tablet: real 0.068s	user 0.014s	sys 0.002s
I20250411 13:58:07.998443 21112 raft_consensus.cc:513] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.003917 20712 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "163e127ff588462996fe89e47afb9ee3" candidate_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "122f34c0ee774ec9a21f147299a21168" is_pre_election: true
I20250411 13:58:08.004686 21112 leader_election.cc:290] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2 [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers 122f34c0ee774ec9a21f147299a21168 (127.15.113.1:33621), 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391)
I20250411 13:58:08.005697 20991 raft_consensus.cc:2466] T 163e127ff588462996fe89e47afb9ee3 P 22912eefbeb34010bb4bb7c0acaddec2 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate f5f00e6f80124adf9c59bc9893aa28d2 in term 1.
I20250411 13:58:08.005733 20992 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "46a1673aabd54e5f83f6b401b54bf2dc" candidate_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "22912eefbeb34010bb4bb7c0acaddec2" is_pre_election: true
I20250411 13:58:08.006649 20786 leader_election.cc:304] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2 [CANDIDATE]: Term 1 election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 22912eefbeb34010bb4bb7c0acaddec2, f5f00e6f80124adf9c59bc9893aa28d2; no voters: 
W20250411 13:58:08.007225 20785 leader_election.cc:343] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2 [CANDIDATE]: Term 1 pre-election: Tablet error from VoteRequest() call to peer 122f34c0ee774ec9a21f147299a21168 (127.15.113.1:33621): Illegal state: must be running to vote when last-logged opid is not known
W20250411 13:58:08.007476 20786 leader_election.cc:343] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2 [CANDIDATE]: Term 1 pre-election: Tablet error from VoteRequest() call to peer 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391): Illegal state: must be running to vote when last-logged opid is not known
I20250411 13:58:08.008437 21078 raft_consensus.cc:491] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:58:08.008242 20711 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "163e127ff588462996fe89e47afb9ee3" candidate_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "122f34c0ee774ec9a21f147299a21168"
I20250411 13:58:08.008867 21078 raft_consensus.cc:513] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
W20250411 13:58:08.010859 20785 leader_election.cc:343] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2 [CANDIDATE]: Term 1 election: Tablet error from VoteRequest() call to peer 122f34c0ee774ec9a21f147299a21168 (127.15.113.1:33621): Illegal state: must be running to vote when last-logged opid is not known
I20250411 13:58:08.012104 21078 leader_election.cc:290] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2 [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers 122f34c0ee774ec9a21f147299a21168 (127.15.113.1:33621), f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531)
I20250411 13:58:08.014029 20711 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "46a1673aabd54e5f83f6b401b54bf2dc" candidate_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "122f34c0ee774ec9a21f147299a21168" is_pre_election: true
I20250411 13:58:08.014564 20711 raft_consensus.cc:2466] T 46a1673aabd54e5f83f6b401b54bf2dc P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate f5f00e6f80124adf9c59bc9893aa28d2 in term 0.
I20250411 13:58:08.015033 21112 raft_consensus.cc:2802] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2 [term 1 FOLLOWER]: Leader election won for term 1
I20250411 13:58:08.016525 21112 raft_consensus.cc:695] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2 [term 1 LEADER]: Becoming Leader. State: Replica: f5f00e6f80124adf9c59bc9893aa28d2, State: Running, Role: LEADER
I20250411 13:58:08.017571 20785 leader_election.cc:304] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2 [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate won. Election summary: received 3 responses out of 3 voters: 2 yes votes; 1 no votes. yes voters: 122f34c0ee774ec9a21f147299a21168, f5f00e6f80124adf9c59bc9893aa28d2; no voters: 22912eefbeb34010bb4bb7c0acaddec2
I20250411 13:58:08.017851 21074 raft_consensus.cc:357] T 92dee3062e3e4cebb49c41c569889f43 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.018507 21074 raft_consensus.cc:383] T 92dee3062e3e4cebb49c41c569889f43 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:08.018787 21074 raft_consensus.cc:738] T 92dee3062e3e4cebb49c41c569889f43 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: f5f00e6f80124adf9c59bc9893aa28d2, State: Initialized, Role: FOLLOWER
I20250411 13:58:08.019428 21074 consensus_queue.cc:260] T 92dee3062e3e4cebb49c41c569889f43 P f5f00e6f80124adf9c59bc9893aa28d2 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.019882 21105 raft_consensus.cc:2802] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Leader pre-election won for term 1
I20250411 13:58:08.020964 21072 tablet_bootstrap.cc:654] T e05ab5d0d86c4259ae74d3dca627589c P 22912eefbeb34010bb4bb7c0acaddec2: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:08.020936 21105 raft_consensus.cc:491] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
I20250411 13:58:08.021869 21105 raft_consensus.cc:3058] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:08.021972 21112 consensus_queue.cc:237] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2 [LEADER]: Queue going to LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 1, Majority size: 2, State: 0, Mode: LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
W20250411 13:58:08.025096 20645 outbound_call.cc:321] RPC callback for RPC call kudu.consensus.ConsensusService.RequestConsensusVote -> {remote=127.15.113.3:33391, user_credentials={real_user=slave}} blocked reactor thread for 126175us
I20250411 13:58:08.026213 21075 raft_consensus.cc:695] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168 [term 1 LEADER]: Becoming Leader. State: Replica: 122f34c0ee774ec9a21f147299a21168, State: Running, Role: LEADER
I20250411 13:58:08.026917 21074 ts_tablet_manager.cc:1428] T 92dee3062e3e4cebb49c41c569889f43 P f5f00e6f80124adf9c59bc9893aa28d2: Time spent starting tablet: real 0.027s	user 0.002s	sys 0.005s
I20250411 13:58:08.026355 21104 raft_consensus.cc:491] T 46a1673aabd54e5f83f6b401b54bf2dc P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:58:08.027817 21074 tablet_bootstrap.cc:492] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2: Bootstrap starting.
I20250411 13:58:08.028894 21104 raft_consensus.cc:513] T 46a1673aabd54e5f83f6b401b54bf2dc P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.032773 21075 consensus_queue.cc:237] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168 [LEADER]: Queue going to LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 1, Majority size: 2, State: 0, Mode: LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } } peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } }
I20250411 13:58:08.032909 21108 raft_consensus.cc:491] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:58:08.033756 21108 raft_consensus.cc:513] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.036378 20851 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "46a1673aabd54e5f83f6b401b54bf2dc" candidate_uuid: "122f34c0ee774ec9a21f147299a21168" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" is_pre_election: true
I20250411 13:58:08.038973 21108 leader_election.cc:290] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531), 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391)
I20250411 13:58:08.039883 20852 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "e05ab5d0d86c4259ae74d3dca627589c" candidate_uuid: "122f34c0ee774ec9a21f147299a21168" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" is_pre_election: true
W20250411 13:58:08.042209 20644 leader_election.cc:343] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 pre-election: Tablet error from VoteRequest() call to peer f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531): Illegal state: must be running to vote when last-logged opid is not known
I20250411 13:58:08.043430 21104 leader_election.cc:290] T 46a1673aabd54e5f83f6b401b54bf2dc P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531), 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391)
I20250411 13:58:08.056706 21072 tablet_bootstrap.cc:492] T e05ab5d0d86c4259ae74d3dca627589c P 22912eefbeb34010bb4bb7c0acaddec2: No bootstrap required, opened a new log
I20250411 13:58:08.057126 21072 ts_tablet_manager.cc:1397] T e05ab5d0d86c4259ae74d3dca627589c P 22912eefbeb34010bb4bb7c0acaddec2: Time spent bootstrapping tablet: real 0.066s	user 0.007s	sys 0.007s
I20250411 13:58:08.059990 21074 tablet_bootstrap.cc:654] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:08.059285 21072 raft_consensus.cc:357] T e05ab5d0d86c4259ae74d3dca627589c P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.060443 21072 raft_consensus.cc:383] T e05ab5d0d86c4259ae74d3dca627589c P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:08.060693 21072 raft_consensus.cc:738] T e05ab5d0d86c4259ae74d3dca627589c P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 22912eefbeb34010bb4bb7c0acaddec2, State: Initialized, Role: FOLLOWER
I20250411 13:58:08.061255 21072 consensus_queue.cc:260] T e05ab5d0d86c4259ae74d3dca627589c P 22912eefbeb34010bb4bb7c0acaddec2 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.075430 21072 ts_tablet_manager.cc:1428] T e05ab5d0d86c4259ae74d3dca627589c P 22912eefbeb34010bb4bb7c0acaddec2: Time spent starting tablet: real 0.018s	user 0.006s	sys 0.001s
I20250411 13:58:08.079689 21070 tablet_bootstrap.cc:492] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168: No bootstrap required, opened a new log
I20250411 13:58:08.080143 21070 ts_tablet_manager.cc:1397] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168: Time spent bootstrapping tablet: real 0.217s	user 0.025s	sys 0.016s
I20250411 13:58:08.080869 21072 tablet_bootstrap.cc:492] T 05333cbabc6e492d9d8bb10b6394a9ad P 22912eefbeb34010bb4bb7c0acaddec2: Bootstrap starting.
I20250411 13:58:08.082481 21070 raft_consensus.cc:357] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.083318 21070 raft_consensus.cc:383] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:08.083626 21070 raft_consensus.cc:738] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 122f34c0ee774ec9a21f147299a21168, State: Initialized, Role: FOLLOWER
I20250411 13:58:08.084335 21070 consensus_queue.cc:260] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.086266 21070 ts_tablet_manager.cc:1428] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168: Time spent starting tablet: real 0.006s	user 0.004s	sys 0.000s
I20250411 13:58:08.090167 21072 tablet_bootstrap.cc:654] T 05333cbabc6e492d9d8bb10b6394a9ad P 22912eefbeb34010bb4bb7c0acaddec2: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:08.100399 21094 raft_consensus.cc:513] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168 [term 1 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.105139 20991 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "e05ab5d0d86c4259ae74d3dca627589c" candidate_uuid: "122f34c0ee774ec9a21f147299a21168" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "22912eefbeb34010bb4bb7c0acaddec2" is_pre_election: true
I20250411 13:58:08.105512 20992 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "46a1673aabd54e5f83f6b401b54bf2dc" candidate_uuid: "122f34c0ee774ec9a21f147299a21168" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "22912eefbeb34010bb4bb7c0acaddec2" is_pre_election: true
I20250411 13:58:08.105682 20991 raft_consensus.cc:2466] T e05ab5d0d86c4259ae74d3dca627589c P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate 122f34c0ee774ec9a21f147299a21168 in term 0.
I20250411 13:58:08.105894 21074 tablet_bootstrap.cc:492] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2: No bootstrap required, opened a new log
I20250411 13:58:08.106232 21074 ts_tablet_manager.cc:1397] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2: Time spent bootstrapping tablet: real 0.079s	user 0.008s	sys 0.027s
I20250411 13:58:08.106429 21105 raft_consensus.cc:513] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2 [term 1 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.107553 20851 raft_consensus.cc:2391] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2 [term 1 FOLLOWER]: Leader pre-election vote request: Denying vote to candidate 122f34c0ee774ec9a21f147299a21168 in current term 1: Already voted for candidate f5f00e6f80124adf9c59bc9893aa28d2 in this term.
I20250411 13:58:08.109362 21074 raft_consensus.cc:357] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.109941 21074 raft_consensus.cc:383] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:08.110167 21074 raft_consensus.cc:738] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: f5f00e6f80124adf9c59bc9893aa28d2, State: Initialized, Role: FOLLOWER
I20250411 13:58:08.112175 20711 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "46a1673aabd54e5f83f6b401b54bf2dc" candidate_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "122f34c0ee774ec9a21f147299a21168"
I20250411 13:58:08.112661 20711 raft_consensus.cc:3058] T 46a1673aabd54e5f83f6b401b54bf2dc P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:08.105851 21070 tablet_bootstrap.cc:492] T f06a30b6a1ae4fc98270958c903ee84e P 122f34c0ee774ec9a21f147299a21168: Bootstrap starting.
I20250411 13:58:08.114816 21072 tablet_bootstrap.cc:492] T 05333cbabc6e492d9d8bb10b6394a9ad P 22912eefbeb34010bb4bb7c0acaddec2: No bootstrap required, opened a new log
I20250411 13:58:08.115206 21072 ts_tablet_manager.cc:1397] T 05333cbabc6e492d9d8bb10b6394a9ad P 22912eefbeb34010bb4bb7c0acaddec2: Time spent bootstrapping tablet: real 0.035s	user 0.009s	sys 0.000s
I20250411 13:58:08.115311 20991 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "46a1673aabd54e5f83f6b401b54bf2dc" candidate_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "22912eefbeb34010bb4bb7c0acaddec2"
W20250411 13:58:08.108071 20645 leader_election.cc:343] T 46a1673aabd54e5f83f6b401b54bf2dc P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 pre-election: Tablet error from VoteRequest() call to peer 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391): Illegal state: must be running to vote when last-logged opid is not known
I20250411 13:58:08.116479 20645 leader_election.cc:304] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate won. Election summary: received 3 responses out of 3 voters: 2 yes votes; 1 no votes. yes voters: 122f34c0ee774ec9a21f147299a21168, 22912eefbeb34010bb4bb7c0acaddec2; no voters: f5f00e6f80124adf9c59bc9893aa28d2
I20250411 13:58:08.117302 21072 raft_consensus.cc:357] T 05333cbabc6e492d9d8bb10b6394a9ad P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.117933 21072 raft_consensus.cc:383] T 05333cbabc6e492d9d8bb10b6394a9ad P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:08.118162 21072 raft_consensus.cc:738] T 05333cbabc6e492d9d8bb10b6394a9ad P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 22912eefbeb34010bb4bb7c0acaddec2, State: Initialized, Role: FOLLOWER
I20250411 13:58:08.118685 21072 consensus_queue.cc:260] T 05333cbabc6e492d9d8bb10b6394a9ad P 22912eefbeb34010bb4bb7c0acaddec2 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.119246 20644 leader_election.cc:304] T 46a1673aabd54e5f83f6b401b54bf2dc P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate lost. Election summary: received 3 responses out of 3 voters: 1 yes votes; 2 no votes. yes voters: 122f34c0ee774ec9a21f147299a21168; no voters: 22912eefbeb34010bb4bb7c0acaddec2, f5f00e6f80124adf9c59bc9893aa28d2
I20250411 13:58:08.119776 21070 tablet_bootstrap.cc:654] T f06a30b6a1ae4fc98270958c903ee84e P 122f34c0ee774ec9a21f147299a21168: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:08.119889 21075 raft_consensus.cc:2802] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Leader pre-election won for term 1
I20250411 13:58:08.120209 21075 raft_consensus.cc:491] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
I20250411 13:58:08.120492 21075 raft_consensus.cc:3058] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Advancing to term 1
W20250411 13:58:08.120780 20786 leader_election.cc:343] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2 [CANDIDATE]: Term 1 election: Tablet error from VoteRequest() call to peer 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391): Illegal state: must be running to vote when last-logged opid is not known
I20250411 13:58:08.122611 21105 leader_election.cc:290] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2 [CANDIDATE]: Term 1 election: Requested vote from peers 122f34c0ee774ec9a21f147299a21168 (127.15.113.1:33621), 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391)
I20250411 13:58:08.123471 21094 leader_election.cc:290] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 election: Requested vote from peers f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531), 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391)
I20250411 13:58:08.124722 20851 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "92dee3062e3e4cebb49c41c569889f43" candidate_uuid: "122f34c0ee774ec9a21f147299a21168" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "f5f00e6f80124adf9c59bc9893aa28d2"
I20250411 13:58:08.125212 20851 raft_consensus.cc:3058] T 92dee3062e3e4cebb49c41c569889f43 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:08.126464 21075 raft_consensus.cc:513] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168 [term 1 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.128228 21075 leader_election.cc:290] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 election: Requested vote from peers f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531), 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391)
I20250411 13:58:08.130306 20852 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "e05ab5d0d86c4259ae74d3dca627589c" candidate_uuid: "122f34c0ee774ec9a21f147299a21168" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "f5f00e6f80124adf9c59bc9893aa28d2"
W20250411 13:58:08.131582 20644 leader_election.cc:343] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 election: Tablet error from VoteRequest() call to peer f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531): Illegal state: must be running to vote when last-logged opid is not known
I20250411 13:58:08.131855 21070 tablet_bootstrap.cc:492] T f06a30b6a1ae4fc98270958c903ee84e P 122f34c0ee774ec9a21f147299a21168: No bootstrap required, opened a new log
I20250411 13:58:08.110678 21074 consensus_queue.cc:260] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.132841 21070 ts_tablet_manager.cc:1397] T f06a30b6a1ae4fc98270958c903ee84e P 122f34c0ee774ec9a21f147299a21168: Time spent bootstrapping tablet: real 0.027s	user 0.010s	sys 0.006s
I20250411 13:58:08.134341 20711 raft_consensus.cc:2466] T 46a1673aabd54e5f83f6b401b54bf2dc P 122f34c0ee774ec9a21f147299a21168 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate f5f00e6f80124adf9c59bc9893aa28d2 in term 1.
I20250411 13:58:08.137770 20851 raft_consensus.cc:2466] T 92dee3062e3e4cebb49c41c569889f43 P f5f00e6f80124adf9c59bc9893aa28d2 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate 122f34c0ee774ec9a21f147299a21168 in term 1.
I20250411 13:58:08.138410 21078 raft_consensus.cc:491] T f06a30b6a1ae4fc98270958c903ee84e P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:58:08.138959 20991 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "92dee3062e3e4cebb49c41c569889f43" candidate_uuid: "122f34c0ee774ec9a21f147299a21168" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "22912eefbeb34010bb4bb7c0acaddec2"
I20250411 13:58:08.138815 21078 raft_consensus.cc:513] T f06a30b6a1ae4fc98270958c903ee84e P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.139436 20991 raft_consensus.cc:3058] T 92dee3062e3e4cebb49c41c569889f43 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:08.140564 21078 leader_election.cc:290] T f06a30b6a1ae4fc98270958c903ee84e P 22912eefbeb34010bb4bb7c0acaddec2 [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers 122f34c0ee774ec9a21f147299a21168 (127.15.113.1:33621), f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531)
I20250411 13:58:08.143421 21072 ts_tablet_manager.cc:1428] T 05333cbabc6e492d9d8bb10b6394a9ad P 22912eefbeb34010bb4bb7c0acaddec2: Time spent starting tablet: real 0.028s	user 0.005s	sys 0.000s
I20250411 13:58:08.144213 21072 tablet_bootstrap.cc:492] T 46a1673aabd54e5f83f6b401b54bf2dc P 22912eefbeb34010bb4bb7c0acaddec2: Bootstrap starting.
I20250411 13:58:08.147294 20991 raft_consensus.cc:2466] T 92dee3062e3e4cebb49c41c569889f43 P 22912eefbeb34010bb4bb7c0acaddec2 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate 122f34c0ee774ec9a21f147299a21168 in term 1.
I20250411 13:58:08.149538 21072 tablet_bootstrap.cc:654] T 46a1673aabd54e5f83f6b401b54bf2dc P 22912eefbeb34010bb4bb7c0acaddec2: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:08.149181 20645 leader_election.cc:304] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 122f34c0ee774ec9a21f147299a21168, 22912eefbeb34010bb4bb7c0acaddec2; no voters: 
I20250411 13:58:08.150388 21075 raft_consensus.cc:2802] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168 [term 1 FOLLOWER]: Leader election won for term 1
I20250411 13:58:08.150938 21075 raft_consensus.cc:695] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168 [term 1 LEADER]: Becoming Leader. State: Replica: 122f34c0ee774ec9a21f147299a21168, State: Running, Role: LEADER
I20250411 13:58:08.151783 21075 consensus_queue.cc:237] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168 [LEADER]: Queue going to LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 1, Majority size: 2, State: 0, Mode: LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.155058 21070 raft_consensus.cc:357] T f06a30b6a1ae4fc98270958c903ee84e P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.155385 21104 raft_consensus.cc:2747] T 46a1673aabd54e5f83f6b401b54bf2dc P 122f34c0ee774ec9a21f147299a21168 [term 1 FOLLOWER]: Leader pre-election lost for term 1. Reason: could not achieve majority
I20250411 13:58:08.155649 21070 raft_consensus.cc:383] T f06a30b6a1ae4fc98270958c903ee84e P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:08.155898 21070 raft_consensus.cc:738] T f06a30b6a1ae4fc98270958c903ee84e P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 122f34c0ee774ec9a21f147299a21168, State: Initialized, Role: FOLLOWER
I20250411 13:58:08.156411 21070 consensus_queue.cc:260] T f06a30b6a1ae4fc98270958c903ee84e P 122f34c0ee774ec9a21f147299a21168 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.158303 21070 ts_tablet_manager.cc:1428] T f06a30b6a1ae4fc98270958c903ee84e P 122f34c0ee774ec9a21f147299a21168: Time spent starting tablet: real 0.025s	user 0.001s	sys 0.001s
I20250411 13:58:08.159052 21070 tablet_bootstrap.cc:492] T 163e127ff588462996fe89e47afb9ee3 P 122f34c0ee774ec9a21f147299a21168: Bootstrap starting.
I20250411 13:58:08.159022 20991 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "e05ab5d0d86c4259ae74d3dca627589c" candidate_uuid: "122f34c0ee774ec9a21f147299a21168" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "22912eefbeb34010bb4bb7c0acaddec2"
I20250411 13:58:08.159608 20991 raft_consensus.cc:3058] T e05ab5d0d86c4259ae74d3dca627589c P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:08.163635 21070 tablet_bootstrap.cc:654] T 163e127ff588462996fe89e47afb9ee3 P 122f34c0ee774ec9a21f147299a21168: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:08.174252 21070 tablet_bootstrap.cc:492] T 163e127ff588462996fe89e47afb9ee3 P 122f34c0ee774ec9a21f147299a21168: No bootstrap required, opened a new log
I20250411 13:58:08.174631 21070 ts_tablet_manager.cc:1397] T 163e127ff588462996fe89e47afb9ee3 P 122f34c0ee774ec9a21f147299a21168: Time spent bootstrapping tablet: real 0.016s	user 0.007s	sys 0.007s
I20250411 13:58:08.183164 21070 raft_consensus.cc:357] T 163e127ff588462996fe89e47afb9ee3 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.183754 21070 raft_consensus.cc:383] T 163e127ff588462996fe89e47afb9ee3 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:08.183972 21070 raft_consensus.cc:738] T 163e127ff588462996fe89e47afb9ee3 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 122f34c0ee774ec9a21f147299a21168, State: Initialized, Role: FOLLOWER
I20250411 13:58:08.184487 21070 consensus_queue.cc:260] T 163e127ff588462996fe89e47afb9ee3 P 122f34c0ee774ec9a21f147299a21168 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.186373 21070 ts_tablet_manager.cc:1428] T 163e127ff588462996fe89e47afb9ee3 P 122f34c0ee774ec9a21f147299a21168: Time spent starting tablet: real 0.011s	user 0.000s	sys 0.002s
I20250411 13:58:08.142004 21074 ts_tablet_manager.cc:1428] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2: Time spent starting tablet: real 0.036s	user 0.005s	sys 0.000s
I20250411 13:58:08.189043 21074 tablet_bootstrap.cc:492] T 7a5ac5676226458ead4471475edfb6f7 P f5f00e6f80124adf9c59bc9893aa28d2: Bootstrap starting.
I20250411 13:58:08.144488 21105 raft_consensus.cc:491] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:58:08.191069 21105 raft_consensus.cc:513] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.192727 21105 leader_election.cc:290] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2 [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers 122f34c0ee774ec9a21f147299a21168 (127.15.113.1:33621), 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391)
I20250411 13:58:08.158162 20785 leader_election.cc:304] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2 [CANDIDATE]: Term 1 election: Election decided. Result: candidate won. Election summary: received 3 responses out of 3 voters: 2 yes votes; 1 no votes. yes voters: 122f34c0ee774ec9a21f147299a21168, f5f00e6f80124adf9c59bc9893aa28d2; no voters: 22912eefbeb34010bb4bb7c0acaddec2
I20250411 13:58:08.195466 20992 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "f06a30b6a1ae4fc98270958c903ee84e" candidate_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "22912eefbeb34010bb4bb7c0acaddec2" is_pre_election: true
I20250411 13:58:08.196050 20992 raft_consensus.cc:2466] T f06a30b6a1ae4fc98270958c903ee84e P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate f5f00e6f80124adf9c59bc9893aa28d2 in term 0.
I20250411 13:58:08.197516 20786 leader_election.cc:304] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2 [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 22912eefbeb34010bb4bb7c0acaddec2, f5f00e6f80124adf9c59bc9893aa28d2; no voters: 
I20250411 13:58:08.198715 21112 raft_consensus.cc:2802] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2 [term 1 FOLLOWER]: Leader election won for term 1
I20250411 13:58:08.199172 21112 raft_consensus.cc:695] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2 [term 1 LEADER]: Becoming Leader. State: Replica: f5f00e6f80124adf9c59bc9893aa28d2, State: Running, Role: LEADER
I20250411 13:58:08.199872 21112 consensus_queue.cc:237] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2 [LEADER]: Queue going to LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 1, Majority size: 2, State: 0, Mode: LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.200526 21110 raft_consensus.cc:2802] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Leader pre-election won for term 1
I20250411 13:58:08.200834 21110 raft_consensus.cc:491] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
I20250411 13:58:08.201150 21110 raft_consensus.cc:3058] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:08.200616 20523 catalog_manager.cc:5581] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168 reported cstate change: term changed from 0 to 1, leader changed from <none> to 122f34c0ee774ec9a21f147299a21168 (127.15.113.1). New cstate: current_term: 1 leader_uuid: "122f34c0ee774ec9a21f147299a21168" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } health_report { overall_health: UNKNOWN } } peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } health_report { overall_health: HEALTHY } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } health_report { overall_health: UNKNOWN } } }
I20250411 13:58:08.209592 21074 tablet_bootstrap.cc:654] T 7a5ac5676226458ead4471475edfb6f7 P f5f00e6f80124adf9c59bc9893aa28d2: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:08.209371 20522 catalog_manager.cc:5581] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2 reported cstate change: term changed from 0 to 1, leader changed from <none> to f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2). New cstate: current_term: 1 leader_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } health_report { overall_health: UNKNOWN } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } health_report { overall_health: HEALTHY } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } health_report { overall_health: UNKNOWN } } }
I20250411 13:58:08.211575 20852 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "f06a30b6a1ae4fc98270958c903ee84e" candidate_uuid: "22912eefbeb34010bb4bb7c0acaddec2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" is_pre_election: true
I20250411 13:58:08.211653 20851 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "7a5ac5676226458ead4471475edfb6f7" candidate_uuid: "22912eefbeb34010bb4bb7c0acaddec2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" is_pre_election: true
W20250411 13:58:08.212934 20925 leader_election.cc:343] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2 [CANDIDATE]: Term 1 pre-election: Tablet error from VoteRequest() call to peer f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531): Illegal state: must be running to vote when last-logged opid is not known
I20250411 13:58:08.220181 20991 raft_consensus.cc:2466] T e05ab5d0d86c4259ae74d3dca627589c P 22912eefbeb34010bb4bb7c0acaddec2 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate 122f34c0ee774ec9a21f147299a21168 in term 1.
I20250411 13:58:08.222609 21110 raft_consensus.cc:513] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2 [term 1 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.223738 20852 raft_consensus.cc:2391] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2 [term 1 FOLLOWER]: Leader pre-election vote request: Denying vote to candidate 22912eefbeb34010bb4bb7c0acaddec2 in current term 1: Already voted for candidate f5f00e6f80124adf9c59bc9893aa28d2 in this term.
I20250411 13:58:08.225528 21110 leader_election.cc:290] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2 [CANDIDATE]: Term 1 election: Requested vote from peers 122f34c0ee774ec9a21f147299a21168 (127.15.113.1:33621), 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391)
I20250411 13:58:08.226507 20991 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "f06a30b6a1ae4fc98270958c903ee84e" candidate_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "22912eefbeb34010bb4bb7c0acaddec2"
I20250411 13:58:08.227054 20991 raft_consensus.cc:3058] T f06a30b6a1ae4fc98270958c903ee84e P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:08.230247 21072 tablet_bootstrap.cc:492] T 46a1673aabd54e5f83f6b401b54bf2dc P 22912eefbeb34010bb4bb7c0acaddec2: No bootstrap required, opened a new log
I20250411 13:58:08.230621 21072 ts_tablet_manager.cc:1397] T 46a1673aabd54e5f83f6b401b54bf2dc P 22912eefbeb34010bb4bb7c0acaddec2: Time spent bootstrapping tablet: real 0.087s	user 0.015s	sys 0.005s
I20250411 13:58:08.231220 20712 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "f06a30b6a1ae4fc98270958c903ee84e" candidate_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "122f34c0ee774ec9a21f147299a21168"
I20250411 13:58:08.231729 20712 raft_consensus.cc:3058] T f06a30b6a1ae4fc98270958c903ee84e P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:08.236605 20991 raft_consensus.cc:2466] T f06a30b6a1ae4fc98270958c903ee84e P 22912eefbeb34010bb4bb7c0acaddec2 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate f5f00e6f80124adf9c59bc9893aa28d2 in term 1.
I20250411 13:58:08.237278 20645 leader_election.cc:304] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 election: Election decided. Result: candidate won. Election summary: received 3 responses out of 3 voters: 2 yes votes; 1 no votes. yes voters: 122f34c0ee774ec9a21f147299a21168, 22912eefbeb34010bb4bb7c0acaddec2; no voters: f5f00e6f80124adf9c59bc9893aa28d2
I20250411 13:58:08.237571 20786 leader_election.cc:304] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2 [CANDIDATE]: Term 1 election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 22912eefbeb34010bb4bb7c0acaddec2, f5f00e6f80124adf9c59bc9893aa28d2; no voters: 
I20250411 13:58:08.238306 21075 raft_consensus.cc:2802] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168 [term 1 FOLLOWER]: Leader election won for term 1
I20250411 13:58:08.238307 21110 raft_consensus.cc:2802] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2 [term 1 FOLLOWER]: Leader election won for term 1
I20250411 13:58:08.238725 21075 raft_consensus.cc:695] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168 [term 1 LEADER]: Becoming Leader. State: Replica: 122f34c0ee774ec9a21f147299a21168, State: Running, Role: LEADER
I20250411 13:58:08.238706 21110 raft_consensus.cc:695] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2 [term 1 LEADER]: Becoming Leader. State: Replica: f5f00e6f80124adf9c59bc9893aa28d2, State: Running, Role: LEADER
I20250411 13:58:08.239441 21075 consensus_queue.cc:237] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168 [LEADER]: Queue going to LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 1, Majority size: 2, State: 0, Mode: LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.239440 21110 consensus_queue.cc:237] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2 [LEADER]: Queue going to LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 1, Majority size: 2, State: 0, Mode: LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.241381 21072 raft_consensus.cc:357] T 46a1673aabd54e5f83f6b401b54bf2dc P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.241523 20711 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "f06a30b6a1ae4fc98270958c903ee84e" candidate_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "122f34c0ee774ec9a21f147299a21168" is_pre_election: true
I20250411 13:58:08.241972 21072 raft_consensus.cc:383] T 46a1673aabd54e5f83f6b401b54bf2dc P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:08.242198 21072 raft_consensus.cc:738] T 46a1673aabd54e5f83f6b401b54bf2dc P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 22912eefbeb34010bb4bb7c0acaddec2, State: Initialized, Role: FOLLOWER
I20250411 13:58:08.243172 21072 consensus_queue.cc:260] T 46a1673aabd54e5f83f6b401b54bf2dc P 22912eefbeb34010bb4bb7c0acaddec2 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.246059 20712 raft_consensus.cc:2466] T f06a30b6a1ae4fc98270958c903ee84e P 122f34c0ee774ec9a21f147299a21168 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate f5f00e6f80124adf9c59bc9893aa28d2 in term 1.
I20250411 13:58:08.262120 21072 ts_tablet_manager.cc:1428] T 46a1673aabd54e5f83f6b401b54bf2dc P 22912eefbeb34010bb4bb7c0acaddec2: Time spent starting tablet: real 0.031s	user 0.002s	sys 0.003s
I20250411 13:58:08.262956 21072 tablet_bootstrap.cc:492] T b0a853b842e54d5d993fc9199fd7dc0f P 22912eefbeb34010bb4bb7c0acaddec2: Bootstrap starting.
I20250411 13:58:08.273705 21072 tablet_bootstrap.cc:654] T b0a853b842e54d5d993fc9199fd7dc0f P 22912eefbeb34010bb4bb7c0acaddec2: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:08.290093 21094 raft_consensus.cc:491] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:58:08.290517 21094 raft_consensus.cc:513] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.292299 20712 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "7a5ac5676226458ead4471475edfb6f7" candidate_uuid: "22912eefbeb34010bb4bb7c0acaddec2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "122f34c0ee774ec9a21f147299a21168" is_pre_election: true
I20250411 13:58:08.293996 20712 raft_consensus.cc:2466] T 7a5ac5676226458ead4471475edfb6f7 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate 22912eefbeb34010bb4bb7c0acaddec2 in term 0.
I20250411 13:58:08.293233 20711 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "f06a30b6a1ae4fc98270958c903ee84e" candidate_uuid: "22912eefbeb34010bb4bb7c0acaddec2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "122f34c0ee774ec9a21f147299a21168" is_pre_election: true
I20250411 13:58:08.295682 20711 raft_consensus.cc:2391] T f06a30b6a1ae4fc98270958c903ee84e P 122f34c0ee774ec9a21f147299a21168 [term 1 FOLLOWER]: Leader pre-election vote request: Denying vote to candidate 22912eefbeb34010bb4bb7c0acaddec2 in current term 1: Already voted for candidate f5f00e6f80124adf9c59bc9893aa28d2 in this term.
I20250411 13:58:08.296561 20925 leader_election.cc:304] T f06a30b6a1ae4fc98270958c903ee84e P 22912eefbeb34010bb4bb7c0acaddec2 [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate lost. Election summary: received 3 responses out of 3 voters: 1 yes votes; 2 no votes. yes voters: 22912eefbeb34010bb4bb7c0acaddec2; no voters: 122f34c0ee774ec9a21f147299a21168, f5f00e6f80124adf9c59bc9893aa28d2
I20250411 13:58:08.299070 21078 raft_consensus.cc:2747] T f06a30b6a1ae4fc98270958c903ee84e P 22912eefbeb34010bb4bb7c0acaddec2 [term 1 FOLLOWER]: Leader pre-election lost for term 1. Reason: could not achieve majority
I20250411 13:58:08.301124 20925 leader_election.cc:304] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2 [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate won. Election summary: received 3 responses out of 3 voters: 2 yes votes; 1 no votes. yes voters: 122f34c0ee774ec9a21f147299a21168, 22912eefbeb34010bb4bb7c0acaddec2; no voters: f5f00e6f80124adf9c59bc9893aa28d2
I20250411 13:58:08.302850 21078 raft_consensus.cc:2802] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Leader pre-election won for term 1
I20250411 13:58:08.303260 21078 raft_consensus.cc:491] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
I20250411 13:58:08.303603 21078 raft_consensus.cc:3058] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Advancing to term 1
W20250411 13:58:08.312057 20785 outbound_call.cc:321] RPC callback for RPC call kudu.consensus.ConsensusService.RequestConsensusVote -> {remote=127.15.113.1:33621, user_credentials={real_user=slave}} blocked reactor thread for 67845.3us
I20250411 13:58:08.330142 21094 leader_election.cc:290] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531), 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391)
I20250411 13:58:08.331166 20852 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "05333cbabc6e492d9d8bb10b6394a9ad" candidate_uuid: "122f34c0ee774ec9a21f147299a21168" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" is_pre_election: true
I20250411 13:58:08.331691 20991 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "05333cbabc6e492d9d8bb10b6394a9ad" candidate_uuid: "122f34c0ee774ec9a21f147299a21168" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "22912eefbeb34010bb4bb7c0acaddec2" is_pre_election: true
I20250411 13:58:08.331943 21094 consensus_queue.cc:1035] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168 [LEADER]: Connected to new peer: Peer: permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:58:08.332218 20991 raft_consensus.cc:2466] T 05333cbabc6e492d9d8bb10b6394a9ad P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate 122f34c0ee774ec9a21f147299a21168 in term 0.
W20250411 13:58:08.332944 20644 leader_election.cc:343] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 pre-election: Tablet error from VoteRequest() call to peer f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531): Illegal state: must be running to vote when last-logged opid is not known
I20250411 13:58:08.334522 20645 leader_election.cc:304] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate won. Election summary: received 3 responses out of 3 voters: 2 yes votes; 1 no votes. yes voters: 122f34c0ee774ec9a21f147299a21168, 22912eefbeb34010bb4bb7c0acaddec2; no voters: f5f00e6f80124adf9c59bc9893aa28d2
I20250411 13:58:08.335537 21094 raft_consensus.cc:2802] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Leader pre-election won for term 1
I20250411 13:58:08.335896 21094 raft_consensus.cc:491] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
I20250411 13:58:08.326161 21078 raft_consensus.cc:513] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2 [term 1 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.336217 21094 raft_consensus.cc:3058] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:08.338238 20712 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "7a5ac5676226458ead4471475edfb6f7" candidate_uuid: "22912eefbeb34010bb4bb7c0acaddec2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "122f34c0ee774ec9a21f147299a21168"
I20250411 13:58:08.338761 20712 raft_consensus.cc:3058] T 7a5ac5676226458ead4471475edfb6f7 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:08.326071 20520 catalog_manager.cc:5581] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2 reported cstate change: term changed from 0 to 1, leader changed from <none> to f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2). New cstate: current_term: 1 leader_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } health_report { overall_health: UNKNOWN } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } health_report { overall_health: HEALTHY } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } health_report { overall_health: UNKNOWN } } }
I20250411 13:58:08.339246 20852 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "7a5ac5676226458ead4471475edfb6f7" candidate_uuid: "22912eefbeb34010bb4bb7c0acaddec2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "f5f00e6f80124adf9c59bc9893aa28d2"
W20250411 13:58:08.340348 20925 leader_election.cc:343] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2 [CANDIDATE]: Term 1 election: Tablet error from VoteRequest() call to peer f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531): Illegal state: must be running to vote when last-logged opid is not known
I20250411 13:58:08.340787 21078 leader_election.cc:290] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2 [CANDIDATE]: Term 1 election: Requested vote from peers 122f34c0ee774ec9a21f147299a21168 (127.15.113.1:33621), f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531)
I20250411 13:58:08.340324 20520 catalog_manager.cc:5581] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2 reported cstate change: term changed from 0 to 1, leader changed from <none> to f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2). New cstate: current_term: 1 leader_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } health_report { overall_health: UNKNOWN } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } health_report { overall_health: HEALTHY } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } health_report { overall_health: UNKNOWN } } }
I20250411 13:58:08.345501 20712 raft_consensus.cc:2466] T 7a5ac5676226458ead4471475edfb6f7 P 122f34c0ee774ec9a21f147299a21168 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate 22912eefbeb34010bb4bb7c0acaddec2 in term 1.
I20250411 13:58:08.355159 21074 tablet_bootstrap.cc:492] T 7a5ac5676226458ead4471475edfb6f7 P f5f00e6f80124adf9c59bc9893aa28d2: No bootstrap required, opened a new log
I20250411 13:58:08.355612 21074 ts_tablet_manager.cc:1397] T 7a5ac5676226458ead4471475edfb6f7 P f5f00e6f80124adf9c59bc9893aa28d2: Time spent bootstrapping tablet: real 0.167s	user 0.031s	sys 0.045s
I20250411 13:58:08.357548 21074 raft_consensus.cc:357] T 7a5ac5676226458ead4471475edfb6f7 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.358554 21074 raft_consensus.cc:383] T 7a5ac5676226458ead4471475edfb6f7 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:08.358857 21074 raft_consensus.cc:738] T 7a5ac5676226458ead4471475edfb6f7 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: f5f00e6f80124adf9c59bc9893aa28d2, State: Initialized, Role: FOLLOWER
I20250411 13:58:08.359010 20925 leader_election.cc:304] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2 [CANDIDATE]: Term 1 election: Election decided. Result: candidate won. Election summary: received 3 responses out of 3 voters: 2 yes votes; 1 no votes. yes voters: 122f34c0ee774ec9a21f147299a21168, 22912eefbeb34010bb4bb7c0acaddec2; no voters: f5f00e6f80124adf9c59bc9893aa28d2
I20250411 13:58:08.359534 21074 consensus_queue.cc:260] T 7a5ac5676226458ead4471475edfb6f7 P f5f00e6f80124adf9c59bc9893aa28d2 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.360096 21133 raft_consensus.cc:2802] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2 [term 1 FOLLOWER]: Leader election won for term 1
I20250411 13:58:08.361523 21074 ts_tablet_manager.cc:1428] T 7a5ac5676226458ead4471475edfb6f7 P f5f00e6f80124adf9c59bc9893aa28d2: Time spent starting tablet: real 0.006s	user 0.004s	sys 0.000s
I20250411 13:58:08.362294 21074 tablet_bootstrap.cc:492] T 05333cbabc6e492d9d8bb10b6394a9ad P f5f00e6f80124adf9c59bc9893aa28d2: Bootstrap starting.
I20250411 13:58:08.367504 21074 tablet_bootstrap.cc:654] T 05333cbabc6e492d9d8bb10b6394a9ad P f5f00e6f80124adf9c59bc9893aa28d2: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:08.379317 21133 raft_consensus.cc:695] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2 [term 1 LEADER]: Becoming Leader. State: Replica: 22912eefbeb34010bb4bb7c0acaddec2, State: Running, Role: LEADER
I20250411 13:58:08.380227 21133 consensus_queue.cc:237] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2 [LEADER]: Queue going to LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 1, Majority size: 2, State: 0, Mode: LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.380390 21110 consensus_queue.cc:1035] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2 [LEADER]: Connected to new peer: Peer: permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:58:08.409173 21075 consensus_queue.cc:1035] T a8eb3b2828c745c591f3b57222005ae4 P 122f34c0ee774ec9a21f147299a21168 [LEADER]: Connected to new peer: Peer: permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:58:08.411013 21074 tablet_bootstrap.cc:492] T 05333cbabc6e492d9d8bb10b6394a9ad P f5f00e6f80124adf9c59bc9893aa28d2: No bootstrap required, opened a new log
I20250411 13:58:08.411484 21074 ts_tablet_manager.cc:1397] T 05333cbabc6e492d9d8bb10b6394a9ad P f5f00e6f80124adf9c59bc9893aa28d2: Time spent bootstrapping tablet: real 0.049s	user 0.009s	sys 0.000s
I20250411 13:58:08.413877 21074 raft_consensus.cc:357] T 05333cbabc6e492d9d8bb10b6394a9ad P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.414637 21074 raft_consensus.cc:383] T 05333cbabc6e492d9d8bb10b6394a9ad P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:08.414964 21074 raft_consensus.cc:738] T 05333cbabc6e492d9d8bb10b6394a9ad P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: f5f00e6f80124adf9c59bc9893aa28d2, State: Initialized, Role: FOLLOWER
I20250411 13:58:08.421962 21074 consensus_queue.cc:260] T 05333cbabc6e492d9d8bb10b6394a9ad P f5f00e6f80124adf9c59bc9893aa28d2 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.432624 20523 catalog_manager.cc:5581] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168 reported cstate change: term changed from 0 to 1, leader changed from <none> to 122f34c0ee774ec9a21f147299a21168 (127.15.113.1). New cstate: current_term: 1 leader_uuid: "122f34c0ee774ec9a21f147299a21168" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } health_report { overall_health: HEALTHY } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } health_report { overall_health: UNKNOWN } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } health_report { overall_health: UNKNOWN } } }
I20250411 13:58:08.436779 20523 catalog_manager.cc:5581] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168 reported cstate change: term changed from 0 to 1, leader changed from <none> to 122f34c0ee774ec9a21f147299a21168 (127.15.113.1). New cstate: current_term: 1 leader_uuid: "122f34c0ee774ec9a21f147299a21168" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } health_report { overall_health: HEALTHY } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } health_report { overall_health: UNKNOWN } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } health_report { overall_health: UNKNOWN } } }
I20250411 13:58:08.424504 21074 ts_tablet_manager.cc:1428] T 05333cbabc6e492d9d8bb10b6394a9ad P f5f00e6f80124adf9c59bc9893aa28d2: Time spent starting tablet: real 0.013s	user 0.005s	sys 0.000s
I20250411 13:58:08.439965 21074 tablet_bootstrap.cc:492] T e05ab5d0d86c4259ae74d3dca627589c P f5f00e6f80124adf9c59bc9893aa28d2: Bootstrap starting.
I20250411 13:58:08.444792 21094 raft_consensus.cc:513] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168 [term 1 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.448275 21094 leader_election.cc:290] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 election: Requested vote from peers f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531), 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391)
I20250411 13:58:08.450218 20851 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "05333cbabc6e492d9d8bb10b6394a9ad" candidate_uuid: "122f34c0ee774ec9a21f147299a21168" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "f5f00e6f80124adf9c59bc9893aa28d2"
I20250411 13:58:08.450824 20851 raft_consensus.cc:3058] T 05333cbabc6e492d9d8bb10b6394a9ad P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:08.451457 20991 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "05333cbabc6e492d9d8bb10b6394a9ad" candidate_uuid: "122f34c0ee774ec9a21f147299a21168" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "22912eefbeb34010bb4bb7c0acaddec2"
I20250411 13:58:08.451959 20991 raft_consensus.cc:3058] T 05333cbabc6e492d9d8bb10b6394a9ad P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:08.458107 20851 raft_consensus.cc:2466] T 05333cbabc6e492d9d8bb10b6394a9ad P f5f00e6f80124adf9c59bc9893aa28d2 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate 122f34c0ee774ec9a21f147299a21168 in term 1.
I20250411 13:58:08.459299 21105 consensus_queue.cc:1035] T 82ee2b302bbb436ab32dd61acbf16229 P f5f00e6f80124adf9c59bc9893aa28d2 [LEADER]: Connected to new peer: Peer: permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:58:08.460165 21074 tablet_bootstrap.cc:654] T e05ab5d0d86c4259ae74d3dca627589c P f5f00e6f80124adf9c59bc9893aa28d2: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:08.460688 21072 tablet_bootstrap.cc:492] T b0a853b842e54d5d993fc9199fd7dc0f P 22912eefbeb34010bb4bb7c0acaddec2: No bootstrap required, opened a new log
I20250411 13:58:08.461107 21072 ts_tablet_manager.cc:1397] T b0a853b842e54d5d993fc9199fd7dc0f P 22912eefbeb34010bb4bb7c0acaddec2: Time spent bootstrapping tablet: real 0.198s	user 0.034s	sys 0.045s
I20250411 13:58:08.463161 21072 raft_consensus.cc:357] T b0a853b842e54d5d993fc9199fd7dc0f P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } } peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } }
I20250411 13:58:08.463750 21072 raft_consensus.cc:383] T b0a853b842e54d5d993fc9199fd7dc0f P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:08.464001 21072 raft_consensus.cc:738] T b0a853b842e54d5d993fc9199fd7dc0f P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 22912eefbeb34010bb4bb7c0acaddec2, State: Initialized, Role: FOLLOWER
I20250411 13:58:08.464506 20644 leader_election.cc:304] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168 [CANDIDATE]: Term 1 election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 122f34c0ee774ec9a21f147299a21168, f5f00e6f80124adf9c59bc9893aa28d2; no voters: 
I20250411 13:58:08.465466 21104 raft_consensus.cc:2802] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168 [term 1 FOLLOWER]: Leader election won for term 1
I20250411 13:58:08.465875 21104 raft_consensus.cc:695] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168 [term 1 LEADER]: Becoming Leader. State: Replica: 122f34c0ee774ec9a21f147299a21168, State: Running, Role: LEADER
I20250411 13:58:08.466501 21104 consensus_queue.cc:237] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168 [LEADER]: Queue going to LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 1, Majority size: 2, State: 0, Mode: LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.468472 21072 consensus_queue.cc:260] T b0a853b842e54d5d993fc9199fd7dc0f P 22912eefbeb34010bb4bb7c0acaddec2 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } } peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } }
I20250411 13:58:08.469172 20991 raft_consensus.cc:2466] T 05333cbabc6e492d9d8bb10b6394a9ad P 22912eefbeb34010bb4bb7c0acaddec2 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate 122f34c0ee774ec9a21f147299a21168 in term 1.
I20250411 13:58:08.475128 21072 ts_tablet_manager.cc:1428] T b0a853b842e54d5d993fc9199fd7dc0f P 22912eefbeb34010bb4bb7c0acaddec2: Time spent starting tablet: real 0.014s	user 0.005s	sys 0.000s
I20250411 13:58:08.487498 21110 consensus_queue.cc:1035] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2 [LEADER]: Connected to new peer: Peer: permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:58:08.526703 21133 raft_consensus.cc:491] T b0a853b842e54d5d993fc9199fd7dc0f P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:58:08.527220 21133 raft_consensus.cc:513] T b0a853b842e54d5d993fc9199fd7dc0f P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } } peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } }
I20250411 13:58:08.528990 21133 leader_election.cc:290] T b0a853b842e54d5d993fc9199fd7dc0f P 22912eefbeb34010bb4bb7c0acaddec2 [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers 122f34c0ee774ec9a21f147299a21168 (127.15.113.1:33621), f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531)
I20250411 13:58:08.528394 20522 catalog_manager.cc:5581] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2 reported cstate change: term changed from 0 to 1, leader changed from <none> to 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3). New cstate: current_term: 1 leader_uuid: "22912eefbeb34010bb4bb7c0acaddec2" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } health_report { overall_health: UNKNOWN } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } health_report { overall_health: UNKNOWN } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } health_report { overall_health: HEALTHY } } }
I20250411 13:58:08.535104 20851 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "b0a853b842e54d5d993fc9199fd7dc0f" candidate_uuid: "22912eefbeb34010bb4bb7c0acaddec2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" is_pre_election: true
I20250411 13:58:08.535789 20851 raft_consensus.cc:2391] T b0a853b842e54d5d993fc9199fd7dc0f P f5f00e6f80124adf9c59bc9893aa28d2 [term 1 FOLLOWER]: Leader pre-election vote request: Denying vote to candidate 22912eefbeb34010bb4bb7c0acaddec2 in current term 1: Already voted for candidate 122f34c0ee774ec9a21f147299a21168 in this term.
I20250411 13:58:08.541288 20711 raft_consensus.cc:3058] T 163e127ff588462996fe89e47afb9ee3 P 122f34c0ee774ec9a21f147299a21168 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:08.553248 20712 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "b0a853b842e54d5d993fc9199fd7dc0f" candidate_uuid: "22912eefbeb34010bb4bb7c0acaddec2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "122f34c0ee774ec9a21f147299a21168" is_pre_election: true
I20250411 13:58:08.554574 20925 leader_election.cc:304] T b0a853b842e54d5d993fc9199fd7dc0f P 22912eefbeb34010bb4bb7c0acaddec2 [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate lost. Election summary: received 3 responses out of 3 voters: 1 yes votes; 2 no votes. yes voters: 22912eefbeb34010bb4bb7c0acaddec2; no voters: 122f34c0ee774ec9a21f147299a21168, f5f00e6f80124adf9c59bc9893aa28d2
I20250411 13:58:08.555804 21133 raft_consensus.cc:3058] T b0a853b842e54d5d993fc9199fd7dc0f P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:08.557032 21110 consensus_queue.cc:1035] T 163e127ff588462996fe89e47afb9ee3 P f5f00e6f80124adf9c59bc9893aa28d2 [LEADER]: Connected to new peer: Peer: permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:58:08.573542 21075 consensus_queue.cc:1035] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168 [LEADER]: Connected to new peer: Peer: permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:58:08.575775 21094 consensus_queue.cc:1035] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168 [LEADER]: Connected to new peer: Peer: permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.002s
I20250411 13:58:08.591423 21110 consensus_queue.cc:1035] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2 [LEADER]: Connected to new peer: Peer: permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:58:08.632194 21133 raft_consensus.cc:2747] T b0a853b842e54d5d993fc9199fd7dc0f P 22912eefbeb34010bb4bb7c0acaddec2 [term 1 FOLLOWER]: Leader pre-election lost for term 1. Reason: could not achieve majority
I20250411 13:58:08.639855 21075 consensus_queue.cc:1035] T b0a853b842e54d5d993fc9199fd7dc0f P 122f34c0ee774ec9a21f147299a21168 [LEADER]: Connected to new peer: Peer: permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:58:08.639988 21078 raft_consensus.cc:491] T 46a1673aabd54e5f83f6b401b54bf2dc P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:58:08.640693 21078 raft_consensus.cc:513] T 46a1673aabd54e5f83f6b401b54bf2dc P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.642571 21078 leader_election.cc:290] T 46a1673aabd54e5f83f6b401b54bf2dc P 22912eefbeb34010bb4bb7c0acaddec2 [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers 122f34c0ee774ec9a21f147299a21168 (127.15.113.1:33621), f5f00e6f80124adf9c59bc9893aa28d2 (127.15.113.2:39531)
I20250411 13:58:08.643708 20709 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "46a1673aabd54e5f83f6b401b54bf2dc" candidate_uuid: "22912eefbeb34010bb4bb7c0acaddec2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "122f34c0ee774ec9a21f147299a21168" is_pre_election: true
I20250411 13:58:08.649510 20852 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "46a1673aabd54e5f83f6b401b54bf2dc" candidate_uuid: "22912eefbeb34010bb4bb7c0acaddec2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" is_pre_election: true
I20250411 13:58:08.650609 20925 leader_election.cc:304] T 46a1673aabd54e5f83f6b401b54bf2dc P 22912eefbeb34010bb4bb7c0acaddec2 [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate lost. Election summary: received 3 responses out of 3 voters: 1 yes votes; 2 no votes. yes voters: 22912eefbeb34010bb4bb7c0acaddec2; no voters: 122f34c0ee774ec9a21f147299a21168, f5f00e6f80124adf9c59bc9893aa28d2
I20250411 13:58:08.651311 21078 raft_consensus.cc:2747] T 46a1673aabd54e5f83f6b401b54bf2dc P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Leader pre-election lost for term 1. Reason: could not achieve majority
I20250411 13:58:08.694761 21074 tablet_bootstrap.cc:492] T e05ab5d0d86c4259ae74d3dca627589c P f5f00e6f80124adf9c59bc9893aa28d2: No bootstrap required, opened a new log
I20250411 13:58:08.695369 21074 ts_tablet_manager.cc:1397] T e05ab5d0d86c4259ae74d3dca627589c P f5f00e6f80124adf9c59bc9893aa28d2: Time spent bootstrapping tablet: real 0.256s	user 0.034s	sys 0.029s
I20250411 13:58:08.698180 21074 raft_consensus.cc:357] T e05ab5d0d86c4259ae74d3dca627589c P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.708204 21074 raft_consensus.cc:383] T e05ab5d0d86c4259ae74d3dca627589c P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:08.708461 21074 raft_consensus.cc:738] T e05ab5d0d86c4259ae74d3dca627589c P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: f5f00e6f80124adf9c59bc9893aa28d2, State: Initialized, Role: FOLLOWER
I20250411 13:58:08.709048 21074 consensus_queue.cc:260] T e05ab5d0d86c4259ae74d3dca627589c P f5f00e6f80124adf9c59bc9893aa28d2 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.715798 21105 raft_consensus.cc:491] T 7a5ac5676226458ead4471475edfb6f7 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:58:08.716380 21105 raft_consensus.cc:513] T 7a5ac5676226458ead4471475edfb6f7 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.721977 20992 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "7a5ac5676226458ead4471475edfb6f7" candidate_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "22912eefbeb34010bb4bb7c0acaddec2" is_pre_election: true
I20250411 13:58:08.722401 20991 raft_consensus.cc:3058] T 46a1673aabd54e5f83f6b401b54bf2dc P 22912eefbeb34010bb4bb7c0acaddec2 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:08.718781 21105 leader_election.cc:290] T 7a5ac5676226458ead4471475edfb6f7 P f5f00e6f80124adf9c59bc9893aa28d2 [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers 122f34c0ee774ec9a21f147299a21168 (127.15.113.1:33621), 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391)
I20250411 13:58:08.734493 21074 ts_tablet_manager.cc:1428] T e05ab5d0d86c4259ae74d3dca627589c P f5f00e6f80124adf9c59bc9893aa28d2: Time spent starting tablet: real 0.039s	user 0.006s	sys 0.000s
I20250411 13:58:08.736647 21105 consensus_queue.cc:1035] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2 [LEADER]: Connected to new peer: Peer: permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:58:08.741482 21105 consensus_queue.cc:1035] T 46a1673aabd54e5f83f6b401b54bf2dc P f5f00e6f80124adf9c59bc9893aa28d2 [LEADER]: Connected to new peer: Peer: permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.001s
I20250411 13:58:08.768251 21105 raft_consensus.cc:491] T e05ab5d0d86c4259ae74d3dca627589c P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:58:08.768741 21105 raft_consensus.cc:513] T e05ab5d0d86c4259ae74d3dca627589c P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } }
I20250411 13:58:08.775583 21105 leader_election.cc:290] T e05ab5d0d86c4259ae74d3dca627589c P f5f00e6f80124adf9c59bc9893aa28d2 [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers 122f34c0ee774ec9a21f147299a21168 (127.15.113.1:33621), 22912eefbeb34010bb4bb7c0acaddec2 (127.15.113.3:33391)
I20250411 13:58:08.777674 20990 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "e05ab5d0d86c4259ae74d3dca627589c" candidate_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "22912eefbeb34010bb4bb7c0acaddec2" is_pre_election: true
I20250411 13:58:08.778478 20990 raft_consensus.cc:2391] T e05ab5d0d86c4259ae74d3dca627589c P 22912eefbeb34010bb4bb7c0acaddec2 [term 1 FOLLOWER]: Leader pre-election vote request: Denying vote to candidate f5f00e6f80124adf9c59bc9893aa28d2 in current term 1: Already voted for candidate 122f34c0ee774ec9a21f147299a21168 in this term.
W20250411 13:58:08.759439 20644 outbound_call.cc:321] RPC callback for RPC call kudu.consensus.ConsensusService.UpdateConsensus -> {remote=127.15.113.2:39531, user_credentials={real_user=slave}} blocked reactor thread for 85595.4us
W20250411 13:58:08.778533 20645 outbound_call.cc:321] RPC callback for RPC call kudu.consensus.ConsensusService.UpdateConsensus -> {remote=127.15.113.3:33391, user_credentials={real_user=slave}} blocked reactor thread for 103760us
I20250411 13:58:08.790292 21152 consensus_queue.cc:1035] T 92dee3062e3e4cebb49c41c569889f43 P 122f34c0ee774ec9a21f147299a21168 [LEADER]: Connected to new peer: Peer: permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:58:08.788633 20711 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "7a5ac5676226458ead4471475edfb6f7" candidate_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "122f34c0ee774ec9a21f147299a21168" is_pre_election: true
I20250411 13:58:08.791098 20711 raft_consensus.cc:2391] T 7a5ac5676226458ead4471475edfb6f7 P 122f34c0ee774ec9a21f147299a21168 [term 1 FOLLOWER]: Leader pre-election vote request: Denying vote to candidate f5f00e6f80124adf9c59bc9893aa28d2 in current term 1: Already voted for candidate 22912eefbeb34010bb4bb7c0acaddec2 in this term.
I20250411 13:58:08.799209 20785 leader_election.cc:304] T 7a5ac5676226458ead4471475edfb6f7 P f5f00e6f80124adf9c59bc9893aa28d2 [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate lost. Election summary: received 3 responses out of 3 voters: 1 yes votes; 2 no votes. yes voters: f5f00e6f80124adf9c59bc9893aa28d2; no voters: 122f34c0ee774ec9a21f147299a21168, 22912eefbeb34010bb4bb7c0acaddec2
I20250411 13:58:08.800698 20711 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "e05ab5d0d86c4259ae74d3dca627589c" candidate_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "122f34c0ee774ec9a21f147299a21168" is_pre_election: true
I20250411 13:58:08.800966 21112 raft_consensus.cc:3058] T 7a5ac5676226458ead4471475edfb6f7 P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:08.801733 21075 consensus_queue.cc:1035] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168 [LEADER]: Connected to new peer: Peer: permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:58:08.807783 20785 leader_election.cc:304] T e05ab5d0d86c4259ae74d3dca627589c P f5f00e6f80124adf9c59bc9893aa28d2 [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate lost. Election summary: received 3 responses out of 3 voters: 1 yes votes; 2 no votes. yes voters: f5f00e6f80124adf9c59bc9893aa28d2; no voters: 122f34c0ee774ec9a21f147299a21168, 22912eefbeb34010bb4bb7c0acaddec2
I20250411 13:58:08.808225 21133 consensus_queue.cc:1035] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2 [LEADER]: Connected to new peer: Peer: permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:58:08.808856 21110 raft_consensus.cc:3058] T e05ab5d0d86c4259ae74d3dca627589c P f5f00e6f80124adf9c59bc9893aa28d2 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:08.876101 20522 catalog_manager.cc:5581] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168 reported cstate change: term changed from 0 to 1, leader changed from <none> to 122f34c0ee774ec9a21f147299a21168 (127.15.113.1). New cstate: current_term: 1 leader_uuid: "122f34c0ee774ec9a21f147299a21168" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 } health_report { overall_health: HEALTHY } } peers { permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 } health_report { overall_health: UNKNOWN } } peers { permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 } health_report { overall_health: UNKNOWN } } }
I20250411 13:58:08.880745 21112 raft_consensus.cc:2747] T 7a5ac5676226458ead4471475edfb6f7 P f5f00e6f80124adf9c59bc9893aa28d2 [term 1 FOLLOWER]: Leader pre-election lost for term 1. Reason: could not achieve majority
I20250411 13:58:08.881964 21110 raft_consensus.cc:2747] T e05ab5d0d86c4259ae74d3dca627589c P f5f00e6f80124adf9c59bc9893aa28d2 [term 1 FOLLOWER]: Leader pre-election lost for term 1. Reason: could not achieve majority
I20250411 13:58:08.886849 21075 consensus_queue.cc:1035] T e05ab5d0d86c4259ae74d3dca627589c P 122f34c0ee774ec9a21f147299a21168 [LEADER]: Connected to new peer: Peer: permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
W20250411 13:58:08.924228 20785 outbound_call.cc:321] RPC callback for RPC call kudu.consensus.ConsensusService.UpdateConsensus -> {remote=127.15.113.1:33621, user_credentials={real_user=slave}} blocked reactor thread for 75966.1us
I20250411 13:58:08.930269 21110 consensus_queue.cc:1035] T f06a30b6a1ae4fc98270958c903ee84e P f5f00e6f80124adf9c59bc9893aa28d2 [LEADER]: Connected to new peer: Peer: permanent_uuid: "122f34c0ee774ec9a21f147299a21168" member_type: VOTER last_known_addr { host: "127.15.113.1" port: 33621 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.001s
I20250411 13:58:08.961721 21133 consensus_queue.cc:1035] T 7a5ac5676226458ead4471475edfb6f7 P 22912eefbeb34010bb4bb7c0acaddec2 [LEADER]: Connected to new peer: Peer: permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:58:08.965162 21104 consensus_queue.cc:1035] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168 [LEADER]: Connected to new peer: Peer: permanent_uuid: "22912eefbeb34010bb4bb7c0acaddec2" member_type: VOTER last_known_addr { host: "127.15.113.3" port: 33391 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.001s
I20250411 13:58:09.008360 21104 consensus_queue.cc:1035] T 05333cbabc6e492d9d8bb10b6394a9ad P 122f34c0ee774ec9a21f147299a21168 [LEADER]: Connected to new peer: Peer: permanent_uuid: "f5f00e6f80124adf9c59bc9893aa28d2" member_type: VOTER last_known_addr { host: "127.15.113.2" port: 39531 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
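The recurring "Status: LMP_MISMATCH" for freshly connected peers looks benign here: LMP presumably refers to the Raft Log Matching Property, and a brand-new peer (Last received: 0.0) cannot match the leader's preceding op until the leader backs off to Next index: 1. A rough sketch of that check, with invented names rather than Kudu's actual types:

    // Illustrative log-matching check behind LMP_MISMATCH (names are made up).
    #include <cstdint>
    #include <cstdio>

    struct OpId { int64_t term; int64_t index; };

    // A follower accepts a batch only if the op immediately preceding it
    // matches the follower's own last log entry.
    bool LogMatches(const OpId& preceding, const OpId& follower_last) {
      return preceding.term == follower_last.term &&
             preceding.index == follower_last.index;
    }

    int main() {
      const OpId preceding{1, 1};      // leader probing from a later index
      const OpId follower_last{0, 0};  // empty log: "Last received: 0.0"
      std::printf("%s\n", LogMatches(preceding, follower_last)
                              ? "accept" : "LMP_MISMATCH");
      return 0;
    }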
I20250411 13:58:09.207065 20522 catalog_manager.cc:3501] Servicing AlterTable request from {username='slave'} at 127.0.0.1:43002:
table { table_name: "table_to_alter" } alter_schema_steps { type: DROP_RANGE_PARTITION drop_range_partition { range_bounds { rows: "<redacted>""\006\000\000\007\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\000\000\000\000\000\000\000\000\007\001\000\n\000\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\n\000\000\000\000\000\000\000\007\001\000\024\000\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\024\000\000\000\000\000\000\000\007\001\000\036\000\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\036\000\000\000\000\000\000\000\007\001\000(\000\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000(\000\000\000\000\000\000\000\007\001\0002\000\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\0002\000\000\000\000\000\000\000\007\001\000<\000\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000<\000\000\000\000\000\000\000\007\001\000F\000\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000F\000\000\000\000\000\000\000\007\001\000P\000\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000P\000\000\000\000\000\000\000\007\001\000Z\000\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000Z\000\000\000\000\000\000\000\007\001\000d\000\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000d\000\000\000\000\000\000\000\007\001\000n\000\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000n\000\000\000\000\000\000\000\007\001\000x\000\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000x\000\000\000\000\000\000\000\007\001\000\202\000\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\202\000\000\000\000\000\000\000\007\001\000\214\000\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\214\000\000\000\000\000\000\000\007\001\000\226\000\000\000\000\000\000\000" 
indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\226\000\000\000\000\000\000\000\007\001\000\240\000\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\240\000\000\000\000\000\000\000\007\001\000\252\000\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\252\000\000\000\000\000\000\000\007\001\000\264\000\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\264\000\000\000\000\000\000\000\007\001\000\276\000\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\276\000\000\000\000\000\000\000\007\001\000\310\000\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\310\000\000\000\000\000\000\000\007\001\000\322\000\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\322\000\000\000\000\000\000\000\007\001\000\334\000\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\334\000\000\000\000\000\000\000\007\001\000\346\000\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\346\000\000\000\000\000\000\000\007\001\000\360\000\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\360\000\000\000\000\000\000\000\007\001\000\372\000\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\372\000\000\000\000\000\000\000\007\001\000\004\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\004\001\000\000\000\000\000\000\007\001\000\016\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\016\001\000\000\000\000\000\000\007\001\000\030\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\030\001\000\000\000\000\000\000\007\001\000\"\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\"\001\000\000\000\000\000\000\007\001\000,\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: 
"<redacted>""\006\001\000,\001\000\000\000\000\000\000\007\001\0006\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\0006\001\000\000\000\000\000\000\007\001\000@\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000@\001\000\000\000\000\000\000\007\001\000J\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000J\001\000\000\000\000\000\000\007\001\000T\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000T\001\000\000\000\000\000\000\007\001\000^\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000^\001\000\000\000\000\000\000\007\001\000h\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000h\001\000\000\000\000\000\000\007\001\000r\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000r\001\000\000\000\000\000\000\007\001\000|\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000|\001\000\000\000\000\000\000\007\001\000\206\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\206\001\000\000\000\000\000\000\007\001\000\220\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\220\001\000\000\000\000\000\000\007\001\000\232\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\232\001\000\000\000\000\000\000\007\001\000\244\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\244\001\000\000\000\000\000\000\007\001\000\256\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\256\001\000\000\000\000\000\000\007\001\000\270\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\270\001\000\000\000\000\000\000\007\001\000\302\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\302\001\000\000\000\000\000\000\007\001\000\314\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: 
ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\314\001\000\000\000\000\000\000\007\001\000\326\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\326\001\000\000\000\000\000\000\007\001\000\340\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\340\001\000\000\000\000\000\000\007\001\000\352\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } alter_schema_steps { type: ADD_RANGE_PARTITION add_range_partition { range_bounds { rows: "<redacted>""\006\001\000\352\001\000\000\000\000\000\000\007\001\000\364\001\000\000\000\000\000\000" indirect_data: "<redacted>""" } } } schema { columns { name: "key" type: INT64 is_key: true is_nullable: false encoding: AUTO_ENCODING compression: DEFAULT_COMPRESSION cfile_block_size: 0 immutable: false } columns { name: "string_column" type: STRING is_key: false is_nullable: true encoding: AUTO_ENCODING compression: DEFAULT_COMPRESSION cfile_block_size: 0 immutable: false } } modify_external_catalogs: true
W20250411 13:58:09.560645 20522 catalog_manager.cc:3896] Invalid argument: an error occurred while updating the sys-catalog: write request (76036 bytes in size) is too large for current setting of the --rpc_max_message_size flag
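This warning appears to be the failure mode the AlterTable test exercises: the masters run with --rpc_max_message_size=65536 (the same flag dump appears below for the TabletReports case), and the single sys-catalog write batching all the DROP/ADD_RANGE_PARTITION steps above serializes to 76036 bytes. A back-of-the-envelope check, assuming a straight size comparison against the flag (not Kudu's actual code path):

    // 76036 > 65536, so the sys-catalog update is rejected (illustrative only).
    #include <cstdint>
    #include <cstdio>

    int main() {
      const int64_t rpc_max_message_size = 65536;  // --rpc_max_message_size
      const int64_t write_request_size = 76036;    // from the warning above
      if (write_request_size > rpc_max_message_size) {
        std::printf("write request (%lld bytes in size) is too large for "
                    "current setting of the --rpc_max_message_size flag\n",
                    static_cast<long long>(write_request_size));
      }
      return 0;
    }

The AlterTable RPC therefore fails with Invalid argument, the test tears the cluster down (the "Killing ... with pid" lines below), and the case is reported OK.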
I20250411 13:58:09.649782 15812 external_mini_cluster.cc:1620] Killing /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu with pid 20605
W20250411 13:58:09.685710 20785 connection.cc:537] client connection to 127.15.113.1:33621 recv error: Network error: recv error from unknown peer: Transport endpoint is not connected (error 107)
W20250411 13:58:09.686199 20785 proxy.cc:239] Call had error, refreshing address and retrying: Network error: recv error from unknown peer: Transport endpoint is not connected (error 107)
I20250411 13:58:09.687114 15812 external_mini_cluster.cc:1620] Killing /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu with pid 20767
I20250411 13:58:09.723277 15812 external_mini_cluster.cc:1620] Killing /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu with pid 20907
I20250411 13:58:09.756995 15812 external_mini_cluster.cc:1620] Killing /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu with pid 20336
I20250411 13:58:09.788422 15812 external_mini_cluster.cc:1620] Killing /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu with pid 20408
I20250411 13:58:09.815510 15812 external_mini_cluster.cc:1620] Killing /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu with pid 20483
2025-04-11T13:58:09Z chronyd exiting
[       OK ] MasterReplicationAndRpcSizeLimitTest.AlterTable (15645 ms)
[ RUN      ] MasterReplicationAndRpcSizeLimitTest.TabletReports
2025-04-11T13:58:09Z chronyd version 4.6.1 starting (+CMDMON +NTP +REFCLOCK +RTC -PRIVDROP -SCFILTER -SIGND +ASYNCDNS -NTS -SECHASH -IPV6 +DEBUG)
2025-04-11T13:58:09Z Disabled control of system clock
I20250411 13:58:09.958007 15812 external_mini_cluster.cc:1351] Running /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu
/tmp/dist-test-taskQtg33F/build/tsan/bin/kudu
--fs_wal_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-0/wal
--fs_data_dirs=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-0/data
--block_manager=log
--webserver_interface=localhost
--never_fsync
--enable_minidumps=false
--redact=none
--metrics_log_interval_ms=1000
--log_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-0/logs
--server_dump_info_path=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-0/data/info.pb
--server_dump_info_format=pb
--rpc_server_allow_ephemeral_ports
--unlock_experimental_flags
--unlock_unsafe_flags
--logtostderr
--logbuflevel=-1
--ipki_server_key_size=768
--openssl_security_level_override=0
master
run
--ipki_ca_key_size=768
--tsk_num_rsa_bits=512
--rpc_bind_addresses=127.15.113.62:44055
--webserver_interface=127.15.113.62
--webserver_port=0
--builtin_ntp_servers=127.15.113.20:41765
--builtin_ntp_poll_interval_ms=100
--ntp_initial_sync_wait_secs=10
--time_source=builtin
--rpc_reuseport=true
--master_addresses=127.15.113.62:44055,127.15.113.61:46061,127.15.113.60:41163
--raft_heartbeat_interval_ms=500
--leader_failure_max_missed_heartbeat_periods=2
--rpc_max_message_size_enable_validation=false
--rpc_max_message_size=65536
--consensus_max_batch_size_bytes=64512
--catalog_manager_enable_chunked_tablet_reports=false with env {}
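Worth noting in this flag set: --consensus_max_batch_size_bytes=64512 sits 1024 bytes below --rpc_max_message_size=65536, presumably so a full consensus batch still fits under the RPC cap once message framing is added, while --rpc_max_message_size_enable_validation=false (flagged "unsafe" just below) permits an RPC cap far smaller than the production default. The headroom, as a quick computation (the framing interpretation is an assumption):

    // Headroom between the consensus batch cap and the RPC message cap.
    #include <cstdio>

    int main() {
      const int rpc_max_message_size = 65536;       // 64 KiB RPC cap
      const int consensus_max_batch_bytes = 64512;  // 63 KiB batch cap
      std::printf("headroom: %d bytes\n",
                  rpc_max_message_size - consensus_max_batch_bytes);  // 1024
      return 0;
    }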
W20250411 13:58:10.260973 21175 flags.cc:425] Enabled unsafe flag: --rpc_max_message_size_enable_validation=false
W20250411 13:58:10.261476 21175 flags.cc:425] Enabled unsafe flag: --openssl_security_level_override=0
W20250411 13:58:10.261788 21175 flags.cc:425] Enabled unsafe flag: --rpc_server_allow_ephemeral_ports=true
W20250411 13:58:10.262197 21175 flags.cc:425] Enabled unsafe flag: --never_fsync=true
W20250411 13:58:10.291518 21175 flags.cc:425] Enabled experimental flag: --ipki_ca_key_size=768
W20250411 13:58:10.291891 21175 flags.cc:425] Enabled experimental flag: --ipki_server_key_size=768
W20250411 13:58:10.292160 21175 flags.cc:425] Enabled experimental flag: --tsk_num_rsa_bits=512
W20250411 13:58:10.292378 21175 flags.cc:425] Enabled experimental flag: --rpc_reuseport=true
I20250411 13:58:10.325649 21175 master_runner.cc:386] Master server non-default flags:
--builtin_ntp_poll_interval_ms=100
--builtin_ntp_servers=127.15.113.20:41765
--ntp_initial_sync_wait_secs=10
--time_source=builtin
--consensus_max_batch_size_bytes=64512
--leader_failure_max_missed_heartbeat_periods=2
--fs_data_dirs=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-0/data
--fs_wal_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-0/wal
--catalog_manager_enable_chunked_tablet_reports=false
--ipki_ca_key_size=768
--master_addresses=127.15.113.62:44055,127.15.113.61:46061,127.15.113.60:41163
--rpc_max_message_size=65536
--rpc_max_message_size_enable_validation=false
--ipki_server_key_size=768
--openssl_security_level_override=0
--tsk_num_rsa_bits=512
--rpc_bind_addresses=127.15.113.62:44055
--rpc_reuseport=true
--rpc_server_allow_ephemeral_ports=true
--metrics_log_interval_ms=1000
--server_dump_info_format=pb
--server_dump_info_path=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-0/data/info.pb
--webserver_interface=127.15.113.62
--webserver_port=0
--never_fsync=true
--redact=none
--unlock_experimental_flags=true
--unlock_unsafe_flags=true
--enable_minidumps=false
--log_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-0/logs
--logbuflevel=-1
--logtostderr=true

Master server version:
kudu 1.18.0-SNAPSHOT
revision 586cfc20905df35f49541b53af4442736790b0c1
build type FASTDEBUG
built by None at 11 Apr 2025 13:43:20 UTC on 5fd53c4cbb9d
build id 5571
TSAN enabled
I20250411 13:58:10.327051 21175 env_posix.cc:2264] Not raising this process' open files per process limit of 1048576; it is already as high as it can go
I20250411 13:58:10.328634 21175 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:58:10.344115 21182 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:58:10.344136 21184 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:58:10.344496 21181 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:58:10.346325 21175 server_base.cc:1034] running on GCE node
I20250411 13:58:11.461174 21175 hybrid_clock.cc:584] initializing the hybrid clock with 'builtin' time source
I20250411 13:58:11.464179 21175 hybrid_clock.cc:630] waiting up to --ntp_initial_sync_wait_secs=10 seconds for the clock to synchronize
I20250411 13:58:11.465591 21175 hybrid_clock.cc:648] HybridClock initialized: now 1744379891465555 us; error 59 us; skew 500 ppm
I20250411 13:58:11.466423 21175 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:58:11.476584 21175 webserver.cc:466] Webserver started at http://127.15.113.62:36871/ using document root <none> and password file <none>
I20250411 13:58:11.477592 21175 fs_manager.cc:362] Metadata directory not provided
I20250411 13:58:11.477840 21175 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:58:11.478295 21175 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:58:11.482561 21175 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-0/data/instance:
uuid: "c617caebdb0c4ce485f80cea5edaa493"
format_stamp: "Formatted at 2025-04-11 13:58:11 on dist-test-slave-jcj2"
I20250411 13:58:11.483696 21175 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-0/wal/instance:
uuid: "c617caebdb0c4ce485f80cea5edaa493"
format_stamp: "Formatted at 2025-04-11 13:58:11 on dist-test-slave-jcj2"
I20250411 13:58:11.490758 21175 fs_manager.cc:696] Time spent creating directory manager: real 0.007s	user 0.006s	sys 0.001s
I20250411 13:58:11.496008 21191 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:58:11.496974 21175 fs_manager.cc:730] Time spent opening block manager: real 0.003s	user 0.003s	sys 0.002s
I20250411 13:58:11.497279 21175 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-0/data,/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-0/wal
uuid: "c617caebdb0c4ce485f80cea5edaa493"
format_stamp: "Formatted at 2025-04-11 13:58:11 on dist-test-slave-jcj2"
I20250411 13:58:11.497601 21175 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-0/wal
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-0/wal
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-0/data/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:58:11.556121 21175 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:58:11.557554 21175 env_posix.cc:2264] Not raising this process' running threads per effective uid limit of 18446744073709551615; it is already as high as it can go
I20250411 13:58:11.558029 21175 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:58:11.627820 21175 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.62:44055
I20250411 13:58:11.627887 21242 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.62:44055 every 8 connection(s)
I20250411 13:58:11.630437 21175 server_base.cc:1166] Dumped server information to /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-0/data/info.pb
I20250411 13:58:11.631820 15812 external_mini_cluster.cc:1413] Started /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu as pid 21175
I20250411 13:58:11.632268 15812 external_mini_cluster.cc:1427] Reading /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-0/wal/instance
I20250411 13:58:11.635634 15812 external_mini_cluster.cc:1351] Running /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu
/tmp/dist-test-taskQtg33F/build/tsan/bin/kudu
--fs_wal_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-1/wal
--fs_data_dirs=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-1/data
--block_manager=log
--webserver_interface=localhost
--never_fsync
--enable_minidumps=false
--redact=none
--metrics_log_interval_ms=1000
--log_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-1/logs
--server_dump_info_path=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-1/data/info.pb
--server_dump_info_format=pb
--rpc_server_allow_ephemeral_ports
--unlock_experimental_flags
--unlock_unsafe_flags
--logtostderr
--logbuflevel=-1
--ipki_server_key_size=768
--openssl_security_level_override=0
master
run
--ipki_ca_key_size=768
--tsk_num_rsa_bits=512
--rpc_bind_addresses=127.15.113.61:46061
--webserver_interface=127.15.113.61
--webserver_port=0
--builtin_ntp_servers=127.15.113.20:41765
--builtin_ntp_poll_interval_ms=100
--ntp_initial_sync_wait_secs=10
--time_source=builtin
--rpc_reuseport=true
--master_addresses=127.15.113.62:44055,127.15.113.61:46061,127.15.113.60:41163
--raft_heartbeat_interval_ms=500
--leader_failure_max_missed_heartbeat_periods=2
--rpc_max_message_size_enable_validation=false
--rpc_max_message_size=65536
--consensus_max_batch_size_bytes=64512
--catalog_manager_enable_chunked_tablet_reports=false with env {}
I20250411 13:58:11.636619 21243 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 00000000000000000000000000000000. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:11.646808 21243 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 44055 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:58:11.670451 21243 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } has no permanent_uuid. Determining permanent_uuid...
W20250411 13:58:11.672554 21192 proxy.cc:239] Call had error, refreshing address and retrying: Network error: Client connection negotiation failed: client connection to 127.15.113.61:46061: connect: Connection refused (error 111)
W20250411 13:58:11.676523 21243 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:46061: Network error: Client connection negotiation failed: client connection to 127.15.113.61:46061: connect: Connection refused (error 111)
I20250411 13:58:11.726114 21243 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } attempt: 1
W20250411 13:58:11.729780 21243 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:46061: Network error: Client connection negotiation failed: client connection to 127.15.113.61:46061: connect: Connection refused (error 111)
I20250411 13:58:11.798154 21243 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } attempt: 2
W20250411 13:58:11.801852 21243 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:46061: Network error: Client connection negotiation failed: client connection to 127.15.113.61:46061: connect: Connection refused (error 111)
I20250411 13:58:11.893306 21243 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } attempt: 3
W20250411 13:58:11.896965 21243 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:46061: Network error: Client connection negotiation failed: client connection to 127.15.113.61:46061: connect: Connection refused (error 111)
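The refusals above are just startup ordering: master-0 (pid 21175) is probing 127.15.113.61:46061 before master-1 has bound its RPC server, and consensus_peers.cc keeps retrying at growing intervals (roughly 50 ms, 70 ms, 95 ms, approaching a second by attempt 7) until the peer answers. A minimal retry-with-backoff sketch consistent with those timestamps; the multiplier and attempt count are guesses, not Kudu's actual policy:

    // Illustrative retry loop with multiplicative backoff (constants invented).
    #include <chrono>
    #include <cstdio>
    #include <thread>

    bool TryGetPermanentUuid() { return false; }  // stands in for the RPC probe

    int main() {
      auto delay = std::chrono::milliseconds(50);
      for (int attempt = 1; attempt <= 7; ++attempt) {
        if (TryGetPermanentUuid()) return 0;
        std::printf("Retrying to get permanent uuid, attempt: %d\n", attempt);
        std::this_thread::sleep_for(delay);
        delay = delay * 3 / 2;  // grow the interval between attempts
      }
      return 1;
    }

Once master-1 comes up (pid 21247, "RPC server started. Bound to: 127.15.113.61:46061" below), attempt 7 succeeds and master-0 moves on to resolving 127.15.113.60.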
W20250411 13:58:11.943588 21247 flags.cc:425] Enabled unsafe flag: --rpc_max_message_size_enable_validation=false
W20250411 13:58:11.944054 21247 flags.cc:425] Enabled unsafe flag: --openssl_security_level_override=0
W20250411 13:58:11.944344 21247 flags.cc:425] Enabled unsafe flag: --rpc_server_allow_ephemeral_ports=true
W20250411 13:58:11.944736 21247 flags.cc:425] Enabled unsafe flag: --never_fsync=true
W20250411 13:58:11.973774 21247 flags.cc:425] Enabled experimental flag: --ipki_ca_key_size=768
W20250411 13:58:11.974200 21247 flags.cc:425] Enabled experimental flag: --ipki_server_key_size=768
W20250411 13:58:11.974447 21247 flags.cc:425] Enabled experimental flag: --tsk_num_rsa_bits=512
W20250411 13:58:11.974697 21247 flags.cc:425] Enabled experimental flag: --rpc_reuseport=true
I20250411 13:58:12.007511 21247 master_runner.cc:386] Master server non-default flags:
--builtin_ntp_poll_interval_ms=100
--builtin_ntp_servers=127.15.113.20:41765
--ntp_initial_sync_wait_secs=10
--time_source=builtin
--consensus_max_batch_size_bytes=64512
--leader_failure_max_missed_heartbeat_periods=2
--fs_data_dirs=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-1/data
--fs_wal_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-1/wal
--catalog_manager_enable_chunked_tablet_reports=false
--ipki_ca_key_size=768
--master_addresses=127.15.113.62:44055,127.15.113.61:46061,127.15.113.60:41163
--rpc_max_message_size=65536
--rpc_max_message_size_enable_validation=false
--ipki_server_key_size=768
--openssl_security_level_override=0
--tsk_num_rsa_bits=512
--rpc_bind_addresses=127.15.113.61:46061
--rpc_reuseport=true
--rpc_server_allow_ephemeral_ports=true
--metrics_log_interval_ms=1000
--server_dump_info_format=pb
--server_dump_info_path=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-1/data/info.pb
--webserver_interface=127.15.113.61
--webserver_port=0
--never_fsync=true
--redact=none
--unlock_experimental_flags=true
--unlock_unsafe_flags=true
--enable_minidumps=false
--log_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-1/logs
--logbuflevel=-1
--logtostderr=true

Master server version:
kudu 1.18.0-SNAPSHOT
revision 586cfc20905df35f49541b53af4442736790b0c1
build type FASTDEBUG
built by None at 11 Apr 2025 13:43:20 UTC on 5fd53c4cbb9d
build id 5571
TSAN enabled
I20250411 13:58:12.008836 21247 env_posix.cc:2264] Not raising this process' open files per process limit of 1048576; it is already as high as it can go
I20250411 13:58:12.010479 21247 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:58:12.023959 21254 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:58:12.040396 21243 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } attempt: 4
W20250411 13:58:12.047648 21243 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:46061: Network error: Client connection negotiation failed: client connection to 127.15.113.61:46061: connect: Connection refused (error 111)
I20250411 13:58:12.347213 21243 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } attempt: 5
W20250411 13:58:12.354856 21243 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:46061: Network error: Client connection negotiation failed: client connection to 127.15.113.61:46061: connect: Connection refused (error 111)
I20250411 13:58:12.902416 21243 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } attempt: 6
W20250411 13:58:12.922327 21243 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.61:46061: Network error: Client connection negotiation failed: client connection to 127.15.113.61:46061: connect: Connection refused (error 111)
W20250411 13:58:12.023959 21255 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:58:13.169426 21256 instance_detector.cc:116] could not retrieve GCE instance metadata: Timed out: curl timeout: Timeout was reached: Resolving timed out after 1145 milliseconds
W20250411 13:58:13.170729 21257 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:58:13.174000 21247 thread.cc:640] OpenStack (cloud detector) Time spent creating pthread: real 1.153s	user 0.382s	sys 0.767s
W20250411 13:58:13.174265 21247 thread.cc:606] OpenStack (cloud detector) Time spent starting thread: real 1.154s	user 0.382s	sys 0.767s
I20250411 13:58:13.174476 21247 server_base.cc:1029] Not found: could not retrieve instance metadata: unable to detect cloud type of this node, probably running in non-cloud environment
I20250411 13:58:13.175557 21247 hybrid_clock.cc:584] initializing the hybrid clock with 'builtin' time source
I20250411 13:58:13.178103 21247 hybrid_clock.cc:630] waiting up to --ntp_initial_sync_wait_secs=10 seconds for the clock to synchronize
I20250411 13:58:13.179436 21247 hybrid_clock.cc:648] HybridClock initialized: now 1744379893179390 us; error 63 us; skew 500 ppm
I20250411 13:58:13.180214 21247 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:58:13.187500 21247 webserver.cc:466] Webserver started at http://127.15.113.61:44779/ using document root <none> and password file <none>
I20250411 13:58:13.188515 21247 fs_manager.cc:362] Metadata directory not provided
I20250411 13:58:13.188766 21247 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:58:13.189224 21247 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:58:13.193593 21247 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-1/data/instance:
uuid: "9c23878334f04b2186321611d56b1747"
format_stamp: "Formatted at 2025-04-11 13:58:13 on dist-test-slave-jcj2"
I20250411 13:58:13.194669 21247 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-1/wal/instance:
uuid: "9c23878334f04b2186321611d56b1747"
format_stamp: "Formatted at 2025-04-11 13:58:13 on dist-test-slave-jcj2"
I20250411 13:58:13.202544 21247 fs_manager.cc:696] Time spent creating directory manager: real 0.007s	user 0.005s	sys 0.001s
I20250411 13:58:13.208648 21268 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:58:13.209851 21247 fs_manager.cc:730] Time spent opening block manager: real 0.004s	user 0.005s	sys 0.000s
I20250411 13:58:13.210175 21247 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-1/data,/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-1/wal
uuid: "9c23878334f04b2186321611d56b1747"
format_stamp: "Formatted at 2025-04-11 13:58:13 on dist-test-slave-jcj2"
I20250411 13:58:13.210502 21247 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-1/wal
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-1/wal
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-1/data/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:58:13.297251 21247 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:58:13.298764 21247 env_posix.cc:2264] Not raising this process' running threads per effective uid limit of 18446744073709551615; it is already as high as it can go
I20250411 13:58:13.299243 21247 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:58:13.367278 21247 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.61:46061
I20250411 13:58:13.367359 21319 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.61:46061 every 8 connection(s)
I20250411 13:58:13.369956 21247 server_base.cc:1166] Dumped server information to /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-1/data/info.pb
I20250411 13:58:13.374106 15812 external_mini_cluster.cc:1413] Started /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu as pid 21247
I20250411 13:58:13.374933 15812 external_mini_cluster.cc:1427] Reading /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-1/wal/instance
I20250411 13:58:13.375962 21320 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 00000000000000000000000000000000. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:13.379292 15812 external_mini_cluster.cc:1351] Running /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu
/tmp/dist-test-taskQtg33F/build/tsan/bin/kudu
--fs_wal_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-2/wal
--fs_data_dirs=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-2/data
--block_manager=log
--webserver_interface=localhost
--never_fsync
--enable_minidumps=false
--redact=none
--metrics_log_interval_ms=1000
--log_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-2/logs
--server_dump_info_path=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-2/data/info.pb
--server_dump_info_format=pb
--rpc_server_allow_ephemeral_ports
--unlock_experimental_flags
--unlock_unsafe_flags
--logtostderr
--logbuflevel=-1
--ipki_server_key_size=768
--openssl_security_level_override=0
master
run
--ipki_ca_key_size=768
--tsk_num_rsa_bits=512
--rpc_bind_addresses=127.15.113.60:41163
--webserver_interface=127.15.113.60
--webserver_port=0
--builtin_ntp_servers=127.15.113.20:41765
--builtin_ntp_poll_interval_ms=100
--ntp_initial_sync_wait_secs=10
--time_source=builtin
--rpc_reuseport=true
--master_addresses=127.15.113.62:44055,127.15.113.61:46061,127.15.113.60:41163
--raft_heartbeat_interval_ms=500
--leader_failure_max_missed_heartbeat_periods=2
--rpc_max_message_size_enable_validation=false
--rpc_max_message_size=65536
--consensus_max_batch_size_bytes=64512
--catalog_manager_enable_chunked_tablet_reports=false with env {}
I20250411 13:58:13.393147 21320 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 44055 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:58:13.413712 21320 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:58:13.428224 21320 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } has no permanent_uuid. Determining permanent_uuid...
W20250411 13:58:13.430487 21272 proxy.cc:239] Call had error, refreshing address and retrying: Network error: Client connection negotiation failed: client connection to 127.15.113.60:41163: connect: Connection refused (error 111)
W20250411 13:58:13.434844 21320 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:41163: Network error: Client connection negotiation failed: client connection to 127.15.113.60:41163: connect: Connection refused (error 111)
I20250411 13:58:13.484306 21320 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } attempt: 1
W20250411 13:58:13.488588 21320 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:41163: Network error: Client connection negotiation failed: client connection to 127.15.113.60:41163: connect: Connection refused (error 111)
I20250411 13:58:13.556962 21320 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } attempt: 2
W20250411 13:58:13.561091 21320 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:41163: Network error: Client connection negotiation failed: client connection to 127.15.113.60:41163: connect: Connection refused (error 111)
I20250411 13:58:13.652498 21320 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } attempt: 3
W20250411 13:58:13.656469 21320 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:41163: Network error: Client connection negotiation failed: client connection to 127.15.113.60:41163: connect: Connection refused (error 111)
W20250411 13:58:13.705282 21324 flags.cc:425] Enabled unsafe flag: --rpc_max_message_size_enable_validation=false
W20250411 13:58:13.705799 21324 flags.cc:425] Enabled unsafe flag: --openssl_security_level_override=0
W20250411 13:58:13.706135 21324 flags.cc:425] Enabled unsafe flag: --rpc_server_allow_ephemeral_ports=true
W20250411 13:58:13.706562 21324 flags.cc:425] Enabled unsafe flag: --never_fsync=true
W20250411 13:58:13.737197 21324 flags.cc:425] Enabled experimental flag: --ipki_ca_key_size=768
W20250411 13:58:13.737612 21324 flags.cc:425] Enabled experimental flag: --ipki_server_key_size=768
W20250411 13:58:13.737879 21324 flags.cc:425] Enabled experimental flag: --tsk_num_rsa_bits=512
W20250411 13:58:13.738119 21324 flags.cc:425] Enabled experimental flag: --rpc_reuseport=true
I20250411 13:58:13.772634 21324 master_runner.cc:386] Master server non-default flags:
--builtin_ntp_poll_interval_ms=100
--builtin_ntp_servers=127.15.113.20:41765
--ntp_initial_sync_wait_secs=10
--time_source=builtin
--consensus_max_batch_size_bytes=64512
--leader_failure_max_missed_heartbeat_periods=2
--fs_data_dirs=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-2/data
--fs_wal_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-2/wal
--catalog_manager_enable_chunked_tablet_reports=false
--ipki_ca_key_size=768
--master_addresses=127.15.113.62:44055,127.15.113.61:46061,127.15.113.60:41163
--rpc_max_message_size=65536
--rpc_max_message_size_enable_validation=false
--ipki_server_key_size=768
--openssl_security_level_override=0
--tsk_num_rsa_bits=512
--rpc_bind_addresses=127.15.113.60:41163
--rpc_reuseport=true
--rpc_server_allow_ephemeral_ports=true
--metrics_log_interval_ms=1000
--server_dump_info_format=pb
--server_dump_info_path=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-2/data/info.pb
--webserver_interface=127.15.113.60
--webserver_port=0
--never_fsync=true
--redact=none
--unlock_experimental_flags=true
--unlock_unsafe_flags=true
--enable_minidumps=false
--log_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-2/logs
--logbuflevel=-1
--logtostderr=true

Master server version:
kudu 1.18.0-SNAPSHOT
revision 586cfc20905df35f49541b53af4442736790b0c1
build type FASTDEBUG
built by None at 11 Apr 2025 13:43:20 UTC on 5fd53c4cbb9d
build id 5571
TSAN enabled
I20250411 13:58:13.774039 21324 env_posix.cc:2264] Not raising this process' open files per process limit of 1048576; it is already as high as it can go
I20250411 13:58:13.775674 21324 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:58:13.789039 21333 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:58:13.799829 21320 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } attempt: 4
W20250411 13:58:13.804262 21320 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:41163: Network error: Client connection negotiation failed: client connection to 127.15.113.60:41163: connect: Connection refused (error 111)
I20250411 13:58:13.982952 21243 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } attempt: 7
I20250411 13:58:13.997680 21243 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } has no permanent_uuid. Determining permanent_uuid...
W20250411 13:58:14.004343 21243 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:41163: Network error: Client connection negotiation failed: client connection to 127.15.113.60:41163: connect: Connection refused (error 111)
I20250411 13:58:14.062711 21243 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } attempt: 1
W20250411 13:58:14.067790 21243 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:41163: Network error: Client connection negotiation failed: client connection to 127.15.113.60:41163: connect: Connection refused (error 111)
I20250411 13:58:14.103673 21320 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } attempt: 5
W20250411 13:58:14.109012 21320 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:41163: Network error: Client connection negotiation failed: client connection to 127.15.113.60:41163: connect: Connection refused (error 111)
I20250411 13:58:14.149284 21243 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } attempt: 2
W20250411 13:58:14.153997 21243 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:41163: Network error: Client connection negotiation failed: client connection to 127.15.113.60:41163: connect: Connection refused (error 111)
I20250411 13:58:14.239465 21243 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } attempt: 3
W20250411 13:58:14.244374 21243 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:41163: Network error: Client connection negotiation failed: client connection to 127.15.113.60:41163: connect: Connection refused (error 111)
I20250411 13:58:14.384758 21243 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } attempt: 4
W20250411 13:58:14.388504 21243 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:41163: Network error: Client connection negotiation failed: client connection to 127.15.113.60:41163: connect: Connection refused (error 111)
I20250411 13:58:14.656524 21320 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } attempt: 6
W20250411 13:58:14.670272 21320 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:41163: Network error: Client connection negotiation failed: client connection to 127.15.113.60:41163: connect: Connection refused (error 111)
I20250411 13:58:14.671828 21243 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } attempt: 5
W20250411 13:58:14.675691 21243 consensus_peers.cc:646] Error getting permanent uuid from config peer 127.15.113.60:41163: Network error: Client connection negotiation failed: client connection to 127.15.113.60:41163: connect: Connection refused (error 111)
W20250411 13:58:13.789582 21332 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:58:13.790632 21335 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:58:13.792029 21324 server_base.cc:1034] running on GCE node
I20250411 13:58:14.940383 21324 hybrid_clock.cc:584] initializing the hybrid clock with 'builtin' time source
I20250411 13:58:14.942752 21324 hybrid_clock.cc:630] waiting up to --ntp_initial_sync_wait_secs=10 seconds for the clock to synchronize
I20250411 13:58:14.944137 21324 hybrid_clock.cc:648] HybridClock initialized: now 1744379894944096 us; error 64 us; skew 500 ppm
I20250411 13:58:14.944895 21324 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:58:14.951211 21324 webserver.cc:466] Webserver started at http://127.15.113.60:34943/ using document root <none> and password file <none>
I20250411 13:58:14.952342 21324 fs_manager.cc:362] Metadata directory not provided
I20250411 13:58:14.952594 21324 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:58:14.953258 21324 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:58:14.957782 21324 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-2/data/instance:
uuid: "23a3489cd63645c5a7e6e398a53cd182"
format_stamp: "Formatted at 2025-04-11 13:58:14 on dist-test-slave-jcj2"
I20250411 13:58:14.958861 21324 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-2/wal/instance:
uuid: "23a3489cd63645c5a7e6e398a53cd182"
format_stamp: "Formatted at 2025-04-11 13:58:14 on dist-test-slave-jcj2"
I20250411 13:58:14.965708 21324 fs_manager.cc:696] Time spent creating directory manager: real 0.006s	user 0.001s	sys 0.005s
I20250411 13:58:14.970867 21347 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:58:14.971804 21324 fs_manager.cc:730] Time spent opening block manager: real 0.003s	user 0.001s	sys 0.003s
I20250411 13:58:14.972108 21324 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-2/data,/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-2/wal
uuid: "23a3489cd63645c5a7e6e398a53cd182"
format_stamp: "Formatted at 2025-04-11 13:58:14 on dist-test-slave-jcj2"
I20250411 13:58:14.972409 21324 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-2/wal
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-2/wal
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-2/data/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:58:15.036288 21324 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:58:15.037708 21324 env_posix.cc:2264] Not raising this process' running threads per effective uid limit of 18446744073709551615; it is already as high as it can go
I20250411 13:58:15.038132 21324 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:58:15.104007 21324 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.60:41163
I20250411 13:58:15.104070 21398 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.60:41163 every 8 connection(s)
I20250411 13:58:15.106632 21324 server_base.cc:1166] Dumped server information to /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-2/data/info.pb
I20250411 13:58:15.108563 15812 external_mini_cluster.cc:1413] Started /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu as pid 21324
I20250411 13:58:15.109100 15812 external_mini_cluster.cc:1427] Reading /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/master-2/wal/instance
I20250411 13:58:15.112723 21399 data_dirs.cc:400] Could only allocate 1 dirs of requested 3 for tablet 00000000000000000000000000000000. 1 dirs total, 0 dirs full, 0 dirs failed
I20250411 13:58:15.128916 21399 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.62" port: 44055 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:58:15.151458 21399 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:58:15.160730 21399 sys_catalog.cc:422] member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } has no permanent_uuid. Determining permanent_uuid...
I20250411 13:58:15.176571 21399 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 23a3489cd63645c5a7e6e398a53cd182: Bootstrap starting.
I20250411 13:58:15.182025 21399 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P 23a3489cd63645c5a7e6e398a53cd182: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:15.184168 21399 log.cc:826] T 00000000000000000000000000000000 P 23a3489cd63645c5a7e6e398a53cd182: Log is configured to *not* fsync() on all Append() calls
I20250411 13:58:15.188444 21399 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 23a3489cd63645c5a7e6e398a53cd182: No bootstrap required, opened a new log
I20250411 13:58:15.206658 21399 raft_consensus.cc:357] T 00000000000000000000000000000000 P 23a3489cd63645c5a7e6e398a53cd182 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "c617caebdb0c4ce485f80cea5edaa493" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 44055 } } peers { permanent_uuid: "9c23878334f04b2186321611d56b1747" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } } peers { permanent_uuid: "23a3489cd63645c5a7e6e398a53cd182" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } }
I20250411 13:58:15.207597 21399 raft_consensus.cc:383] T 00000000000000000000000000000000 P 23a3489cd63645c5a7e6e398a53cd182 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:15.207810 21399 raft_consensus.cc:738] T 00000000000000000000000000000000 P 23a3489cd63645c5a7e6e398a53cd182 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 23a3489cd63645c5a7e6e398a53cd182, State: Initialized, Role: FOLLOWER
I20250411 13:58:15.208452 21399 consensus_queue.cc:260] T 00000000000000000000000000000000 P 23a3489cd63645c5a7e6e398a53cd182 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "c617caebdb0c4ce485f80cea5edaa493" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 44055 } } peers { permanent_uuid: "9c23878334f04b2186321611d56b1747" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } } peers { permanent_uuid: "23a3489cd63645c5a7e6e398a53cd182" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } }
I20250411 13:58:15.210489 21407 sys_catalog.cc:455] T 00000000000000000000000000000000 P 23a3489cd63645c5a7e6e398a53cd182 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "c617caebdb0c4ce485f80cea5edaa493" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 44055 } } peers { permanent_uuid: "9c23878334f04b2186321611d56b1747" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } } peers { permanent_uuid: "23a3489cd63645c5a7e6e398a53cd182" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } } }
I20250411 13:58:15.211130 21407 sys_catalog.cc:458] T 00000000000000000000000000000000 P 23a3489cd63645c5a7e6e398a53cd182 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:58:15.211879 21399 sys_catalog.cc:564] T 00000000000000000000000000000000 P 23a3489cd63645c5a7e6e398a53cd182 [sys.catalog]: configured and running, proceeding with master startup.
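[triage note] Each master starts its sys catalog replica as a FOLLOWER and deliberately expires the leader-failure detector, so the very first timeout check already sees "no heartbeat within the timeout" and can trigger an election ("Expiring failure detector timer" above). An illustrative model only, not Kudu's RaftConsensus code; the 1000 ms figure assumes the --raft_heartbeat_interval_ms=500 and --leader_failure_max_missed_heartbeat_periods=2 flags this cluster runs with:

    // Why an expired failure detector makes a prompt election likely (sketch).
    #include <chrono>
    #include <cstdio>

    int main() {
      using Clock = std::chrono::steady_clock;
      const auto timeout = std::chrono::milliseconds(1000);  // 2 * 500 ms heartbeat
      // "Expiring" the detector: pretend the last heartbeat was long ago.
      auto last_heartbeat = Clock::now() - 2 * timeout;
      if (Clock::now() - last_heartbeat > timeout) {
        std::puts("failure detector expired -> start pre-election");
      }
      return 0;
    }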
I20250411 13:58:15.228024 21243 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } attempt: 6
W20250411 13:58:15.238838 21418 catalog_manager.cc:1560] T 00000000000000000000000000000000 P 23a3489cd63645c5a7e6e398a53cd182: loading cluster ID for follower catalog manager: Not found: cluster ID entry not found
W20250411 13:58:15.239674 21418 catalog_manager.cc:875] Not found: cluster ID entry not found: failed to prepare follower catalog manager, will retry
I20250411 13:58:15.251214 21243 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493: Bootstrap starting.
I20250411 13:58:15.232012 21324 master_runner.cc:186] Error getting master registration for 127.15.113.62:44055: OK, instance_id { permanent_uuid: "c617caebdb0c4ce485f80cea5edaa493" instance_seqno: 1744379891608906 } error { code: CATALOG_MANAGER_NOT_INITIALIZED status { code: SERVICE_UNAVAILABLE message: "Catalog manager is not initialized. State: Starting" } }
I20250411 13:58:15.256382 21324 master_runner.cc:418] Couldn't verify the masters in the cluster. Trying again...
I20250411 13:58:15.258844 21243 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:15.261003 21243 log.cc:826] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493: Log is configured to *not* fsync() on all Append() calls
I20250411 13:58:15.266009 21243 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493: No bootstrap required, opened a new log
I20250411 13:58:15.290551 21243 raft_consensus.cc:357] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "c617caebdb0c4ce485f80cea5edaa493" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 44055 } } peers { permanent_uuid: "9c23878334f04b2186321611d56b1747" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } } peers { permanent_uuid: "23a3489cd63645c5a7e6e398a53cd182" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } }
I20250411 13:58:15.291251 21243 raft_consensus.cc:383] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:15.291492 21243 raft_consensus.cc:738] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: c617caebdb0c4ce485f80cea5edaa493, State: Initialized, Role: FOLLOWER
I20250411 13:58:15.292141 21243 consensus_queue.cc:260] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "c617caebdb0c4ce485f80cea5edaa493" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 44055 } } peers { permanent_uuid: "9c23878334f04b2186321611d56b1747" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } } peers { permanent_uuid: "23a3489cd63645c5a7e6e398a53cd182" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } }
W20250411 13:58:15.292876 21353 tablet.cc:2367] T 00000000000000000000000000000000 P 23a3489cd63645c5a7e6e398a53cd182: Can't schedule compaction. Clean time has not been advanced past its initial value.
I20250411 13:58:15.295490 21422 sys_catalog.cc:455] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "c617caebdb0c4ce485f80cea5edaa493" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 44055 } } peers { permanent_uuid: "9c23878334f04b2186321611d56b1747" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } } peers { permanent_uuid: "23a3489cd63645c5a7e6e398a53cd182" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } } }
I20250411 13:58:15.296262 21422 sys_catalog.cc:458] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:58:15.296765 21243 sys_catalog.cc:564] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [sys.catalog]: configured and running, proceeding with master startup.
W20250411 13:58:15.316704 21197 tablet.cc:2367] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493: Can't schedule compaction. Clean time has not been advanced past its initial value.
W20250411 13:58:15.333961 21433 catalog_manager.cc:1560] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493: loading cluster ID for follower catalog manager: Not found: cluster ID entry not found
W20250411 13:58:15.334399 21433 catalog_manager.cc:875] Not found: cluster ID entry not found: failed to prepare follower catalog manager, will retry
I20250411 13:58:15.325078 21175 master_runner.cc:186] Error getting master registration for 127.15.113.61:46061: OK, instance_id { permanent_uuid: "9c23878334f04b2186321611d56b1747" instance_seqno: 1744379893348030 } error { code: CATALOG_MANAGER_NOT_INITIALIZED status { code: SERVICE_UNAVAILABLE message: "Catalog manager is not initialized. State: Starting" } }
I20250411 13:58:15.350510 21175 master_runner.cc:418] Couldn't verify the masters in the cluster. Trying again...
I20250411 13:58:15.499614 21422 raft_consensus.cc:491] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [term 0 FOLLOWER]: Starting pre-election (no leader contacted us within the election timeout)
I20250411 13:58:15.500159 21422 raft_consensus.cc:513] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [term 0 FOLLOWER]: Starting pre-election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "c617caebdb0c4ce485f80cea5edaa493" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 44055 } } peers { permanent_uuid: "9c23878334f04b2186321611d56b1747" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } } peers { permanent_uuid: "23a3489cd63645c5a7e6e398a53cd182" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } }
I20250411 13:58:15.502652 21422 leader_election.cc:290] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [CANDIDATE]: Term 1 pre-election: Requested pre-vote from peers 9c23878334f04b2186321611d56b1747 (127.15.113.61:46061), 23a3489cd63645c5a7e6e398a53cd182 (127.15.113.60:41163)
I20250411 13:58:15.507189 21374 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "c617caebdb0c4ce485f80cea5edaa493" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "23a3489cd63645c5a7e6e398a53cd182" is_pre_election: true
I20250411 13:58:15.508036 21374 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 23a3489cd63645c5a7e6e398a53cd182 [term 0 FOLLOWER]: Leader pre-election vote request: Granting yes vote for candidate c617caebdb0c4ce485f80cea5edaa493 in term 0.
I20250411 13:58:15.509460 21195 leader_election.cc:304] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [CANDIDATE]: Term 1 pre-election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 23a3489cd63645c5a7e6e398a53cd182, c617caebdb0c4ce485f80cea5edaa493; no voters: 
I20250411 13:58:15.510252 21422 raft_consensus.cc:2802] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [term 0 FOLLOWER]: Leader pre-election won for term 1
I20250411 13:58:15.510581 21422 raft_consensus.cc:491] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [term 0 FOLLOWER]: Starting leader election (no leader contacted us within the election timeout)
I20250411 13:58:15.510903 21422 raft_consensus.cc:3058] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:15.517606 21422 raft_consensus.cc:513] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [term 1 FOLLOWER]: Starting leader election with config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "c617caebdb0c4ce485f80cea5edaa493" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 44055 } } peers { permanent_uuid: "9c23878334f04b2186321611d56b1747" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } } peers { permanent_uuid: "23a3489cd63645c5a7e6e398a53cd182" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } }
I20250411 13:58:15.519343 21422 leader_election.cc:290] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [CANDIDATE]: Term 1 election: Requested vote from peers 9c23878334f04b2186321611d56b1747 (127.15.113.61:46061), 23a3489cd63645c5a7e6e398a53cd182 (127.15.113.60:41163)
I20250411 13:58:15.520825 21374 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "c617caebdb0c4ce485f80cea5edaa493" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "23a3489cd63645c5a7e6e398a53cd182"
I20250411 13:58:15.521332 21374 raft_consensus.cc:3058] T 00000000000000000000000000000000 P 23a3489cd63645c5a7e6e398a53cd182 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:15.528594 21374 raft_consensus.cc:2466] T 00000000000000000000000000000000 P 23a3489cd63645c5a7e6e398a53cd182 [term 1 FOLLOWER]: Leader election vote request: Granting yes vote for candidate c617caebdb0c4ce485f80cea5edaa493 in term 1.
I20250411 13:58:15.530112 21195 leader_election.cc:304] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [CANDIDATE]: Term 1 election: Election decided. Result: candidate won. Election summary: received 2 responses out of 3 voters: 2 yes votes; 0 no votes. yes voters: 23a3489cd63645c5a7e6e398a53cd182, c617caebdb0c4ce485f80cea5edaa493; no voters: 
I20250411 13:58:15.537804 21422 raft_consensus.cc:2802] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [term 1 FOLLOWER]: Leader election won for term 1
I20250411 13:58:15.545783 21422 raft_consensus.cc:695] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [term 1 LEADER]: Becoming Leader. State: Replica: c617caebdb0c4ce485f80cea5edaa493, State: Running, Role: LEADER
I20250411 13:58:15.546756 21422 consensus_queue.cc:237] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [LEADER]: Queue going to LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 1, Majority size: 2, State: 0, Mode: LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "c617caebdb0c4ce485f80cea5edaa493" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 44055 } } peers { permanent_uuid: "9c23878334f04b2186321611d56b1747" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } } peers { permanent_uuid: "23a3489cd63645c5a7e6e398a53cd182" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } }
I20250411 13:58:15.558142 21435 sys_catalog.cc:455] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [sys.catalog]: SysCatalogTable state changed. Reason: New leader c617caebdb0c4ce485f80cea5edaa493. Latest consensus state: current_term: 1 leader_uuid: "c617caebdb0c4ce485f80cea5edaa493" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "c617caebdb0c4ce485f80cea5edaa493" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 44055 } } peers { permanent_uuid: "9c23878334f04b2186321611d56b1747" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } } peers { permanent_uuid: "23a3489cd63645c5a7e6e398a53cd182" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } } }
I20250411 13:58:15.558943 21435 sys_catalog.cc:458] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [sys.catalog]: This master's current role is: LEADER
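[triage note] The election above is decided as soon as a majority is reached: with 3 voters, 2 yes votes settle it before the third response even arrives ("received 2 responses out of 3 voters: 2 yes votes"). A minimal sketch of that counting rule, illustrative only and not Kudu's LeaderElection class:

    // Raft-style vote counting: majority of voters decides early (sketch).
    #include <cstdio>

    bool ElectionDecided(int voters, int yes_votes, int no_votes, bool* won) {
      const int majority = voters / 2 + 1;
      if (yes_votes >= majority) { *won = true;  return true; }
      if (no_votes  >= majority) { *won = false; return true; }
      return false;  // still waiting on more responses
    }

    int main() {
      bool won = false;
      // 2 responses out of 3 voters: 2 yes, 0 no -> candidate won.
      if (ElectionDecided(3, 2, 0, &won)) {
        std::printf("decided: candidate %s\n", won ? "won" : "lost");
      }
      return 0;
    }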
I20250411 13:58:15.569022 21437 catalog_manager.cc:1477] Loading table and tablet metadata into memory...
I20250411 13:58:15.575639 21437 catalog_manager.cc:1486] Initializing Kudu cluster ID...
I20250411 13:58:15.598682 21374 raft_consensus.cc:1273] T 00000000000000000000000000000000 P 23a3489cd63645c5a7e6e398a53cd182 [term 1 FOLLOWER]: Refusing update from remote peer c617caebdb0c4ce485f80cea5edaa493: Log matching property violated. Preceding OpId in replica: term: 0 index: 0. Preceding OpId from leader: term: 1 index: 2. (index mismatch)
I20250411 13:58:15.600432 21435 consensus_queue.cc:1035] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [LEADER]: Connected to new peer: Peer: permanent_uuid: "23a3489cd63645c5a7e6e398a53cd182" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:58:15.632735 21407 sys_catalog.cc:455] T 00000000000000000000000000000000 P 23a3489cd63645c5a7e6e398a53cd182 [sys.catalog]: SysCatalogTable state changed. Reason: New leader c617caebdb0c4ce485f80cea5edaa493. Latest consensus state: current_term: 1 leader_uuid: "c617caebdb0c4ce485f80cea5edaa493" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "c617caebdb0c4ce485f80cea5edaa493" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 44055 } } peers { permanent_uuid: "9c23878334f04b2186321611d56b1747" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } } peers { permanent_uuid: "23a3489cd63645c5a7e6e398a53cd182" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } } }
I20250411 13:58:15.633821 21407 sys_catalog.cc:458] T 00000000000000000000000000000000 P 23a3489cd63645c5a7e6e398a53cd182 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:58:15.640448 21439 mvcc.cc:204] Tried to move back new op lower bound from 7144980052342935552 to 7144980052169117696. Current Snapshot: MvccSnapshot[applied={T|T < 7144980052342935552}]
I20250411 13:58:15.646456 21407 sys_catalog.cc:455] T 00000000000000000000000000000000 P 23a3489cd63645c5a7e6e398a53cd182 [sys.catalog]: SysCatalogTable state changed. Reason: Replicated consensus-only round. Latest consensus state: current_term: 1 leader_uuid: "c617caebdb0c4ce485f80cea5edaa493" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "c617caebdb0c4ce485f80cea5edaa493" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 44055 } } peers { permanent_uuid: "9c23878334f04b2186321611d56b1747" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } } peers { permanent_uuid: "23a3489cd63645c5a7e6e398a53cd182" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } } }
I20250411 13:58:15.647248 21407 sys_catalog.cc:458] T 00000000000000000000000000000000 P 23a3489cd63645c5a7e6e398a53cd182 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:58:15.648301 21438 mvcc.cc:204] Tried to move back new op lower bound from 7144980052342935552 to 7144980052169117696. Current Snapshot: MvccSnapshot[applied={T|T < 7144980052342935552}]
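[triage note] The mvcc.cc:204 messages read as the snapshot refusing to move its new-op lower bound backwards; on that reading the smaller timestamp is simply ignored so the bound stays monotonic. The interpretation is hedged — the exact semantics are not shown in this log — but the clamp itself is one line:

    // Monotonic lower bound: a backwards move is dropped (sketch).
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      int64_t lower_bound = 7144980052342935552;  // current bound (from the log)
      int64_t candidate   = 7144980052169117696;  // older timestamp offered
      lower_bound = std::max(lower_bound, candidate);  // backwards move ignored
      std::printf("lower bound stays at %lld\n", (long long)lower_bound);
      return 0;
    }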
I20250411 13:58:15.657790 21435 sys_catalog.cc:455] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [sys.catalog]: SysCatalogTable state changed. Reason: Peer health change. Latest consensus state: current_term: 1 leader_uuid: "c617caebdb0c4ce485f80cea5edaa493" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "c617caebdb0c4ce485f80cea5edaa493" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 44055 } } peers { permanent_uuid: "9c23878334f04b2186321611d56b1747" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } } peers { permanent_uuid: "23a3489cd63645c5a7e6e398a53cd182" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } } }
I20250411 13:58:15.658463 21435 sys_catalog.cc:458] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [sys.catalog]: This master's current role is: LEADER
I20250411 13:58:15.659597 21437 catalog_manager.cc:1349] Generated new cluster ID: e0081bce69344e9d83f6f46eb9fa2daa
I20250411 13:58:15.659860 21437 catalog_manager.cc:1497] Initializing Kudu internal certificate authority...
I20250411 13:58:15.695480 21437 catalog_manager.cc:1372] Generated new certificate authority record
I20250411 13:58:15.700115 21437 catalog_manager.cc:1506] Loading token signing keys...
I20250411 13:58:15.730827 21320 consensus_peers.cc:656] Retrying to get permanent uuid for remote peer: member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } attempt: 7
I20250411 13:58:15.761516 21437 catalog_manager.cc:5954] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493: Generated new TSK 0
I20250411 13:58:15.764992 21437 catalog_manager.cc:1516] Initializing in-progress tserver states...
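[triage note] On winning term 1, the leader's catalog manager bootstraps cluster-wide state in order: cluster ID, internal certificate authority, token signing keys (TSK 0), then in-progress tserver states. The cluster ID printed above is a 32-hex-character string; the sketch below merely produces an ID of the same shape — the actual generation scheme is an assumption, not taken from Kudu source:

    // Produce a 32-hex-char ID like "e0081bce69344e9d83f6f46eb9fa2daa" (sketch).
    #include <cstdio>
    #include <random>
    #include <string>

    std::string GenerateHexId32() {
      std::random_device rd;
      std::mt19937_64 gen(rd());
      std::uniform_int_distribution<int> nibble(0, 15);
      std::string id;
      for (int i = 0; i < 32; ++i) {
        id.push_back("0123456789abcdef"[nibble(gen)]);
      }
      return id;
    }

    int main() {
      std::printf("%s\n", GenerateHexId32().c_str());
      return 0;
    }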
I20250411 13:58:15.799777 21320 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 9c23878334f04b2186321611d56b1747: Bootstrap starting.
I20250411 13:58:15.806241 15812 external_mini_cluster.cc:1351] Running /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu
/tmp/dist-test-taskQtg33F/build/tsan/bin/kudu
--fs_wal_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-0/wal
--fs_data_dirs=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-0/data
--block_manager=log
--webserver_interface=localhost
--never_fsync
--enable_minidumps=false
--redact=none
--metrics_log_interval_ms=1000
--log_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-0/logs
--server_dump_info_path=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-0/data/info.pb
--server_dump_info_format=pb
--rpc_server_allow_ephemeral_ports
--unlock_experimental_flags
--unlock_unsafe_flags
--logtostderr
--logbuflevel=-1
--ipki_server_key_size=768
--openssl_security_level_override=0
tserver
run
--rpc_bind_addresses=127.15.113.1:0
--local_ip_for_outbound_sockets=127.15.113.1
--webserver_interface=127.15.113.1
--webserver_port=0
--tserver_master_addrs=127.15.113.62:44055,127.15.113.61:46061,127.15.113.60:41163
--builtin_ntp_servers=127.15.113.20:41765
--builtin_ntp_poll_interval_ms=100
--ntp_initial_sync_wait_secs=10
--time_source=builtin
--raft_heartbeat_interval_ms=500
--leader_failure_max_missed_heartbeat_periods=2
--rpc_service_queue_length=200 with env {}
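[triage note] external_mini_cluster.cc launches each daemon as a separate child process with the fully explicit argv dumped above. A bare-bones fork/exec sketch of that pattern; the path and flags here are placeholders, not the harness's real code:

    // Launch a child process with an explicit argv (sketch).
    #include <cstdio>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main() {
      pid_t pid = fork();
      if (pid == 0) {
        // Child: exec the daemon binary with its flags (placeholder argv).
        char* const argv[] = {
            (char*)"/path/to/kudu", (char*)"tserver", (char*)"run",
            (char*)"--rpc_bind_addresses=127.0.0.1:0", nullptr};
        execv(argv[0], argv);
        _exit(127);  // only reached if execv failed
      }
      std::printf("started child as pid %d\n", (int)pid);
      int status = 0;
      waitpid(pid, &status, 0);
      return 0;
    }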
I20250411 13:58:15.815047 21320 tablet_bootstrap.cc:654] T 00000000000000000000000000000000 P 9c23878334f04b2186321611d56b1747: Neither blocks nor log segments found. Creating new log.
I20250411 13:58:15.819144 21320 log.cc:826] T 00000000000000000000000000000000 P 9c23878334f04b2186321611d56b1747: Log is configured to *not* fsync() on all Append() calls
I20250411 13:58:15.826706 21320 tablet_bootstrap.cc:492] T 00000000000000000000000000000000 P 9c23878334f04b2186321611d56b1747: No bootstrap required, opened a new log
I20250411 13:58:15.865532 21320 raft_consensus.cc:357] T 00000000000000000000000000000000 P 9c23878334f04b2186321611d56b1747 [term 0 FOLLOWER]: Replica starting. Triggering 0 pending ops. Active config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "c617caebdb0c4ce485f80cea5edaa493" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 44055 } } peers { permanent_uuid: "9c23878334f04b2186321611d56b1747" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } } peers { permanent_uuid: "23a3489cd63645c5a7e6e398a53cd182" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } }
I20250411 13:58:15.866802 21320 raft_consensus.cc:383] T 00000000000000000000000000000000 P 9c23878334f04b2186321611d56b1747 [term 0 FOLLOWER]: Consensus starting up: Expiring failure detector timer to make a prompt election more likely
I20250411 13:58:15.867241 21320 raft_consensus.cc:738] T 00000000000000000000000000000000 P 9c23878334f04b2186321611d56b1747 [term 0 FOLLOWER]: Becoming Follower/Learner. State: Replica: 9c23878334f04b2186321611d56b1747, State: Initialized, Role: FOLLOWER
I20250411 13:58:15.868674 21320 consensus_queue.cc:260] T 00000000000000000000000000000000 P 9c23878334f04b2186321611d56b1747 [NON_LEADER]: Queue going to NON_LEADER mode. State: All replicated index: 0, Majority replicated index: 0, Committed index: 0, Last appended: 0.0, Last appended by leader: 0, Current term: 0, Majority size: -1, State: 0, Mode: NON_LEADER, active raft config: opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "c617caebdb0c4ce485f80cea5edaa493" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 44055 } } peers { permanent_uuid: "9c23878334f04b2186321611d56b1747" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } } peers { permanent_uuid: "23a3489cd63645c5a7e6e398a53cd182" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } }
I20250411 13:58:15.877272 21446 sys_catalog.cc:455] T 00000000000000000000000000000000 P 9c23878334f04b2186321611d56b1747 [sys.catalog]: SysCatalogTable state changed. Reason: RaftConsensus started. Latest consensus state: current_term: 0 committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "c617caebdb0c4ce485f80cea5edaa493" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 44055 } } peers { permanent_uuid: "9c23878334f04b2186321611d56b1747" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } } peers { permanent_uuid: "23a3489cd63645c5a7e6e398a53cd182" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } } }
I20250411 13:58:15.878031 21446 sys_catalog.cc:458] T 00000000000000000000000000000000 P 9c23878334f04b2186321611d56b1747 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:58:15.880702 21320 sys_catalog.cc:564] T 00000000000000000000000000000000 P 9c23878334f04b2186321611d56b1747 [sys.catalog]: configured and running, proceeding with master startup.
I20250411 13:58:15.881160 21293 raft_consensus.cc:3058] T 00000000000000000000000000000000 P 9c23878334f04b2186321611d56b1747 [term 0 FOLLOWER]: Advancing to term 1
I20250411 13:58:15.887140 21295 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "c617caebdb0c4ce485f80cea5edaa493" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "9c23878334f04b2186321611d56b1747" is_pre_election: true
I20250411 13:58:15.889436 21294 tablet_service.cc:1812] Received RequestConsensusVote() RPC: tablet_id: "00000000000000000000000000000000" candidate_uuid: "c617caebdb0c4ce485f80cea5edaa493" candidate_term: 1 candidate_status { last_received { term: 0 index: 0 } } ignore_live_leader: false dest_uuid: "9c23878334f04b2186321611d56b1747"
I20250411 13:58:15.892486 21293 raft_consensus.cc:1273] T 00000000000000000000000000000000 P 9c23878334f04b2186321611d56b1747 [term 1 FOLLOWER]: Refusing update from remote peer c617caebdb0c4ce485f80cea5edaa493: Log matching property violated. Preceding OpId in replica: term: 0 index: 0. Preceding OpId from leader: term: 1 index: 2. (index mismatch)
I20250411 13:58:15.893993 21422 consensus_queue.cc:1035] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [LEADER]: Connected to new peer: Peer: permanent_uuid: "9c23878334f04b2186321611d56b1747" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 }, Status: LMP_MISMATCH, Last received: 0.0, Next index: 1, Last known committed idx: 0, Time since last communication: 0.000s
I20250411 13:58:15.955054 21446 sys_catalog.cc:455] T 00000000000000000000000000000000 P 9c23878334f04b2186321611d56b1747 [sys.catalog]: SysCatalogTable state changed. Reason: New leader c617caebdb0c4ce485f80cea5edaa493. Latest consensus state: current_term: 1 leader_uuid: "c617caebdb0c4ce485f80cea5edaa493" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "c617caebdb0c4ce485f80cea5edaa493" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 44055 } } peers { permanent_uuid: "9c23878334f04b2186321611d56b1747" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } } peers { permanent_uuid: "23a3489cd63645c5a7e6e398a53cd182" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } } }
I20250411 13:58:15.955704 21446 sys_catalog.cc:458] T 00000000000000000000000000000000 P 9c23878334f04b2186321611d56b1747 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:58:15.958097 21456 sys_catalog.cc:455] T 00000000000000000000000000000000 P 9c23878334f04b2186321611d56b1747 [sys.catalog]: SysCatalogTable state changed. Reason: Replicated consensus-only round. Latest consensus state: current_term: 1 leader_uuid: "c617caebdb0c4ce485f80cea5edaa493" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "c617caebdb0c4ce485f80cea5edaa493" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 44055 } } peers { permanent_uuid: "9c23878334f04b2186321611d56b1747" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } } peers { permanent_uuid: "23a3489cd63645c5a7e6e398a53cd182" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } } }
I20250411 13:58:15.959081 21456 sys_catalog.cc:458] T 00000000000000000000000000000000 P 9c23878334f04b2186321611d56b1747 [sys.catalog]: This master's current role is: FOLLOWER
I20250411 13:58:15.965875 21462 catalog_manager.cc:797] Waiting for catalog manager background task thread to start: Service unavailable: Catalog manager is not initialized. State: Starting
I20250411 13:58:16.245020 21418 catalog_manager.cc:1261] Loaded cluster ID: e0081bce69344e9d83f6f46eb9fa2daa
I20250411 13:58:16.245384 21418 catalog_manager.cc:1554] T 00000000000000000000000000000000 P 23a3489cd63645c5a7e6e398a53cd182: loading cluster ID for follower catalog manager: success
I20250411 13:58:16.252030 21418 catalog_manager.cc:1576] T 00000000000000000000000000000000 P 23a3489cd63645c5a7e6e398a53cd182: acquiring CA information for follower catalog manager: success
I20250411 13:58:16.259222 21418 catalog_manager.cc:1604] T 00000000000000000000000000000000 P 23a3489cd63645c5a7e6e398a53cd182: importing token verification keys for follower catalog manager: success; most recent TSK sequence number 0
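[triage note] The "will retry" warnings at 13:58:15 resolve here once the new leader has replicated the cluster ID: follower catalog managers simply re-attempt the load step until the entry exists. A sketch of that retry shape — CheckClusterIdLoaded and the backoff policy are stand-ins, not Kudu APIs:

    // Retry-until-replicated pattern implied by the follower log lines (sketch).
    #include <chrono>
    #include <cstdio>
    #include <thread>

    static int attempts = 0;
    bool CheckClusterIdLoaded() { return ++attempts >= 3; }  // fake: succeeds on try 3

    int main() {
      auto backoff = std::chrono::milliseconds(100);
      while (!CheckClusterIdLoaded()) {
        std::printf("cluster ID entry not found, will retry\n");
        std::this_thread::sleep_for(backoff);
        backoff *= 2;  // simple exponential backoff (assumption)
      }
      std::printf("loaded cluster ID after %d attempts\n", attempts);
      return 0;
    }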
I20250411 13:58:16.350284 21467 sys_catalog.cc:455] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [sys.catalog]: SysCatalogTable state changed. Reason: Peer health change. Latest consensus state: current_term: 1 leader_uuid: "c617caebdb0c4ce485f80cea5edaa493" committed_config { opid_index: -1 OBSOLETE_local: false peers { permanent_uuid: "c617caebdb0c4ce485f80cea5edaa493" member_type: VOTER last_known_addr { host: "127.15.113.62" port: 44055 } } peers { permanent_uuid: "9c23878334f04b2186321611d56b1747" member_type: VOTER last_known_addr { host: "127.15.113.61" port: 46061 } } peers { permanent_uuid: "23a3489cd63645c5a7e6e398a53cd182" member_type: VOTER last_known_addr { host: "127.15.113.60" port: 41163 } } }
I20250411 13:58:16.351122 21467 sys_catalog.cc:458] T 00000000000000000000000000000000 P c617caebdb0c4ce485f80cea5edaa493 [sys.catalog]: This master's current role is: LEADER
W20250411 13:58:16.368762 21445 flags.cc:425] Enabled unsafe flag: --openssl_security_level_override=0
W20250411 13:58:16.369361 21445 flags.cc:425] Enabled unsafe flag: --rpc_server_allow_ephemeral_ports=true
W20250411 13:58:16.370114 21445 flags.cc:425] Enabled unsafe flag: --never_fsync=true
W20250411 13:58:16.423007 21445 flags.cc:425] Enabled experimental flag: --ipki_server_key_size=768
W20250411 13:58:16.424386 21445 flags.cc:425] Enabled experimental flag: --local_ip_for_outbound_sockets=127.15.113.1
I20250411 13:58:16.484097 21445 tablet_server_runner.cc:78] Tablet server non-default flags:
--builtin_ntp_poll_interval_ms=100
--builtin_ntp_servers=127.15.113.20:41765
--ntp_initial_sync_wait_secs=10
--time_source=builtin
--leader_failure_max_missed_heartbeat_periods=2
--fs_data_dirs=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-0/data
--fs_wal_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-0/wal
--ipki_server_key_size=768
--openssl_security_level_override=0
--rpc_bind_addresses=127.15.113.1:0
--rpc_server_allow_ephemeral_ports=true
--rpc_service_queue_length=200
--metrics_log_interval_ms=1000
--server_dump_info_format=pb
--server_dump_info_path=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-0/data/info.pb
--webserver_interface=127.15.113.1
--webserver_port=0
--tserver_master_addrs=127.15.113.62:44055,127.15.113.61:46061,127.15.113.60:41163
--never_fsync=true
--redact=none
--unlock_experimental_flags=true
--unlock_unsafe_flags=true
--enable_minidumps=false
--local_ip_for_outbound_sockets=127.15.113.1
--log_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-0/logs
--logbuflevel=-1
--logtostderr=true

Tablet server version:
kudu 1.18.0-SNAPSHOT
revision 586cfc20905df35f49541b53af4442736790b0c1
build type FASTDEBUG
built by None at 11 Apr 2025 13:43:20 UTC on 5fd53c4cbb9d
build id 5571
TSAN enabled
I20250411 13:58:16.485731 21445 env_posix.cc:2264] Not raising this process' open files per process limit of 1048576; it is already as high as it can go
I20250411 13:58:16.487736 21445 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:58:16.506809 21470 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:58:16.977360 21462 catalog_manager.cc:1261] Loaded cluster ID: e0081bce69344e9d83f6f46eb9fa2daa
I20250411 13:58:16.981762 21462 catalog_manager.cc:1554] T 00000000000000000000000000000000 P 9c23878334f04b2186321611d56b1747: loading cluster ID for follower catalog manager: success
I20250411 13:58:16.989223 21462 catalog_manager.cc:1576] T 00000000000000000000000000000000 P 9c23878334f04b2186321611d56b1747: acquiring CA information for follower catalog manager: success
I20250411 13:58:17.002219 21462 catalog_manager.cc:1604] T 00000000000000000000000000000000 P 9c23878334f04b2186321611d56b1747: importing token verification keys for follower catalog manager: success; most recent TSK sequence number 0
W20250411 13:58:16.509617 21471 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:58:18.135880 21445 thread.cc:640] GCE (cloud detector) Time spent creating pthread: real 1.628s	user 0.377s	sys 0.615s
W20250411 13:58:18.139905 21445 thread.cc:606] GCE (cloud detector) Time spent starting thread: real 1.632s	user 0.378s	sys 0.615s
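[triage note] These warnings come from Kudu timing its own thread creation; under a FASTDEBUG build with TSAN enabled (see the version block above) starting a pthread can take over a second, which trips the slow-operation log. A sketch of the measurement — the 0.5 s threshold is a placeholder, not Kudu's actual cutoff:

    // Time thread creation and warn past a threshold (sketch).
    #include <chrono>
    #include <cstdio>
    #include <thread>

    int main() {
      auto start = std::chrono::steady_clock::now();
      std::thread t([] {});
      auto elapsed = std::chrono::duration<double>(
                         std::chrono::steady_clock::now() - start).count();
      t.join();
      if (elapsed > 0.5) {  // placeholder threshold
        std::printf("Time spent creating thread: real %.3fs\n", elapsed);
      }
      return 0;
    }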
W20250411 13:58:17.903931 21469 debug-util.cc:398] Leaking SignalData structure 0x7b080001b040 after lost signal to thread 21445
W20250411 13:58:18.147256 21476 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:58:18.147262 21445 server_base.cc:1034] running on GCE node
I20250411 13:58:18.148808 21445 hybrid_clock.cc:584] initializing the hybrid clock with 'builtin' time source
I20250411 13:58:18.151038 21445 hybrid_clock.cc:630] waiting up to --ntp_initial_sync_wait_secs=10 seconds for the clock to synchronize
I20250411 13:58:18.152493 21445 hybrid_clock.cc:648] HybridClock initialized: now 1744379898152436 us; error 61 us; skew 500 ppm
I20250411 13:58:18.153261 21445 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:58:18.165359 21445 webserver.cc:466] Webserver started at http://127.15.113.1:44875/ using document root <none> and password file <none>
I20250411 13:58:18.166388 21445 fs_manager.cc:362] Metadata directory not provided
I20250411 13:58:18.166603 21445 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:58:18.167125 21445 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:58:18.171350 21445 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-0/data/instance:
uuid: "4a7e5ca5318d4dbe83d14d469bc446d7"
format_stamp: "Formatted at 2025-04-11 13:58:18 on dist-test-slave-jcj2"
I20250411 13:58:18.172349 21445 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-0/wal/instance:
uuid: "4a7e5ca5318d4dbe83d14d469bc446d7"
format_stamp: "Formatted at 2025-04-11 13:58:18 on dist-test-slave-jcj2"
I20250411 13:58:18.180872 21445 fs_manager.cc:696] Time spent creating directory manager: real 0.008s	user 0.006s	sys 0.002s
I20250411 13:58:18.186421 21481 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:58:18.187492 21445 fs_manager.cc:730] Time spent opening block manager: real 0.004s	user 0.003s	sys 0.000s
I20250411 13:58:18.187834 21445 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-0/data,/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-0/wal
uuid: "4a7e5ca5318d4dbe83d14d469bc446d7"
format_stamp: "Formatted at 2025-04-11 13:58:18 on dist-test-slave-jcj2"
I20250411 13:58:18.188155 21445 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-0/wal
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-0/wal
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-0/data/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:58:18.235553 21445 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:58:18.236928 21445 env_posix.cc:2264] Not raising this process' running threads per effective uid limit of 18446744073709551615; it is already as high as it can go
I20250411 13:58:18.237365 21445 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:58:18.240480 21445 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:58:18.244913 21445 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:58:18.245101 21445 ts_tablet_manager.cc:525] Time spent load tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:58:18.245316 21445 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:58:18.245471 21445 ts_tablet_manager.cc:589] Time spent register tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:58:18.434975 21445 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.1:36677
I20250411 13:58:18.435129 21593 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.1:36677 every 8 connection(s)
I20250411 13:58:18.437242 21445 server_base.cc:1166] Dumped server information to /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-0/data/info.pb
I20250411 13:58:18.443974 15812 external_mini_cluster.cc:1413] Started /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu as pid 21445
I20250411 13:58:18.444581 15812 external_mini_cluster.cc:1427] Reading /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-0/wal/instance
I20250411 13:58:18.470166 15812 external_mini_cluster.cc:1351] Running /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu
/tmp/dist-test-taskQtg33F/build/tsan/bin/kudu
--fs_wal_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-1/wal
--fs_data_dirs=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-1/data
--block_manager=log
--webserver_interface=localhost
--never_fsync
--enable_minidumps=false
--redact=none
--metrics_log_interval_ms=1000
--log_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-1/logs
--server_dump_info_path=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-1/data/info.pb
--server_dump_info_format=pb
--rpc_server_allow_ephemeral_ports
--unlock_experimental_flags
--unlock_unsafe_flags
--logtostderr
--logbuflevel=-1
--ipki_server_key_size=768
--openssl_security_level_override=0
tserver
run
--rpc_bind_addresses=127.15.113.2:0
--local_ip_for_outbound_sockets=127.15.113.2
--webserver_interface=127.15.113.2
--webserver_port=0
--tserver_master_addrs=127.15.113.62:44055,127.15.113.61:46061,127.15.113.60:41163
--builtin_ntp_servers=127.15.113.20:41765
--builtin_ntp_poll_interval_ms=100
--ntp_initial_sync_wait_secs=10
--time_source=builtin
--raft_heartbeat_interval_ms=500
--leader_failure_max_missed_heartbeat_periods=2
--rpc_service_queue_length=200 with env {}
I20250411 13:58:18.495033 21594 heartbeater.cc:344] Connected to a master server at 127.15.113.60:41163
I20250411 13:58:18.500172 21594 heartbeater.cc:461] Registering TS with master...
I20250411 13:58:18.501384 21595 heartbeater.cc:344] Connected to a master server at 127.15.113.61:46061
I20250411 13:58:18.501521 21594 heartbeater.cc:507] Master 127.15.113.60:41163 requested a full tablet report, sending...
I20250411 13:58:18.501715 21595 heartbeater.cc:461] Registering TS with master...
I20250411 13:58:18.502584 21595 heartbeater.cc:507] Master 127.15.113.61:46061 requested a full tablet report, sending...
I20250411 13:58:18.504761 21364 ts_manager.cc:194] Registered new tserver with Master: 4a7e5ca5318d4dbe83d14d469bc446d7 (127.15.113.1:36677)
I20250411 13:58:18.505266 21285 ts_manager.cc:194] Registered new tserver with Master: 4a7e5ca5318d4dbe83d14d469bc446d7 (127.15.113.1:36677)
I20250411 13:58:18.509670 21596 heartbeater.cc:344] Connected to a master server at 127.15.113.62:44055
I20250411 13:58:18.509925 21596 heartbeater.cc:461] Registering TS with master...
I20250411 13:58:18.510537 21596 heartbeater.cc:507] Master 127.15.113.62:44055 requested a full tablet report, sending...
I20250411 13:58:18.512454 21208 ts_manager.cc:194] Registered new tserver with Master: 4a7e5ca5318d4dbe83d14d469bc446d7 (127.15.113.1:36677)
I20250411 13:58:18.514338 21208 master_service.cc:496] Signed X509 certificate for tserver {username='slave'} at 127.15.113.1:56469
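[triage note] ts-0 runs one heartbeater per configured master address: each connects, registers the tserver, and answers that master's request for a full tablet report, while the elected leader also signs the tserver's X509 certificate. An illustrative sketch of the per-master fan-out — the addresses are copied from the log, but the loop body is not Kudu's Heartbeater:

    // One heartbeater per master, as the three "Connected to..." lines suggest.
    #include <cstdio>
    #include <string>
    #include <thread>
    #include <vector>

    void HeartbeatLoop(const std::string& master_addr) {
      // A real heartbeater registers the tserver, then sends periodic (and,
      // on request, full) tablet reports; here we just log one round.
      std::printf("connected to %s, registering TS, sending full report\n",
                  master_addr.c_str());
    }

    int main() {
      std::vector<std::thread> heartbeaters;
      for (const std::string addr :
           {"127.15.113.62:44055", "127.15.113.61:46061", "127.15.113.60:41163"}) {
        heartbeaters.emplace_back(HeartbeatLoop, addr);
      }
      for (auto& t : heartbeaters) t.join();
      return 0;
    }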
W20250411 13:58:18.859961 21603 flags.cc:425] Enabled unsafe flag: --openssl_security_level_override=0
W20250411 13:58:18.860463 21603 flags.cc:425] Enabled unsafe flag: --rpc_server_allow_ephemeral_ports=true
W20250411 13:58:18.861001 21603 flags.cc:425] Enabled unsafe flag: --never_fsync=true
W20250411 13:58:18.890767 21603 flags.cc:425] Enabled experimental flag: --ipki_server_key_size=768
W20250411 13:58:18.891602 21603 flags.cc:425] Enabled experimental flag: --local_ip_for_outbound_sockets=127.15.113.2
I20250411 13:58:18.925287 21603 tablet_server_runner.cc:78] Tablet server non-default flags:
--builtin_ntp_poll_interval_ms=100
--builtin_ntp_servers=127.15.113.20:41765
--ntp_initial_sync_wait_secs=10
--time_source=builtin
--leader_failure_max_missed_heartbeat_periods=2
--fs_data_dirs=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-1/data
--fs_wal_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-1/wal
--ipki_server_key_size=768
--openssl_security_level_override=0
--rpc_bind_addresses=127.15.113.2:0
--rpc_server_allow_ephemeral_ports=true
--rpc_service_queue_length=200
--metrics_log_interval_ms=1000
--server_dump_info_format=pb
--server_dump_info_path=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-1/data/info.pb
--webserver_interface=127.15.113.2
--webserver_port=0
--tserver_master_addrs=127.15.113.62:44055,127.15.113.61:46061,127.15.113.60:41163
--never_fsync=true
--redact=none
--unlock_experimental_flags=true
--unlock_unsafe_flags=true
--enable_minidumps=false
--local_ip_for_outbound_sockets=127.15.113.2
--log_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-1/logs
--logbuflevel=-1
--logtostderr=true

Tablet server version:
kudu 1.18.0-SNAPSHOT
revision 586cfc20905df35f49541b53af4442736790b0c1
build type FASTDEBUG
built by None at 11 Apr 2025 13:43:20 UTC on 5fd53c4cbb9d
build id 5571
TSAN enabled
I20250411 13:58:18.926928 21603 env_posix.cc:2264] Not raising this process' open files per process limit of 1048576; it is already as high as it can go
I20250411 13:58:18.928503 21603 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:58:18.946367 21611 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:58:19.517234 21596 heartbeater.cc:499] Master 127.15.113.62:44055 was elected leader, sending a full tablet report...
W20250411 13:58:18.948192 21610 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:58:18.949640 21613 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:58:18.949636 21603 server_base.cc:1034] running on GCE node
I20250411 13:58:20.091104 21603 hybrid_clock.cc:584] initializing the hybrid clock with 'builtin' time source
I20250411 13:58:20.093778 21603 hybrid_clock.cc:630] waiting up to --ntp_initial_sync_wait_secs=10 seconds for the clock to synchronize
I20250411 13:58:20.095182 21603 hybrid_clock.cc:648] HybridClock initialized: now 1744379900095147 us; error 56 us; skew 500 ppm
I20250411 13:58:20.095902 21603 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:58:20.102537 21603 webserver.cc:466] Webserver started at http://127.15.113.2:43733/ using document root <none> and password file <none>
I20250411 13:58:20.103691 21603 fs_manager.cc:362] Metadata directory not provided
I20250411 13:58:20.103926 21603 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:58:20.104477 21603 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:58:20.110713 21603 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-1/data/instance:
uuid: "3b0f46fba1964519b708b5a96f11f979"
format_stamp: "Formatted at 2025-04-11 13:58:20 on dist-test-slave-jcj2"
I20250411 13:58:20.112053 21603 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-1/wal/instance:
uuid: "3b0f46fba1964519b708b5a96f11f979"
format_stamp: "Formatted at 2025-04-11 13:58:20 on dist-test-slave-jcj2"
I20250411 13:58:20.121012 21603 fs_manager.cc:696] Time spent creating directory manager: real 0.008s	user 0.009s	sys 0.000s
I20250411 13:58:20.127758 21621 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:58:20.128909 21603 fs_manager.cc:730] Time spent opening block manager: real 0.004s	user 0.004s	sys 0.000s
I20250411 13:58:20.129267 21603 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-1/data,/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-1/wal
uuid: "3b0f46fba1964519b708b5a96f11f979"
format_stamp: "Formatted at 2025-04-11 13:58:20 on dist-test-slave-jcj2"
I20250411 13:58:20.129669 21603 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-1/wal
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-1/wal
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-1/data/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:58:20.176980 21603 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:58:20.178433 21603 env_posix.cc:2264] Not raising this process' running threads per effective uid limit of 18446744073709551615; it is already as high as it can go
I20250411 13:58:20.178833 21603 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:58:20.181102 21603 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:58:20.184882 21603 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:58:20.185071 21603 ts_tablet_manager.cc:525] Time spent load tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:58:20.185253 21603 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:58:20.185379 21603 ts_tablet_manager.cc:589] Time spent register tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:58:20.324402 21603 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.2:43085
I20250411 13:58:20.324487 21734 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.2:43085 every 8 connection(s)
I20250411 13:58:20.326788 21603 server_base.cc:1166] Dumped server information to /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-1/data/info.pb
I20250411 13:58:20.331557 15812 external_mini_cluster.cc:1413] Started /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu as pid 21603
I20250411 13:58:20.332170 15812 external_mini_cluster.cc:1427] Reading /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-1/wal/instance
I20250411 13:58:20.353688 15812 external_mini_cluster.cc:1351] Running /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu
/tmp/dist-test-taskQtg33F/build/tsan/bin/kudu
--fs_wal_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-2/wal
--fs_data_dirs=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-2/data
--block_manager=log
--webserver_interface=localhost
--never_fsync
--enable_minidumps=false
--redact=none
--metrics_log_interval_ms=1000
--log_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-2/logs
--server_dump_info_path=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-2/data/info.pb
--server_dump_info_format=pb
--rpc_server_allow_ephemeral_ports
--unlock_experimental_flags
--unlock_unsafe_flags
--logtostderr
--logbuflevel=-1
--ipki_server_key_size=768
--openssl_security_level_override=0
tserver
run
--rpc_bind_addresses=127.15.113.3:0
--local_ip_for_outbound_sockets=127.15.113.3
--webserver_interface=127.15.113.3
--webserver_port=0
--tserver_master_addrs=127.15.113.62:44055,127.15.113.61:46061,127.15.113.60:41163
--builtin_ntp_servers=127.15.113.20:41765
--builtin_ntp_poll_interval_ms=100
--ntp_initial_sync_wait_secs=10
--time_source=builtin
--raft_heartbeat_interval_ms=500
--leader_failure_max_missed_heartbeat_periods=2
--rpc_service_queue_length=200 with env {}
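
[editor's note] The long command line above is generated by the test harness, not written by hand. As a rough sketch of how a test asks the harness to spawn such tserver processes (modeled on kudu::cluster::ExternalMiniCluster from external_mini_cluster.cc as referenced throughout this log; the field names and the exact options this test sets are assumptions, not taken from this log):

    // Hedged sketch: ExternalMiniCluster turns options like these into the
    // 'kudu ... tserver run --flag=...' command lines seen above.
    #include <utility>
    #include "kudu/mini-cluster/external_mini_cluster.h"
    #include "kudu/util/status.h"

    using kudu::cluster::ExternalMiniCluster;
    using kudu::cluster::ExternalMiniClusterOptions;

    void StartClusterLikeThisTest() {
      ExternalMiniClusterOptions opts;
      opts.num_masters = 3;         // matches the three --tserver_master_addrs entries
      opts.num_tablet_servers = 3;  // matches the ts-0/ts-1/ts-2 data directories
      // Extra flags are appended to each tserver command line, e.g. the
      // shortened Raft heartbeat interval visible in the dump above.
      opts.extra_tserver_flags.emplace_back("--raft_heartbeat_interval_ms=500");
      ExternalMiniCluster cluster(std::move(opts));
      CHECK_OK(cluster.Start());  // forks one 'kudu' subprocess per daemon
    }
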
I20250411 13:58:20.372407 21736 heartbeater.cc:344] Connected to a master server at 127.15.113.61:46061
I20250411 13:58:20.372839 21736 heartbeater.cc:461] Registering TS with master...
I20250411 13:58:20.373840 21736 heartbeater.cc:507] Master 127.15.113.61:46061 requested a full tablet report, sending...
I20250411 13:58:20.376402 21285 ts_manager.cc:194] Registered new tserver with Master: 3b0f46fba1964519b708b5a96f11f979 (127.15.113.2:43085)
I20250411 13:58:20.379307 21737 heartbeater.cc:344] Connected to a master server at 127.15.113.62:44055
I20250411 13:58:20.379563 21737 heartbeater.cc:461] Registering TS with master...
I20250411 13:58:20.380242 21737 heartbeater.cc:507] Master 127.15.113.62:44055 requested a full tablet report, sending...
I20250411 13:58:20.382565 21208 ts_manager.cc:194] Registered new tserver with Master: 3b0f46fba1964519b708b5a96f11f979 (127.15.113.2:43085)
I20250411 13:58:20.384610 21208 master_service.cc:496] Signed X509 certificate for tserver {username='slave'} at 127.15.113.2:50099
I20250411 13:58:20.388029 21735 heartbeater.cc:344] Connected to a master server at 127.15.113.60:41163
I20250411 13:58:20.388360 21735 heartbeater.cc:461] Registering TS with master...
I20250411 13:58:20.389078 21735 heartbeater.cc:507] Master 127.15.113.60:41163 requested a full tablet report, sending...
I20250411 13:58:20.391306 21364 ts_manager.cc:194] Registered new tserver with Master: 3b0f46fba1964519b708b5a96f11f979 (127.15.113.2:43085)
W20250411 13:58:20.705526 21745 flags.cc:425] Enabled unsafe flag: --openssl_security_level_override=0
W20250411 13:58:20.706010 21745 flags.cc:425] Enabled unsafe flag: --rpc_server_allow_ephemeral_ports=true
W20250411 13:58:20.706528 21745 flags.cc:425] Enabled unsafe flag: --never_fsync=true
W20250411 13:58:20.737025 21745 flags.cc:425] Enabled experimental flag: --ipki_server_key_size=768
W20250411 13:58:20.737916 21745 flags.cc:425] Enabled experimental flag: --local_ip_for_outbound_sockets=127.15.113.3
I20250411 13:58:20.771090 21745 tablet_server_runner.cc:78] Tablet server non-default flags:
--builtin_ntp_poll_interval_ms=100
--builtin_ntp_servers=127.15.113.20:41765
--ntp_initial_sync_wait_secs=10
--time_source=builtin
--leader_failure_max_missed_heartbeat_periods=2
--fs_data_dirs=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-2/data
--fs_wal_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-2/wal
--ipki_server_key_size=768
--openssl_security_level_override=0
--rpc_bind_addresses=127.15.113.3:0
--rpc_server_allow_ephemeral_ports=true
--rpc_service_queue_length=200
--metrics_log_interval_ms=1000
--server_dump_info_format=pb
--server_dump_info_path=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-2/data/info.pb
--webserver_interface=127.15.113.3
--webserver_port=0
--tserver_master_addrs=127.15.113.62:44055,127.15.113.61:46061,127.15.113.60:41163
--never_fsync=true
--redact=none
--unlock_experimental_flags=true
--unlock_unsafe_flags=true
--enable_minidumps=false
--local_ip_for_outbound_sockets=127.15.113.3
--log_dir=/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-2/logs
--logbuflevel=-1
--logtostderr=true

Tablet server version:
kudu 1.18.0-SNAPSHOT
revision 586cfc20905df35f49541b53af4442736790b0c1
build type FASTDEBUG
built by None at 11 Apr 2025 13:43:20 UTC on 5fd53c4cbb9d
build id 5571
TSAN enabled
I20250411 13:58:20.772390 21745 env_posix.cc:2264] Not raising this process' open files per process limit of 1048576; it is already as high as it can go
I20250411 13:58:20.773948 21745 file_cache.cc:492] Constructed file cache file cache with capacity 419430
W20250411 13:58:20.789443 21754 instance_detector.cc:116] could not retrieve OpenStack instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
I20250411 13:58:21.388729 21737 heartbeater.cc:499] Master 127.15.113.62:44055 was elected leader, sending a full tablet report...
W20250411 13:58:21.527014 21239 debug-util.cc:398] Leaking SignalData structure 0x7b080006fdc0 after lost signal to thread 21176
W20250411 13:58:21.527676 21239 debug-util.cc:398] Leaking SignalData structure 0x7b0800095060 after lost signal to thread 21242
W20250411 13:58:20.790405 21752 instance_detector.cc:116] could not retrieve Azure instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:58:20.789665 21751 instance_detector.cc:116] could not retrieve AWS instance metadata: Network error: curl error: HTTP response code said error: The requested URL returned error: 404
W20250411 13:58:21.915055 21753 instance_detector.cc:116] could not retrieve GCE instance metadata: Timed out: curl timeout: Timeout was reached: Connection time-out
I20250411 13:58:21.915488 21745 server_base.cc:1029] Not found: could not retrieve instance metadata: unable to detect cloud type of this node, probably running in non-cloud environment
I20250411 13:58:21.920374 21745 hybrid_clock.cc:584] initializing the hybrid clock with 'builtin' time source
I20250411 13:58:21.923663 21745 hybrid_clock.cc:630] waiting up to --ntp_initial_sync_wait_secs=10 seconds for the clock to synchronize
I20250411 13:58:21.925292 21745 hybrid_clock.cc:648] HybridClock initialized: now 1744379901925227 us; error 57 us; skew 500 ppm
I20250411 13:58:21.926496 21745 server_base.cc:834] Flag tcmalloc_max_total_thread_cache_bytes is not working since tcmalloc is not enabled.
I20250411 13:58:21.935457 21745 webserver.cc:466] Webserver started at http://127.15.113.3:39763/ using document root <none> and password file <none>
I20250411 13:58:21.936821 21745 fs_manager.cc:362] Metadata directory not provided
I20250411 13:58:21.937141 21745 fs_manager.cc:368] Using write-ahead log directory (fs_wal_dir) as metadata directory
I20250411 13:58:21.938241 21745 server_base.cc:882] This appears to be a new deployment of Kudu; creating new FS layout
I20250411 13:58:21.946113 21745 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-2/data/instance:
uuid: "24b34363536c426fbcc89f0ccf75e9dc"
format_stamp: "Formatted at 2025-04-11 13:58:21 on dist-test-slave-jcj2"
I20250411 13:58:21.947782 21745 fs_manager.cc:1068] Generated new instance metadata in path /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-2/wal/instance:
uuid: "24b34363536c426fbcc89f0ccf75e9dc"
format_stamp: "Formatted at 2025-04-11 13:58:21 on dist-test-slave-jcj2"
I20250411 13:58:21.957690 21745 fs_manager.cc:696] Time spent creating directory manager: real 0.009s	user 0.009s	sys 0.001s
I20250411 13:58:21.966205 21762 log_block_manager.cc:3788] Time spent loading block containers with low live blocks: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:58:21.967489 21745 fs_manager.cc:730] Time spent opening block manager: real 0.004s	user 0.003s	sys 0.000s
I20250411 13:58:21.967927 21745 fs_manager.cc:647] Opened local filesystem: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-2/data,/tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-2/wal
uuid: "24b34363536c426fbcc89f0ccf75e9dc"
format_stamp: "Formatted at 2025-04-11 13:58:21 on dist-test-slave-jcj2"
I20250411 13:58:21.968415 21745 fs_report.cc:389] FS layout report
--------------------
wal directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-2/wal
metadata directory: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-2/wal
1 data directories: /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-2/data/data
Total live blocks: 0
Total live bytes: 0
Total live bytes (after alignment): 0
Total number of LBM containers: 0 (0 full)
Did not check for missing blocks
Did not check for orphaned blocks
Total full LBM containers with extra space: 0 (0 repaired)
Total full LBM container extra space in bytes: 0 (0 repaired)
Total incomplete LBM containers: 0 (0 repaired)
Total LBM partial records: 0 (0 repaired)
Total corrupted LBM metadata records in RocksDB: 0 (0 repaired)
I20250411 13:58:22.053644 21745 rpc_server.cc:225] running with OpenSSL 1.1.1  11 Sep 2018
I20250411 13:58:22.055305 21745 env_posix.cc:2264] Not raising this process' running threads per effective uid limit of 18446744073709551615; it is already as high as it can go
I20250411 13:58:22.055775 21745 kserver.cc:163] Server-wide thread pool size limit: 3276
I20250411 13:58:22.058280 21745 txn_system_client.cc:432] TxnSystemClient initialization is disabled...
I20250411 13:58:22.063102 21745 ts_tablet_manager.cc:579] Loaded tablet metadata (0 total tablets, 0 live tablets)
I20250411 13:58:22.063324 21745 ts_tablet_manager.cc:525] Time spent load tablet metadata: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:58:22.063560 21745 ts_tablet_manager.cc:610] Registered 0 tablets
I20250411 13:58:22.063728 21745 ts_tablet_manager.cc:589] Time spent register tablets: real 0.000s	user 0.000s	sys 0.000s
I20250411 13:58:22.227564 21745 rpc_server.cc:307] RPC server started. Bound to: 127.15.113.3:37467
I20250411 13:58:22.227669 21874 acceptor_pool.cc:272] collecting diagnostics on the listening RPC socket 127.15.113.3:37467 every 8 connection(s)
I20250411 13:58:22.230264 21745 server_base.cc:1166] Dumped server information to /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-2/data/info.pb
I20250411 13:58:22.240639 15812 external_mini_cluster.cc:1413] Started /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu as pid 21745
I20250411 13:58:22.241281 15812 external_mini_cluster.cc:1427] Reading /tmp/dist-test-taskQtg33F/test-tmp/master_replication-itest.0.MasterReplicationAndRpcSizeLimitTest.TabletReports.1744379822517842-15812-0/minicluster-data/ts-2/wal/instance
I20250411 13:58:22.278162 21875 heartbeater.cc:344] Connected to a master server at 127.15.113.60:41163
I20250411 13:58:22.278702 21875 heartbeater.cc:461] Registering TS with master...
I20250411 13:58:22.280036 21875 heartbeater.cc:507] Master 127.15.113.60:41163 requested a full tablet report, sending...
I20250411 13:58:22.283107 21364 ts_manager.cc:194] Registered new tserver with Master: 24b34363536c426fbcc89f0ccf75e9dc (127.15.113.3:37467)
I20250411 13:58:22.284929 21877 heartbeater.cc:344] Connected to a master server at 127.15.113.62:44055
I20250411 13:58:22.285316 21877 heartbeater.cc:461] Registering TS with master...
I20250411 13:58:22.286464 21877 heartbeater.cc:507] Master 127.15.113.62:44055 requested a full tablet report, sending...
I20250411 13:58:22.287470 21876 heartbeater.cc:344] Connected to a master server at 127.15.113.61:46061
I20250411 13:58:22.287783 21876 heartbeater.cc:461] Registering TS with master...
I20250411 13:58:22.288623 21876 heartbeater.cc:507] Master 127.15.113.61:46061 requested a full tablet report, sending...
I20250411 13:58:22.289166 21208 ts_manager.cc:194] Registered new tserver with Master: 24b34363536c426fbcc89f0ccf75e9dc (127.15.113.3:37467)
I20250411 13:58:22.290715 21208 master_service.cc:496] Signed X509 certificate for tserver {username='slave'} at 127.15.113.3:42913
I20250411 13:58:22.291141 21285 ts_manager.cc:194] Registered new tserver with Master: 24b34363536c426fbcc89f0ccf75e9dc (127.15.113.3:37467)
I20250411 13:58:22.296197 15812 external_mini_cluster.cc:934] 3 TS(s) registered with all masters
/home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/integration-tests/master_replication-itest.cc:537: Skipped
test is skipped; set KUDU_ALLOW_SLOW_TESTS=1 to run
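
[editor's note] The skip above is an environment-variable gate evaluated at master_replication-itest.cc:537. The exact guard is not shown in this log; a hypothetical sketch of the pattern (AllowSlowTests() here is a stand-in modeled on the KUDU_ALLOW_SLOW_TESTS convention) that produces exactly this "Skipped" line:

    // Hypothetical env-var test gate; not the verbatim Kudu macro.
    #include <cstdlib>
    #include <string>
    #include <gtest/gtest.h>

    static bool AllowSlowTests() {
      const char* val = std::getenv("KUDU_ALLOW_SLOW_TESTS");
      return val != nullptr && std::string(val) == "1";  // crude parse for the sketch
    }

    TEST(MasterReplicationAndRpcSizeLimitTest, TabletReports) {
      if (!AllowSlowTests()) {
        GTEST_SKIP() << "test is skipped; set KUDU_ALLOW_SLOW_TESTS=1 to run";
      }
      // ... slow test body ...
    }

Note that the skip fires after SetUp() has already started the minicluster, which is why the log shows six kudu processes being launched and then killed around it.
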
I20250411 13:58:22.347869 15812 external_mini_cluster.cc:1620] Killing /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu with pid 21445
I20250411 13:58:22.381585 15812 external_mini_cluster.cc:1620] Killing /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu with pid 21603
I20250411 13:58:22.414179 15812 external_mini_cluster.cc:1620] Killing /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu with pid 21745
I20250411 13:58:22.444481 15812 external_mini_cluster.cc:1620] Killing /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu with pid 21175
I20250411 13:58:22.470773 15812 external_mini_cluster.cc:1620] Killing /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu with pid 21247
I20250411 13:58:22.495105 15812 external_mini_cluster.cc:1620] Killing /tmp/dist-test-taskQtg33F/build/tsan/bin/kudu with pid 21324
2025-04-11T13:58:22Z chronyd exiting
[  SKIPPED ] MasterReplicationAndRpcSizeLimitTest.TabletReports (12671 ms)
[----------] 2 tests from MasterReplicationAndRpcSizeLimitTest (28317 ms total)

[----------] Global test environment tear-down
[==========] 9 tests from 2 test suites ran. (79885 ms total)
[  PASSED  ] 8 tests.
[  SKIPPED ] 1 test, listed below:
[  SKIPPED ] MasterReplicationAndRpcSizeLimitTest.TabletReports
I20250411 13:58:22.556661 15812 logging.cc:424] LogThrottler /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/tablet/tablet.cc:2367: suppressed but not reported on 7 messages since previous log ~39 seconds ago
I20250411 13:58:22.556914 15812 logging.cc:424] LogThrottler /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/proxy.cc:239: suppressed but not reported on 20 messages since previous log ~32 seconds ago
==================
WARNING: ThreadSanitizer: data race (pid=15812)
  Write of size 8 at 0x7b0800003cc8 by main thread:
    #0 operator delete(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_new_delete.cpp:126 (master_replication-itest+0x342b39)
    #1 std::__1::_DeallocateCaller::__do_call(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/new:334:12 (master_replication-itest+0x34eac9)
    #2 std::__1::_DeallocateCaller::__do_deallocate_handle_size(void*, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/new:292:12 (master_replication-itest+0x34ea69)
    #3 std::__1::_DeallocateCaller::__do_deallocate_handle_size_align(void*, unsigned long, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/new:268:14 (libkrpc.so+0x126292)
    #4 std::__1::__libcpp_deallocate(void*, unsigned long, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/new:340:3 (libkrpc.so+0x126229)
    #5 std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> >::deallocate(std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1798:10 (libkrpc.so+0x139f79)
    #6 std::__1::allocator_traits<std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> > >::deallocate(std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> >&, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1533:14 (libkrpc.so+0x139e89)
    #7 std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::destroy(std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1837:9 (libkrpc.so+0x139d71)
    #8 std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::~__tree() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1824:3 (libkrpc.so+0x139ce4)
    #9 std::__1::set<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::~set() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/set:605:5 (libkrpc.so+0x133129)
    #10 cxa_at_exit_wrapper(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:394 (master_replication-itest+0x2c28bf)

  Previous read of size 8 at 0x7b0800003cc8 by thread T264:
    #0 std::__1::__tree_end_node<std::__1::__tree_node_base<void*>*>* std::__1::__tree_next_iter<std::__1::__tree_end_node<std::__1::__tree_node_base<void*>*>*, std::__1::__tree_node_base<void*>*>(std::__1::__tree_node_base<void*>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:182:14 (libkrpc.so+0x13db3a)
    #1 std::__1::__tree_const_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long>::operator++() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:929:11 (libkrpc.so+0x133df2)
    #2 void std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::__assign_multi<std::__1::__tree_const_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long> >(std::__1::__tree_const_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long>, std::__1::__tree_const_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long>) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1696:31 (libkrpc.so+0x13bcb2)
    #3 std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::operator=(std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> > const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1648:9 (libkrpc.so+0x13bb09)
    #4 std::__1::set<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::operator=(std::__1::set<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> > const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/set:540:21 (libkrpc.so+0x133c10)
    #5 kudu::rpc::ClientNegotiation::SendNegotiate() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/client_negotiation.cc:327:20 (libkrpc.so+0x12b3b6)
    #6 kudu::rpc::ClientNegotiation::Negotiate(std::__1::unique_ptr<kudu::rpc::ErrorStatusPB, std::__1::default_delete<kudu::rpc::ErrorStatusPB> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/client_negotiation.cc:185:5 (libkrpc.so+0x12abeb)
    #7 kudu::rpc::DoClientNegotiation(kudu::rpc::Connection*, kudu::TriStateFlag, kudu::TriStateFlag, bool, kudu::MonoTime, std::__1::unique_ptr<kudu::rpc::ErrorStatusPB, std::__1::default_delete<kudu::rpc::ErrorStatusPB> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/negotiation.cc:231:3 (libkrpc.so+0x180d5b)
    #8 kudu::rpc::Negotiation::RunNegotiation(scoped_refptr<kudu::rpc::Connection> const&, kudu::TriStateFlag, kudu::TriStateFlag, bool, kudu::MonoTime) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/negotiation.cc:317:9 (libkrpc.so+0x17fea2)
    #9 kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:631:3 (libkrpc.so+0x19f37c)
    #10 decltype(std::__1::forward<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&>(fp)()) std::__1::__invoke<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&>(kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/type_traits:3905:1 (libkrpc.so+0x19f2f9)
    #11 void std::__1::__invoke_void_return_wrapper<void>::__call<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&>(kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__functional_base:348:9 (libkrpc.so+0x19f289)
    #12 std::__1::__function::__alloc_func<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1, std::__1::allocator<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1557:16 (libkrpc.so+0x19f251)
    #13 std::__1::__function::__func<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1, std::__1::allocator<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1731:12 (libkrpc.so+0x19e52d)
    #14 std::__1::__function::__value_func<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1884:16 (libtserver_test_util.so+0x60694)
    #15 std::__1::function<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:2556:12 (libtserver_test_util.so+0x604c9)
    #16 kudu::ThreadPool::DispatchThread() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:837:7 (libkudu_util.so+0x467df4)
    #17 kudu::ThreadPool::CreateThread()::$_2::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:919:48 (libkudu_util.so+0x46b261)
    #18 decltype(std::__1::forward<kudu::ThreadPool::CreateThread()::$_2&>(fp)()) std::__1::__invoke<kudu::ThreadPool::CreateThread()::$_2&>(kudu::ThreadPool::CreateThread()::$_2&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/type_traits:3905:1 (libkudu_util.so+0x46b219)
    #19 void std::__1::__invoke_void_return_wrapper<void>::__call<kudu::ThreadPool::CreateThread()::$_2&>(kudu::ThreadPool::CreateThread()::$_2&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__functional_base:348:9 (libkudu_util.so+0x46b1a9)
    #20 std::__1::__function::__alloc_func<kudu::ThreadPool::CreateThread()::$_2, std::__1::allocator<kudu::ThreadPool::CreateThread()::$_2>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1557:16 (libkudu_util.so+0x46b171)
    #21 std::__1::__function::__func<kudu::ThreadPool::CreateThread()::$_2, std::__1::allocator<kudu::ThreadPool::CreateThread()::$_2>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1731:12 (libkudu_util.so+0x46a46d)
    #22 std::__1::__function::__value_func<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1884:16 (libtserver_test_util.so+0x60694)
    #23 std::__1::function<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:2556:12 (libtserver_test_util.so+0x604c9)
    #24 kudu::Thread::SuperviseThread(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/thread.cc:698:3 (libkudu_util.so+0x44a116)

  As if synchronized via sleep:
    #0 nanosleep /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:363 (master_replication-itest+0x2e7da2)
    #1 base::SleepForNanoseconds(long) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/gutil/sysinfo.cc:96:10 (libgutil.so+0xb5f0a)
    #2 kudu::SleepFor(kudu::MonoDelta const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/monotime.cc:264:3 (libkudu_util.so+0x3ddd86)
    #3 kudu::Subprocess::KillAndWait(int) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/subprocess.cc:692:7 (libkudu_util.so+0x43c190)
    #4 kudu::clock::MiniChronyd::Stop() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/clock/test/mini_chronyd.cc:251:16 (libmini_chronyd.so+0x15190)
    #5 kudu::clock::MiniChronyd::~MiniChronyd() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/clock/test/mini_chronyd.cc:166:5 (libmini_chronyd.so+0x14e8e)
    #6 std::__1::default_delete<kudu::clock::MiniChronyd>::operator()(kudu::clock::MiniChronyd*) const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2262:5 (libmini_cluster.so+0xc18fe)
    #7 std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >::reset(kudu::clock::MiniChronyd*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2517:7 (libmini_cluster.so+0xc186d)
    #8 std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >::~unique_ptr() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2471:19 (libmini_cluster.so+0x9cc2b)
    #9 std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >::destroy(std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1811:92 (libmini_cluster.so+0xb2169)
    #10 void std::__1::allocator_traits<std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::__destroy<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >(std::__1::integral_constant<bool, true>, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >&, std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1703:21 (libmini_cluster.so+0xb2140)
    #11 void std::__1::allocator_traits<std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::destroy<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >(std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >&, std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1544:14 (libmini_cluster.so+0xb2100)
    #12 std::__1::__vector_base<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::__destruct_at_end(std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/vector:428:9 (libmini_cluster.so+0xb20b6)
    #13 std::__1::__vector_base<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::clear() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/vector:371:29 (libmini_cluster.so+0xb1fd4)
    #14 std::__1::__vector_base<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::~__vector_base() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/vector:465:9 (libmini_cluster.so+0xb1dcb)
    #15 std::__1::vector<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::~vector() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/vector:557:5 (libmini_cluster.so+0x98161)
    #16 kudu::cluster::ExternalMiniCluster::~ExternalMiniCluster() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/mini-cluster/external_mini_cluster.cc:173:1 (libmini_cluster.so+0x8457e)
    #17 kudu::cluster::ExternalMiniCluster::~ExternalMiniCluster() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/mini-cluster/external_mini_cluster.cc:171:45 (libmini_cluster.so+0x84689)
    #18 std::__1::default_delete<kudu::cluster::ExternalMiniCluster>::operator()(kudu::cluster::ExternalMiniCluster*) const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2262:5 (master_replication-itest+0x355737)
    #19 std::__1::unique_ptr<kudu::cluster::ExternalMiniCluster, std::__1::default_delete<kudu::cluster::ExternalMiniCluster> >::reset(kudu::cluster::ExternalMiniCluster*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2517:7 (master_replication-itest+0x35569d)
    #20 std::__1::unique_ptr<kudu::cluster::ExternalMiniCluster, std::__1::default_delete<kudu::cluster::ExternalMiniCluster> >::~unique_ptr() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2471:19 (master_replication-itest+0x35552b)
    #21 kudu::master::MasterReplicationAndRpcSizeLimitTest::~MasterReplicationAndRpcSizeLimitTest() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/integration-tests/master_replication-itest.cc:370:7 (master_replication-itest+0x34dff6)
    #22 kudu::master::MasterReplicationAndRpcSizeLimitTest_TabletReports_Test::~MasterReplicationAndRpcSizeLimitTest_TabletReports_Test() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/integration-tests/master_replication-itest.cc:536:1 (master_replication-itest+0x34e029)
    #23 testing::Test::DeleteSelf_() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/include/gtest/gtest.h:318:24 (libgtest.so.1.12.1+0x65467)
    #24 void testing::internal::HandleSehExceptionsInMethodIfSupported<testing::Test, void>(testing::Test*, void (testing::Test::*)(), char const*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2599:10 (libgtest.so.1.12.1+0x64e2f)
    #25 void testing::internal::HandleExceptionsInMethodIfSupported<testing::Test, void>(testing::Test*, void (testing::Test::*)(), char const*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2635:14 (libgtest.so.1.12.1+0x64e2f)
    #26 testing::TestInfo::Run() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2859:5 (libgtest.so.1.12.1+0x43dc2)
    #27 testing::TestSuite::Run() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:3012:30 (libgtest.so.1.12.1+0x44d24)
    #28 testing::internal::UnitTestImpl::RunAllTests() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:5870:44 (libgtest.so.1.12.1+0x59814)
    #29 bool testing::internal::HandleSehExceptionsInMethodIfSupported<testing::internal::UnitTestImpl, bool>(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2599:10 (libgtest.so.1.12.1+0x65cef)
    #30 bool testing::internal::HandleExceptionsInMethodIfSupported<testing::internal::UnitTestImpl, bool>(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2635:14 (libgtest.so.1.12.1+0x65cef)
    #31 testing::UnitTest::Run() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:5444:10 (libgtest.so.1.12.1+0x58dcc)
    #32 RUN_ALL_TESTS() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/gtest/gtest.h:2293:73 (master_replication-itest+0x36544b)
    #33 main /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/test_main.cc:109:10 (master_replication-itest+0x36434c)

  Thread T264 'client-negotiat' (tid=21891, finished) created by thread T102 at:
    #0 pthread_create /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:966 (master_replication-itest+0x2c6875)
    #1 kudu::Thread::StartThread(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::function<void ()>, unsigned long, scoped_refptr<kudu::Thread>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/thread.cc:642:15 (libkudu_util.so+0x44997a)
    #2 kudu::Thread::Create(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::function<void ()>, scoped_refptr<kudu::Thread>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/thread.h:146:12 (libmaster.so+0x2d2ae9)
    #3 kudu::ThreadPool::CreateThread() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:918:10 (libkudu_util.so+0x46702b)
    #4 kudu::ThreadPool::DoSubmit(std::__1::function<void ()>, kudu::ThreadPoolToken*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:703:21 (libkudu_util.so+0x464f7f)
    #5 kudu::ThreadPool::Submit(std::__1::function<void ()>) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:584:10 (libkudu_util.so+0x467592)
    #6 kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:631:3 (libkrpc.so+0x199288)
    #7 kudu::rpc::ReactorThread::FindOrStartConnection(kudu::rpc::ConnectionId const&, kudu::rpc::CredentialsPolicy, scoped_refptr<kudu::rpc::Connection>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:591:14 (libkrpc.so+0x199ce1)
    #8 kudu::rpc::ReactorThread::AssignOutboundCall(std::__1::shared_ptr<kudu::rpc::OutboundCall>) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:375:14 (libkrpc.so+0x1998a6)
    #9 kudu::rpc::AssignOutboundCallTask::Run(kudu::rpc::ReactorThread*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:907:14 (libkrpc.so+0x1a514e)
    #10 kudu::rpc::ReactorThread::AsyncHandler(ev::async&, int) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:349:10 (libkrpc.so+0x198c12)
    #11 void ev::base<ev_async, ev::async>::method_thunk<kudu::rpc::ReactorThread, &(kudu::rpc::ReactorThread::AsyncHandler(ev::async&, int))>(ev_loop*, ev_async*, int) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/ev++.h:497:7 (libkrpc.so+0x1a6373)
    #12 ev_invoke_pending /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/libev-4.33/ev.c:3770:11 (libev.so.4+0x9d31)
    #13 kudu::rpc::ReactorThread::InvokePendingCb(ev_loop*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:205:3 (libkrpc.so+0x197ae5)
    #14 ev_run /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/libev-4.33/ev.c:4190:7 (libev.so.4+0xaf9a)
    #15 ev::loop_ref::run(int) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/ev++.h:211:7 (libkrpc.so+0x1a2bf8)
    #16 kudu::rpc::ReactorThread::RunThread() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:510:9 (libkrpc.so+0x19b0c5)
    #17 kudu::rpc::ReactorThread::Init()::$_0::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:197:48 (libkrpc.so+0x19de11)
    #18 decltype(std::__1::forward<kudu::rpc::ReactorThread::Init()::$_0&>(fp)()) std::__1::__invoke<kudu::rpc::ReactorThread::Init()::$_0&>(kudu::rpc::ReactorThread::Init()::$_0&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/type_traits:3905:1 (libkrpc.so+0x19ddc9)
    #19 void std::__1::__invoke_void_return_wrapper<void>::__call<kudu::rpc::ReactorThread::Init()::$_0&>(kudu::rpc::ReactorThread::Init()::$_0&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__functional_base:348:9 (libkrpc.so+0x19dd59)
    #20 std::__1::__function::__alloc_func<kudu::rpc::ReactorThread::Init()::$_0, std::__1::allocator<kudu::rpc::ReactorThread::Init()::$_0>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1557:16 (libkrpc.so+0x19dd21)
    #21 std::__1::__function::__func<kudu::rpc::ReactorThread::Init()::$_0, std::__1::allocator<kudu::rpc::ReactorThread::Init()::$_0>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1731:12 (libkrpc.so+0x19d01d)
    #22 std::__1::__function::__value_func<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1884:16 (libtserver_test_util.so+0x60694)
    #23 std::__1::function<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:2556:12 (libtserver_test_util.so+0x604c9)
    #24 kudu::Thread::SuperviseThread(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/thread.cc:698:3 (libkudu_util.so+0x44a116)

SUMMARY: ThreadSanitizer: data race /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_new_delete.cpp:126 in operator delete(void*)
==================
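
[editor's note] Reading the two stacks together: thread T264 ('client-negotiat') is inside kudu::rpc::ClientNegotiation::SendNegotiate() copying a process-wide std::set<RpcFeatureFlag> (the set::operator= frames in the read stack), while the main thread, already past main() and running atexit handlers (cxa_at_exit_wrapper in the write stack), destroys that same static set and frees its tree nodes. A minimal standalone sketch of this race shape, with all names hypothetical and no Kudu code involved:

    // Sketch: a function-local static std::set destroyed via atexit on the
    // main thread while a detached worker is still copying it.
    #include <set>
    #include <thread>

    // Stand-in for the process-wide supported-feature-flags set referenced
    // from ClientNegotiation::SendNegotiate() in the read stack above.
    const std::set<int>& SupportedFeatureFlags() {
      static std::set<int> flags = {1, 2, 3};  // registered for atexit destruction
      return flags;
    }

    int main() {
      // Stand-in for negotiation thread T264: copies the static set, exactly
      // like the set::operator= / __tree_next_iter frames in the read stack.
      std::thread negotiator([] {
        std::set<int> local;
        local = SupportedFeatureFlags();  // walks the static set's tree nodes
      });
      negotiator.detach();  // nothing joins this thread before exit

      // main() returns; atexit destructors may free the set's nodes while the
      // detached thread is still iterating them -> TSAN flags a data race.
      return 0;
    }

A common remedy for this pattern is to intentionally leak such process-wide singletons (e.g. static const auto* flags = new std::set<int>{...};) so no atexit destructor can race with late-running threads, or to ensure all negotiation/reactor threads are joined before process exit; which of these (if either) is appropriate for this Kudu code path is a judgment for the maintainers, not something this log establishes.
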
==================
WARNING: ThreadSanitizer: data race (pid=15812)
  Write of size 8 at 0x7b0800003cd0 by main thread:
    #0 operator delete(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_new_delete.cpp:126 (master_replication-itest+0x342b39)
    #1 std::__1::_DeallocateCaller::__do_call(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/new:334:12 (master_replication-itest+0x34eac9)
    #2 std::__1::_DeallocateCaller::__do_deallocate_handle_size(void*, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/new:292:12 (master_replication-itest+0x34ea69)
    #3 std::__1::_DeallocateCaller::__do_deallocate_handle_size_align(void*, unsigned long, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/new:268:14 (libkrpc.so+0x126292)
    #4 std::__1::__libcpp_deallocate(void*, unsigned long, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/new:340:3 (libkrpc.so+0x126229)
    #5 std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> >::deallocate(std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1798:10 (libkrpc.so+0x139f79)
    #6 std::__1::allocator_traits<std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> > >::deallocate(std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> >&, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1533:14 (libkrpc.so+0x139e89)
    #7 std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::destroy(std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1837:9 (libkrpc.so+0x139d71)
    #8 std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::~__tree() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1824:3 (libkrpc.so+0x139ce4)
    #9 std::__1::set<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::~set() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/set:605:5 (libkrpc.so+0x133129)
    #10 cxa_at_exit_wrapper(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:394 (master_replication-itest+0x2c28bf)

  Previous read of size 8 at 0x7b0800003cd0 by thread T264:
    #0 bool std::__1::__tree_is_left_child<std::__1::__tree_node_base<void*>*>(std::__1::__tree_node_base<void*>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:81:24 (libkrpc.so+0x13c67b)
    #1 std::__1::__tree_end_node<std::__1::__tree_node_base<void*>*>* std::__1::__tree_next_iter<std::__1::__tree_end_node<std::__1::__tree_node_base<void*>*>*, std::__1::__tree_node_base<void*>*>(std::__1::__tree_node_base<void*>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:184:13 (libkrpc.so+0x13db64)
    #2 std::__1::__tree_const_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long>::operator++() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:929:11 (libkrpc.so+0x133df2)
    #3 void std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::__assign_multi<std::__1::__tree_const_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long> >(std::__1::__tree_const_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long>, std::__1::__tree_const_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long>) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1696:31 (libkrpc.so+0x13bcb2)
    #4 std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::operator=(std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> > const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1648:9 (libkrpc.so+0x13bb09)
    #5 std::__1::set<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::operator=(std::__1::set<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> > const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/set:540:21 (libkrpc.so+0x133c10)
    #6 kudu::rpc::ClientNegotiation::SendNegotiate() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/client_negotiation.cc:327:20 (libkrpc.so+0x12b3b6)
    #7 kudu::rpc::ClientNegotiation::Negotiate(std::__1::unique_ptr<kudu::rpc::ErrorStatusPB, std::__1::default_delete<kudu::rpc::ErrorStatusPB> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/client_negotiation.cc:185:5 (libkrpc.so+0x12abeb)
    #8 kudu::rpc::DoClientNegotiation(kudu::rpc::Connection*, kudu::TriStateFlag, kudu::TriStateFlag, bool, kudu::MonoTime, std::__1::unique_ptr<kudu::rpc::ErrorStatusPB, std::__1::default_delete<kudu::rpc::ErrorStatusPB> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/negotiation.cc:231:3 (libkrpc.so+0x180d5b)
    #9 kudu::rpc::Negotiation::RunNegotiation(scoped_refptr<kudu::rpc::Connection> const&, kudu::TriStateFlag, kudu::TriStateFlag, bool, kudu::MonoTime) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/negotiation.cc:317:9 (libkrpc.so+0x17fea2)
    #10 kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:631:3 (libkrpc.so+0x19f37c)
    #11 decltype(std::__1::forward<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&>(fp)()) std::__1::__invoke<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&>(kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/type_traits:3905:1 (libkrpc.so+0x19f2f9)
    #12 void std::__1::__invoke_void_return_wrapper<void>::__call<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&>(kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__functional_base:348:9 (libkrpc.so+0x19f289)
    #13 std::__1::__function::__alloc_func<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1, std::__1::allocator<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1557:16 (libkrpc.so+0x19f251)
    #14 std::__1::__function::__func<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1, std::__1::allocator<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1731:12 (libkrpc.so+0x19e52d)
    #15 std::__1::__function::__value_func<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1884:16 (libtserver_test_util.so+0x60694)
    #16 std::__1::function<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:2556:12 (libtserver_test_util.so+0x604c9)
    #17 kudu::ThreadPool::DispatchThread() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:837:7 (libkudu_util.so+0x467df4)
    #18 kudu::ThreadPool::CreateThread()::$_2::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:919:48 (libkudu_util.so+0x46b261)
    #19 decltype(std::__1::forward<kudu::ThreadPool::CreateThread()::$_2&>(fp)()) std::__1::__invoke<kudu::ThreadPool::CreateThread()::$_2&>(kudu::ThreadPool::CreateThread()::$_2&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/type_traits:3905:1 (libkudu_util.so+0x46b219)
    #20 void std::__1::__invoke_void_return_wrapper<void>::__call<kudu::ThreadPool::CreateThread()::$_2&>(kudu::ThreadPool::CreateThread()::$_2&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__functional_base:348:9 (libkudu_util.so+0x46b1a9)
    #21 std::__1::__function::__alloc_func<kudu::ThreadPool::CreateThread()::$_2, std::__1::allocator<kudu::ThreadPool::CreateThread()::$_2>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1557:16 (libkudu_util.so+0x46b171)
    #22 std::__1::__function::__func<kudu::ThreadPool::CreateThread()::$_2, std::__1::allocator<kudu::ThreadPool::CreateThread()::$_2>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1731:12 (libkudu_util.so+0x46a46d)
    #23 std::__1::__function::__value_func<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1884:16 (libtserver_test_util.so+0x60694)
    #24 std::__1::function<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:2556:12 (libtserver_test_util.so+0x604c9)
    #25 kudu::Thread::SuperviseThread(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/thread.cc:698:3 (libkudu_util.so+0x44a116)

  As if synchronized via sleep:
    #0 nanosleep /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:363 (master_replication-itest+0x2e7da2)
    #1 base::SleepForNanoseconds(long) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/gutil/sysinfo.cc:96:10 (libgutil.so+0xb5f0a)
    #2 kudu::SleepFor(kudu::MonoDelta const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/monotime.cc:264:3 (libkudu_util.so+0x3ddd86)
    #3 kudu::Subprocess::KillAndWait(int) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/subprocess.cc:692:7 (libkudu_util.so+0x43c190)
    #4 kudu::clock::MiniChronyd::Stop() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/clock/test/mini_chronyd.cc:251:16 (libmini_chronyd.so+0x15190)
    #5 kudu::clock::MiniChronyd::~MiniChronyd() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/clock/test/mini_chronyd.cc:166:5 (libmini_chronyd.so+0x14e8e)
    #6 std::__1::default_delete<kudu::clock::MiniChronyd>::operator()(kudu::clock::MiniChronyd*) const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2262:5 (libmini_cluster.so+0xc18fe)
    #7 std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >::reset(kudu::clock::MiniChronyd*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2517:7 (libmini_cluster.so+0xc186d)
    #8 std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >::~unique_ptr() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2471:19 (libmini_cluster.so+0x9cc2b)
    #9 std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >::destroy(std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1811:92 (libmini_cluster.so+0xb2169)
    #10 void std::__1::allocator_traits<std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::__destroy<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >(std::__1::integral_constant<bool, true>, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >&, std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1703:21 (libmini_cluster.so+0xb2140)
    #11 void std::__1::allocator_traits<std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::destroy<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >(std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >&, std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1544:14 (libmini_cluster.so+0xb2100)
    #12 std::__1::__vector_base<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::__destruct_at_end(std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/vector:428:9 (libmini_cluster.so+0xb20b6)
    #13 std::__1::__vector_base<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::clear() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/vector:371:29 (libmini_cluster.so+0xb1fd4)
    #14 std::__1::__vector_base<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::~__vector_base() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/vector:465:9 (libmini_cluster.so+0xb1dcb)
    #15 std::__1::vector<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::~vector() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/vector:557:5 (libmini_cluster.so+0x98161)
    #16 kudu::cluster::ExternalMiniCluster::~ExternalMiniCluster() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/mini-cluster/external_mini_cluster.cc:173:1 (libmini_cluster.so+0x8457e)
    #17 kudu::cluster::ExternalMiniCluster::~ExternalMiniCluster() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/mini-cluster/external_mini_cluster.cc:171:45 (libmini_cluster.so+0x84689)
    #18 std::__1::default_delete<kudu::cluster::ExternalMiniCluster>::operator()(kudu::cluster::ExternalMiniCluster*) const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2262:5 (master_replication-itest+0x355737)
    #19 std::__1::unique_ptr<kudu::cluster::ExternalMiniCluster, std::__1::default_delete<kudu::cluster::ExternalMiniCluster> >::reset(kudu::cluster::ExternalMiniCluster*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2517:7 (master_replication-itest+0x35569d)
    #20 std::__1::unique_ptr<kudu::cluster::ExternalMiniCluster, std::__1::default_delete<kudu::cluster::ExternalMiniCluster> >::~unique_ptr() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2471:19 (master_replication-itest+0x35552b)
    #21 kudu::master::MasterReplicationAndRpcSizeLimitTest::~MasterReplicationAndRpcSizeLimitTest() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/integration-tests/master_replication-itest.cc:370:7 (master_replication-itest+0x34dff6)
    #22 kudu::master::MasterReplicationAndRpcSizeLimitTest_TabletReports_Test::~MasterReplicationAndRpcSizeLimitTest_TabletReports_Test() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/integration-tests/master_replication-itest.cc:536:1 (master_replication-itest+0x34e029)
    #23 testing::Test::DeleteSelf_() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/include/gtest/gtest.h:318:24 (libgtest.so.1.12.1+0x65467)
    #24 void testing::internal::HandleSehExceptionsInMethodIfSupported<testing::Test, void>(testing::Test*, void (testing::Test::*)(), char const*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2599:10 (libgtest.so.1.12.1+0x64e2f)
    #25 void testing::internal::HandleExceptionsInMethodIfSupported<testing::Test, void>(testing::Test*, void (testing::Test::*)(), char const*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2635:14 (libgtest.so.1.12.1+0x64e2f)
    #26 testing::TestInfo::Run() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2859:5 (libgtest.so.1.12.1+0x43dc2)
    #27 testing::TestSuite::Run() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:3012:30 (libgtest.so.1.12.1+0x44d24)
    #28 testing::internal::UnitTestImpl::RunAllTests() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:5870:44 (libgtest.so.1.12.1+0x59814)
    #29 bool testing::internal::HandleSehExceptionsInMethodIfSupported<testing::internal::UnitTestImpl, bool>(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2599:10 (libgtest.so.1.12.1+0x65cef)
    #30 bool testing::internal::HandleExceptionsInMethodIfSupported<testing::internal::UnitTestImpl, bool>(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2635:14 (libgtest.so.1.12.1+0x65cef)
    #31 testing::UnitTest::Run() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:5444:10 (libgtest.so.1.12.1+0x58dcc)
    #32 RUN_ALL_TESTS() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/gtest/gtest.h:2293:73 (master_replication-itest+0x36544b)
    #33 main /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/test_main.cc:109:10 (master_replication-itest+0x36434c)

  Thread T264 'client-negotiat' (tid=21891, finished) created by thread T102 at:
    #0 pthread_create /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:966 (master_replication-itest+0x2c6875)
    #1 kudu::Thread::StartThread(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::function<void ()>, unsigned long, scoped_refptr<kudu::Thread>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/thread.cc:642:15 (libkudu_util.so+0x44997a)
    #2 kudu::Thread::Create(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::function<void ()>, scoped_refptr<kudu::Thread>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/thread.h:146:12 (libmaster.so+0x2d2ae9)
    #3 kudu::ThreadPool::CreateThread() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:918:10 (libkudu_util.so+0x46702b)
    #4 kudu::ThreadPool::DoSubmit(std::__1::function<void ()>, kudu::ThreadPoolToken*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:703:21 (libkudu_util.so+0x464f7f)
    #5 kudu::ThreadPool::Submit(std::__1::function<void ()>) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:584:10 (libkudu_util.so+0x467592)
    #6 kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:631:3 (libkrpc.so+0x199288)
    #7 kudu::rpc::ReactorThread::FindOrStartConnection(kudu::rpc::ConnectionId const&, kudu::rpc::CredentialsPolicy, scoped_refptr<kudu::rpc::Connection>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:591:14 (libkrpc.so+0x199ce1)
    #8 kudu::rpc::ReactorThread::AssignOutboundCall(std::__1::shared_ptr<kudu::rpc::OutboundCall>) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:375:14 (libkrpc.so+0x1998a6)
    #9 kudu::rpc::AssignOutboundCallTask::Run(kudu::rpc::ReactorThread*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:907:14 (libkrpc.so+0x1a514e)
    #10 kudu::rpc::ReactorThread::AsyncHandler(ev::async&, int) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:349:10 (libkrpc.so+0x198c12)
    #11 void ev::base<ev_async, ev::async>::method_thunk<kudu::rpc::ReactorThread, &(kudu::rpc::ReactorThread::AsyncHandler(ev::async&, int))>(ev_loop*, ev_async*, int) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/ev++.h:497:7 (libkrpc.so+0x1a6373)
    #12 ev_invoke_pending /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/libev-4.33/ev.c:3770:11 (libev.so.4+0x9d31)
    #13 kudu::rpc::ReactorThread::InvokePendingCb(ev_loop*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:205:3 (libkrpc.so+0x197ae5)
    #14 ev_run /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/libev-4.33/ev.c:4190:7 (libev.so.4+0xaf9a)
    #15 ev::loop_ref::run(int) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/ev++.h:211:7 (libkrpc.so+0x1a2bf8)
    #16 kudu::rpc::ReactorThread::RunThread() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:510:9 (libkrpc.so+0x19b0c5)
    #17 kudu::rpc::ReactorThread::Init()::$_0::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:197:48 (libkrpc.so+0x19de11)
    #18 decltype(std::__1::forward<kudu::rpc::ReactorThread::Init()::$_0&>(fp)()) std::__1::__invoke<kudu::rpc::ReactorThread::Init()::$_0&>(kudu::rpc::ReactorThread::Init()::$_0&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/type_traits:3905:1 (libkrpc.so+0x19ddc9)
    #19 void std::__1::__invoke_void_return_wrapper<void>::__call<kudu::rpc::ReactorThread::Init()::$_0&>(kudu::rpc::ReactorThread::Init()::$_0&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__functional_base:348:9 (libkrpc.so+0x19dd59)
    #20 std::__1::__function::__alloc_func<kudu::rpc::ReactorThread::Init()::$_0, std::__1::allocator<kudu::rpc::ReactorThread::Init()::$_0>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1557:16 (libkrpc.so+0x19dd21)
    #21 std::__1::__function::__func<kudu::rpc::ReactorThread::Init()::$_0, std::__1::allocator<kudu::rpc::ReactorThread::Init()::$_0>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1731:12 (libkrpc.so+0x19d01d)
    #22 std::__1::__function::__value_func<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1884:16 (libtserver_test_util.so+0x60694)
    #23 std::__1::function<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:2556:12 (libtserver_test_util.so+0x604c9)
    #24 kudu::Thread::SuperviseThread(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/thread.cc:698:3 (libkudu_util.so+0x44a116)

SUMMARY: ThreadSanitizer: data race /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_new_delete.cpp:126 in operator delete(void*)
==================
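Analysis: the remaining reports share one shape. The write stack shows the main thread, inside cxa_at_exit_wrapper, running the destructor of a process-wide static std::set<kudu::rpc::RpcFeatureFlag> after main() has returned; the read stack shows a 'client-negotiat' pool thread copying that same set in ClientNegotiation::SendNegotiate() (client_negotiation.cc:327). The only ordering between the two is the SleepFor() inside Subprocess::KillAndWait() ("as if synchronized via sleep"), which TSAN does not treat as a happens-before edge, so the at-exit destruction appears to race with the earlier read. A minimal sketch of the pattern, with hypothetical names (kSupportedFeatures, Negotiate) standing in for the real symbols:

    #include <set>
    #include <thread>

    enum RpcFeatureFlag { APPLICATION_FEATURE_FLAGS, TLS };

    // A static with a non-trivial destructor is registered via atexit and
    // torn down when main() returns, whether or not worker threads that
    // touched it were ever joined.
    static const std::set<RpcFeatureFlag> kSupportedFeatures = {
        APPLICATION_FEATURE_FLAGS, TLS};

    void Negotiate() {
      // Copies the static set node by node; this read is unsynchronized
      // against the atexit destructor if the thread outlives main().
      std::set<RpcFeatureFlag> copy = kSupportedFeatures;
      (void)copy;
    }

    int main() {
      // Stands in for the negotiation pool thread the test leaves behind.
      std::thread(Negotiate).detach();
      return 0;  // atexit destructors may now race with the detached thread
    }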
==================
WARNING: ThreadSanitizer: data race (pid=15812)
  Write of size 8 at 0x7b0800003cd8 by main thread:
    #0 operator delete(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_new_delete.cpp:126 (master_replication-itest+0x342b39)
    #1 std::__1::_DeallocateCaller::__do_call(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/new:334:12 (master_replication-itest+0x34eac9)
    #2 std::__1::_DeallocateCaller::__do_deallocate_handle_size(void*, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/new:292:12 (master_replication-itest+0x34ea69)
    #3 std::__1::_DeallocateCaller::__do_deallocate_handle_size_align(void*, unsigned long, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/new:268:14 (libkrpc.so+0x126292)
    #4 std::__1::__libcpp_deallocate(void*, unsigned long, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/new:340:3 (libkrpc.so+0x126229)
    #5 std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> >::deallocate(std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1798:10 (libkrpc.so+0x139f79)
    #6 std::__1::allocator_traits<std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> > >::deallocate(std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> >&, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, unsigned long) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1533:14 (libkrpc.so+0x139e89)
    #7 std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::destroy(std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1837:9 (libkrpc.so+0x139d71)
    #8 std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::~__tree() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1824:3 (libkrpc.so+0x139ce4)
    #9 std::__1::set<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::~set() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/set:605:5 (libkrpc.so+0x133129)
    #10 cxa_at_exit_wrapper(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:394 (master_replication-itest+0x2c28bf)

  Previous read of size 4 at 0x7b0800003cdc by thread T264:
    #0 void std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> >::construct<kudu::rpc::RpcFeatureFlag, kudu::rpc::RpcFeatureFlag const&>(kudu::rpc::RpcFeatureFlag*, kudu::rpc::RpcFeatureFlag const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1809:35 (libkrpc.so+0x13d08b)
    #1 void std::__1::allocator_traits<std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> > >::__construct<kudu::rpc::RpcFeatureFlag, kudu::rpc::RpcFeatureFlag const&>(std::__1::integral_constant<bool, true>, std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> >&, kudu::rpc::RpcFeatureFlag*, kudu::rpc::RpcFeatureFlag const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1687:21 (libkrpc.so+0x13d041)
    #2 void std::__1::allocator_traits<std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> > >::construct<kudu::rpc::RpcFeatureFlag, kudu::rpc::RpcFeatureFlag const&>(std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> >&, kudu::rpc::RpcFeatureFlag*, kudu::rpc::RpcFeatureFlag const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1538:14 (libkrpc.so+0x13cdd1)
    #3 std::__1::unique_ptr<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>, std::__1::__tree_node_destructor<std::__1::allocator<std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*> > > > std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::__construct_node<kudu::rpc::RpcFeatureFlag const&>(kudu::rpc::RpcFeatureFlag const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:2194:5 (libkrpc.so+0x13cb28)
    #4 std::__1::__tree_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long> std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::__emplace_multi<kudu::rpc::RpcFeatureFlag const&>(kudu::rpc::RpcFeatureFlag const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:2242:25 (libkrpc.so+0x13c9d6)
    #5 std::__1::__tree_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long> std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::__insert_multi<kudu::rpc::RpcFeatureFlag const&>(kudu::rpc::RpcFeatureFlag const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1326:16 (libkrpc.so+0x13c048)
    #6 void std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::__assign_multi<std::__1::__tree_const_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long> >(std::__1::__tree_const_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long>, std::__1::__tree_const_iterator<kudu::rpc::RpcFeatureFlag, std::__1::__tree_node<kudu::rpc::RpcFeatureFlag, void*>*, long>) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1697:9 (libkrpc.so+0x13bcaa)
    #7 std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::operator=(std::__1::__tree<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> > const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__tree:1648:9 (libkrpc.so+0x13bb09)
    #8 std::__1::set<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> >::operator=(std::__1::set<kudu::rpc::RpcFeatureFlag, std::__1::less<kudu::rpc::RpcFeatureFlag>, std::__1::allocator<kudu::rpc::RpcFeatureFlag> > const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/set:540:21 (libkrpc.so+0x133c10)
    #9 kudu::rpc::ClientNegotiation::SendNegotiate() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/client_negotiation.cc:327:20 (libkrpc.so+0x12b3b6)
    #10 kudu::rpc::ClientNegotiation::Negotiate(std::__1::unique_ptr<kudu::rpc::ErrorStatusPB, std::__1::default_delete<kudu::rpc::ErrorStatusPB> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/client_negotiation.cc:185:5 (libkrpc.so+0x12abeb)
    #11 kudu::rpc::DoClientNegotiation(kudu::rpc::Connection*, kudu::TriStateFlag, kudu::TriStateFlag, bool, kudu::MonoTime, std::__1::unique_ptr<kudu::rpc::ErrorStatusPB, std::__1::default_delete<kudu::rpc::ErrorStatusPB> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/negotiation.cc:231:3 (libkrpc.so+0x180d5b)
    #12 kudu::rpc::Negotiation::RunNegotiation(scoped_refptr<kudu::rpc::Connection> const&, kudu::TriStateFlag, kudu::TriStateFlag, bool, kudu::MonoTime) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/negotiation.cc:317:9 (libkrpc.so+0x17fea2)
    #13 kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:631:3 (libkrpc.so+0x19f37c)
    #14 decltype(std::__1::forward<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&>(fp)()) std::__1::__invoke<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&>(kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/type_traits:3905:1 (libkrpc.so+0x19f2f9)
    #15 void std::__1::__invoke_void_return_wrapper<void>::__call<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&>(kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__functional_base:348:9 (libkrpc.so+0x19f289)
    #16 std::__1::__function::__alloc_func<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1, std::__1::allocator<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1557:16 (libkrpc.so+0x19f251)
    #17 std::__1::__function::__func<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1, std::__1::allocator<kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&)::$_1>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1731:12 (libkrpc.so+0x19e52d)
    #18 std::__1::__function::__value_func<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1884:16 (libtserver_test_util.so+0x60694)
    #19 std::__1::function<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:2556:12 (libtserver_test_util.so+0x604c9)
    #20 kudu::ThreadPool::DispatchThread() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:837:7 (libkudu_util.so+0x467df4)
    #21 kudu::ThreadPool::CreateThread()::$_2::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:919:48 (libkudu_util.so+0x46b261)
    #22 decltype(std::__1::forward<kudu::ThreadPool::CreateThread()::$_2&>(fp)()) std::__1::__invoke<kudu::ThreadPool::CreateThread()::$_2&>(kudu::ThreadPool::CreateThread()::$_2&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/type_traits:3905:1 (libkudu_util.so+0x46b219)
    #23 void std::__1::__invoke_void_return_wrapper<void>::__call<kudu::ThreadPool::CreateThread()::$_2&>(kudu::ThreadPool::CreateThread()::$_2&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__functional_base:348:9 (libkudu_util.so+0x46b1a9)
    #24 std::__1::__function::__alloc_func<kudu::ThreadPool::CreateThread()::$_2, std::__1::allocator<kudu::ThreadPool::CreateThread()::$_2>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1557:16 (libkudu_util.so+0x46b171)
    #25 std::__1::__function::__func<kudu::ThreadPool::CreateThread()::$_2, std::__1::allocator<kudu::ThreadPool::CreateThread()::$_2>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1731:12 (libkudu_util.so+0x46a46d)
    #26 std::__1::__function::__value_func<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1884:16 (libtserver_test_util.so+0x60694)
    #27 std::__1::function<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:2556:12 (libtserver_test_util.so+0x604c9)
    #28 kudu::Thread::SuperviseThread(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/thread.cc:698:3 (libkudu_util.so+0x44a116)

  As if synchronized via sleep:
    #0 nanosleep /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:363 (master_replication-itest+0x2e7da2)
    #1 base::SleepForNanoseconds(long) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/gutil/sysinfo.cc:96:10 (libgutil.so+0xb5f0a)
    #2 kudu::SleepFor(kudu::MonoDelta const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/monotime.cc:264:3 (libkudu_util.so+0x3ddd86)
    #3 kudu::Subprocess::KillAndWait(int) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/subprocess.cc:692:7 (libkudu_util.so+0x43c190)
    #4 kudu::clock::MiniChronyd::Stop() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/clock/test/mini_chronyd.cc:251:16 (libmini_chronyd.so+0x15190)
    #5 kudu::clock::MiniChronyd::~MiniChronyd() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/clock/test/mini_chronyd.cc:166:5 (libmini_chronyd.so+0x14e8e)
    #6 std::__1::default_delete<kudu::clock::MiniChronyd>::operator()(kudu::clock::MiniChronyd*) const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2262:5 (libmini_cluster.so+0xc18fe)
    #7 std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >::reset(kudu::clock::MiniChronyd*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2517:7 (libmini_cluster.so+0xc186d)
    #8 std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >::~unique_ptr() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2471:19 (libmini_cluster.so+0x9cc2b)
    #9 std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >::destroy(std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1811:92 (libmini_cluster.so+0xb2169)
    #10 void std::__1::allocator_traits<std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::__destroy<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >(std::__1::integral_constant<bool, true>, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >&, std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1703:21 (libmini_cluster.so+0xb2140)
    #11 void std::__1::allocator_traits<std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::destroy<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >(std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > >&, std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:1544:14 (libmini_cluster.so+0xb2100)
    #12 std::__1::__vector_base<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::__destruct_at_end(std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/vector:428:9 (libmini_cluster.so+0xb20b6)
    #13 std::__1::__vector_base<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::clear() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/vector:371:29 (libmini_cluster.so+0xb1fd4)
    #14 std::__1::__vector_base<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::~__vector_base() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/vector:465:9 (libmini_cluster.so+0xb1dcb)
    #15 std::__1::vector<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> >, std::__1::allocator<std::__1::unique_ptr<kudu::clock::MiniChronyd, std::__1::default_delete<kudu::clock::MiniChronyd> > > >::~vector() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/vector:557:5 (libmini_cluster.so+0x98161)
    #16 kudu::cluster::ExternalMiniCluster::~ExternalMiniCluster() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/mini-cluster/external_mini_cluster.cc:173:1 (libmini_cluster.so+0x8457e)
    #17 kudu::cluster::ExternalMiniCluster::~ExternalMiniCluster() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/mini-cluster/external_mini_cluster.cc:171:45 (libmini_cluster.so+0x84689)
    #18 std::__1::default_delete<kudu::cluster::ExternalMiniCluster>::operator()(kudu::cluster::ExternalMiniCluster*) const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2262:5 (master_replication-itest+0x355737)
    #19 std::__1::unique_ptr<kudu::cluster::ExternalMiniCluster, std::__1::default_delete<kudu::cluster::ExternalMiniCluster> >::reset(kudu::cluster::ExternalMiniCluster*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2517:7 (master_replication-itest+0x35569d)
    #20 std::__1::unique_ptr<kudu::cluster::ExternalMiniCluster, std::__1::default_delete<kudu::cluster::ExternalMiniCluster> >::~unique_ptr() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/memory:2471:19 (master_replication-itest+0x35552b)
    #21 kudu::master::MasterReplicationAndRpcSizeLimitTest::~MasterReplicationAndRpcSizeLimitTest() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/integration-tests/master_replication-itest.cc:370:7 (master_replication-itest+0x34dff6)
    #22 kudu::master::MasterReplicationAndRpcSizeLimitTest_TabletReports_Test::~MasterReplicationAndRpcSizeLimitTest_TabletReports_Test() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/integration-tests/master_replication-itest.cc:536:1 (master_replication-itest+0x34e029)
    #23 testing::Test::DeleteSelf_() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/include/gtest/gtest.h:318:24 (libgtest.so.1.12.1+0x65467)
    #24 void testing::internal::HandleSehExceptionsInMethodIfSupported<testing::Test, void>(testing::Test*, void (testing::Test::*)(), char const*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2599:10 (libgtest.so.1.12.1+0x64e2f)
    #25 void testing::internal::HandleExceptionsInMethodIfSupported<testing::Test, void>(testing::Test*, void (testing::Test::*)(), char const*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2635:14 (libgtest.so.1.12.1+0x64e2f)
    #26 testing::TestInfo::Run() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2859:5 (libgtest.so.1.12.1+0x43dc2)
    #27 testing::TestSuite::Run() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:3012:30 (libgtest.so.1.12.1+0x44d24)
    #28 testing::internal::UnitTestImpl::RunAllTests() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:5870:44 (libgtest.so.1.12.1+0x59814)
    #29 bool testing::internal::HandleSehExceptionsInMethodIfSupported<testing::internal::UnitTestImpl, bool>(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2599:10 (libgtest.so.1.12.1+0x65cef)
    #30 bool testing::internal::HandleExceptionsInMethodIfSupported<testing::internal::UnitTestImpl, bool>(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:2635:14 (libgtest.so.1.12.1+0x65cef)
    #31 testing::UnitTest::Run() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/googletest-release-1.12.1/googletest/src/gtest.cc:5444:10 (libgtest.so.1.12.1+0x58dcc)
    #32 RUN_ALL_TESTS() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/gtest/gtest.h:2293:73 (master_replication-itest+0x36544b)
    #33 main /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/test_main.cc:109:10 (master_replication-itest+0x36434c)

  Thread T264 'client-negotiat' (tid=21891, finished) created by thread T102 at:
    #0 pthread_create /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:966 (master_replication-itest+0x2c6875)
    #1 kudu::Thread::StartThread(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::function<void ()>, unsigned long, scoped_refptr<kudu::Thread>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/thread.cc:642:15 (libkudu_util.so+0x44997a)
    #2 kudu::Thread::Create(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::function<void ()>, scoped_refptr<kudu::Thread>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/thread.h:146:12 (libmaster.so+0x2d2ae9)
    #3 kudu::ThreadPool::CreateThread() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:918:10 (libkudu_util.so+0x46702b)
    #4 kudu::ThreadPool::DoSubmit(std::__1::function<void ()>, kudu::ThreadPoolToken*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:703:21 (libkudu_util.so+0x464f7f)
    #5 kudu::ThreadPool::Submit(std::__1::function<void ()>) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/threadpool.cc:584:10 (libkudu_util.so+0x467592)
    #6 kudu::rpc::ReactorThread::StartConnectionNegotiation(scoped_refptr<kudu::rpc::Connection> const&) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:631:3 (libkrpc.so+0x199288)
    #7 kudu::rpc::ReactorThread::FindOrStartConnection(kudu::rpc::ConnectionId const&, kudu::rpc::CredentialsPolicy, scoped_refptr<kudu::rpc::Connection>*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:591:14 (libkrpc.so+0x199ce1)
    #8 kudu::rpc::ReactorThread::AssignOutboundCall(std::__1::shared_ptr<kudu::rpc::OutboundCall>) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:375:14 (libkrpc.so+0x1998a6)
    #9 kudu::rpc::AssignOutboundCallTask::Run(kudu::rpc::ReactorThread*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:907:14 (libkrpc.so+0x1a514e)
    #10 kudu::rpc::ReactorThread::AsyncHandler(ev::async&, int) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:349:10 (libkrpc.so+0x198c12)
    #11 void ev::base<ev_async, ev::async>::method_thunk<kudu::rpc::ReactorThread, &(kudu::rpc::ReactorThread::AsyncHandler(ev::async&, int))>(ev_loop*, ev_async*, int) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/ev++.h:497:7 (libkrpc.so+0x1a6373)
    #12 ev_invoke_pending /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/libev-4.33/ev.c:3770:11 (libev.so.4+0x9d31)
    #13 kudu::rpc::ReactorThread::InvokePendingCb(ev_loop*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:205:3 (libkrpc.so+0x197ae5)
    #14 ev_run /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/libev-4.33/ev.c:4190:7 (libev.so.4+0xaf9a)
    #15 ev::loop_ref::run(int) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/ev++.h:211:7 (libkrpc.so+0x1a2bf8)
    #16 kudu::rpc::ReactorThread::RunThread() /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:510:9 (libkrpc.so+0x19b0c5)
    #17 kudu::rpc::ReactorThread::Init()::$_0::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/rpc/reactor.cc:197:48 (libkrpc.so+0x19de11)
    #18 decltype(std::__1::forward<kudu::rpc::ReactorThread::Init()::$_0&>(fp)()) std::__1::__invoke<kudu::rpc::ReactorThread::Init()::$_0&>(kudu::rpc::ReactorThread::Init()::$_0&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/type_traits:3905:1 (libkrpc.so+0x19ddc9)
    #19 void std::__1::__invoke_void_return_wrapper<void>::__call<kudu::rpc::ReactorThread::Init()::$_0&>(kudu::rpc::ReactorThread::Init()::$_0&) /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/__functional_base:348:9 (libkrpc.so+0x19dd59)
    #20 std::__1::__function::__alloc_func<kudu::rpc::ReactorThread::Init()::$_0, std::__1::allocator<kudu::rpc::ReactorThread::Init()::$_0>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1557:16 (libkrpc.so+0x19dd21)
    #21 std::__1::__function::__func<kudu::rpc::ReactorThread::Init()::$_0, std::__1::allocator<kudu::rpc::ReactorThread::Init()::$_0>, void ()>::operator()() /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1731:12 (libkrpc.so+0x19d01d)
    #22 std::__1::__function::__value_func<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:1884:16 (libtserver_test_util.so+0x60694)
    #23 std::__1::function<void ()>::operator()() const /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/installed/tsan/include/c++/v1/functional:2556:12 (libtserver_test_util.so+0x604c9)
    #24 kudu::Thread::SuperviseThread(void*) /home/jenkins-slave/workspace/build_and_test_flaky@2/src/kudu/util/thread.cc:698:3 (libkudu_util.so+0x44a116)

SUMMARY: ThreadSanitizer: data race /home/jenkins-slave/workspace/build_and_test_flaky@2/thirdparty/src/llvm-11.0.0.src/projects/compiler-rt/lib/tsan/rtl/tsan_new_delete.cpp:126 in operator delete(void*)
==================
ThreadSanitizer: reported 3 warnings
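
If that diagnosis holds, the usual remedy is to make the shared set immortal so there is no atexit destructor left to race with: construct it on first use behind a function-local static pointer and deliberately never delete it. A sketch of that pattern (illustrative only, not the actual Kudu change):

    #include <set>

    enum RpcFeatureFlag { APPLICATION_FEATURE_FLAGS, TLS };

    // Constructed on first use and intentionally leaked: since no
    // destructor is registered via atexit, late or unjoined readers can
    // no longer race with process teardown.
    const std::set<RpcFeatureFlag>& SupportedFeatures() {
      static const auto* flags =
          new std::set<RpcFeatureFlag>{APPLICATION_FEATURE_FLAGS, TLS};
      return *flags;
    }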