[JENKINS] Lucene-Solr-Tests-master - Build # 3642 - Unstable

From: Apache Jenkins Server
Build: https://builds.apache.org/job/Lucene-Solr-Tests-master/3642/

1 test failed.
FAILED:  org.apache.solr.cloud.api.collections.TestHdfsCloudBackupRestore.testRestoreFailure

Error Message:
Failed collection is still in the clusterstate: DocCollection(hdfsbackuprestore_testfailure_restored//collections/hdfsbackuprestore_testfailure_restored/state.json/2)={...} (the full cluster-state dump is repeated, formatted, in the stack trace below)
Expected: not a collection containing "hdfsbackuprestore_testfailure_restored"
     but: was <[hdfsbackuprestore_testfailure_restored, hdfsbackuprestore_testfailure]>
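The seed for this run is fixed by randomizedtesting, so the failure can be re-attempted locally (environment permitting) with the usual reproduce line for this suite; the locale/timezone flags from the full junit4 output are omitted here:

    ant test -Dtestcase=TestHdfsCloudBackupRestore -Dtests.method=testRestoreFailure -Dtests.seed=8CA470F856C867D3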

Stack Trace:
java.lang.AssertionError: Failed collection is still in the clusterstate: DocCollection(hdfsbackuprestore_testfailure_restored//collections/hdfsbackuprestore_testfailure_restored/state.json/2)={
  "pullReplicas":0,
  "replicationFactor":2,
  "shards":{
    "shard2":{
      "range":"0-7fffffff",
      "state":"construction",
      "replicas":{"core_node2":{
          "core":"hdfsbackuprestore_testfailure_restored_shard2_replica_n1",
          "base_url":"http://127.0.0.1:43001/solr",
          "node_name":"127.0.0.1:43001_solr",
          "state":"down",
          "type":"NRT",
          "force_set_state":"false"}},
      "stateTimestamp":"1567066874938833066"},
    "shard1":{
      "range":"80000000-ffffffff",
      "state":"construction",
      "replicas":{},
      "stateTimestamp":"1567066874938893099"}},
  "router":{"name":"compositeId"},
  "maxShardsPerNode":"2",
  "autoAddReplicas":"false",
  "nrtReplicas":2,
  "tlogReplicas":0}
Expected: not a collection containing "hdfsbackuprestore_testfailure_restored"
     but: was <[hdfsbackuprestore_testfailure_restored, hdfsbackuprestore_testfailure]>
        at __randomizedtesting.SeedInfo.seed([8CA470F856C867D3:A5D8EEDD7E9164FE]:0)
        at org.hamcrest.MatcherAssert.assertThat(MatcherAssert.java:20)
        at org.junit.Assert.assertThat(Assert.java:956)
        at org.apache.solr.cloud.api.collections.AbstractCloudBackupRestoreTestCase.testRestoreFailure(AbstractCloudBackupRestoreTestCase.java:211)
        at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.base/java.lang.reflect.Method.invoke(Method.java:566)
        at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1750)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:938)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:974)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:988)
        at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
        at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
        at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
        at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:947)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:832)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:883)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:894)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
        at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
        at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
        at java.base/java.lang.Thread.run(Thread.java:834)
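For reference, the assertion that fires here (AbstractCloudBackupRestoreTestCase.java:211) is a Hamcrest check over the collection names currently in the cluster state; judging by the matcher output above ("not a collection containing ..."), it has roughly the following shape. This is an illustrative sketch, not the test's literal code, and the variable names are mine:

    // Inside the test method; after a deliberately failed restore, the
    // half-built target collection should have been cleaned out of the
    // cluster state, so its name must no longer appear in the name set.
    import static org.hamcrest.CoreMatchers.hasItem;
    import static org.hamcrest.CoreMatchers.not;
    import static org.junit.Assert.assertThat;

    String restoreCollectionName = "hdfsbackuprestore_testfailure_restored";
    Set<String> collections = cluster.getSolrClient().getZkStateReader()
        .getClusterState().getCollectionsMap().keySet();
    assertThat("Failed collection is still in the clusterstate: " + collections,
        collections, not(hasItem(restoreCollectionName)));

The failure above means the Overseer's cleanup after the poisoned restore left the "construction"-state collection behind, so the set still contained both names.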




Build Log:
[...truncated 12966 lines...]
   [junit4] Suite: org.apache.solr.cloud.api.collections.TestHdfsCloudBackupRestore
   [junit4]   2> 132578 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.s.SolrTestCaseJ4 Created dataDir: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8CA470F856C867D3-001/data-dir-25-001
   [junit4]   2> 132578 WARN  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=1 numCloses=1
   [junit4]   2> 132578 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.s.SolrTestCaseJ4 Using TrieFields (NUMERIC_POINTS_SYSPROP=false) w/NUMERIC_DOCVALUES_SYSPROP=true
   [junit4]   2> 132579 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.s.SolrTestCaseJ4 Randomized ssl (false) and clientAuth (false) via: @org.apache.solr.util.RandomizeSSL(reason="", ssl=0.0/0.0, value=0.0/0.0, clientAuth=0.0/0.0)
   [junit4]   2> 132579 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.s.SolrTestCaseJ4 SecureRandom sanity checks: test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom
   [junit4]   2> 133051 WARN  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.h.u.NativeCodeLoader Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
   [junit4]   1> Formatting using clusterid: testClusterID
   [junit4]   2> 134026 WARN  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.h.m.i.MetricsConfig Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
   [junit4]   2> 134342 WARN  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
   [junit4]   2> 134395 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.e.j.s.Server jetty-9.4.19.v20190610; built: 2019-06-10T16:30:51.723Z; git: afcf563148970e98786327af5e07c261fda175d3; jvm 11.0.1+13-LTS
   [junit4]   2> 134426 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.e.j.s.session DefaultSessionIdManager workerName=node0
   [junit4]   2> 134426 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.e.j.s.session No SessionScavenger set, using defaults
   [junit4]   2> 134426 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.e.j.s.session node0 Scavenging every 600000ms
   [junit4]   2> 134428 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@1df63923{static,/static,jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-3.2.0-tests.jar!/webapps/static,AVAILABLE}
   [junit4]   2> 134773 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.e.j.s.h.ContextHandler Started o.e.j.w.WebAppContext@1a68534{hdfs,/,file:///home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/jetty-localhost.localdomain-42083-hdfs-_-any-9567057768306693800.dir/webapp/,AVAILABLE}{/hdfs}
   [junit4]   2> 134775 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.e.j.s.AbstractConnector Started ServerConnector@11bf2c1e{HTTP/1.1,[http/1.1]}{localhost.localdomain:42083}
   [junit4]   2> 134775 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.e.j.s.Server Started @134832ms
   [junit4]   2> 136006 WARN  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
   [junit4]   2> 136012 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.e.j.s.Server jetty-9.4.19.v20190610; built: 2019-06-10T16:30:51.723Z; git: afcf563148970e98786327af5e07c261fda175d3; jvm 11.0.1+13-LTS
   [junit4]   2> 136014 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.e.j.s.session DefaultSessionIdManager workerName=node0
   [junit4]   2> 136014 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.e.j.s.session No SessionScavenger set, using defaults
   [junit4]   2> 136015 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.e.j.s.session node0 Scavenging every 600000ms
   [junit4]   2> 136016 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@7e95eb18{static,/static,jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-3.2.0-tests.jar!/webapps/static,AVAILABLE}
   [junit4]   2> 136207 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.e.j.s.h.ContextHandler Started o.e.j.w.WebAppContext@16eb4cdc{datanode,/,file:///home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/jetty-localhost-46209-datanode-_-any-14485046069961529261.dir/webapp/,AVAILABLE}{/datanode}
   [junit4]   2> 136207 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.e.j.s.AbstractConnector Started ServerConnector@163ea172{HTTP/1.1,[http/1.1]}{localhost:46209}
   [junit4]   2> 136207 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.e.j.s.Server Started @136265ms
   [junit4]   2> 136547 WARN  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
   [junit4]   2> 136551 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.e.j.s.Server jetty-9.4.19.v20190610; built: 2019-06-10T16:30:51.723Z; git: afcf563148970e98786327af5e07c261fda175d3; jvm 11.0.1+13-LTS
   [junit4]   2> 136561 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.e.j.s.session DefaultSessionIdManager workerName=node0
   [junit4]   2> 136561 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.e.j.s.session No SessionScavenger set, using defaults
   [junit4]   2> 136561 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.e.j.s.session node0 Scavenging every 660000ms
   [junit4]   2> 136562 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@23b54d2c{static,/static,jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-3.2.0-tests.jar!/webapps/static,AVAILABLE}
   [junit4]   2> 136745 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.e.j.s.h.ContextHandler Started o.e.j.w.WebAppContext@460be5d8{datanode,/,file:///home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/jetty-localhost-35833-datanode-_-any-5158532470475212041.dir/webapp/,AVAILABLE}{/datanode}
   [junit4]   2> 136746 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.e.j.s.AbstractConnector Started ServerConnector@9fa4e9f{HTTP/1.1,[http/1.1]}{localhost:35833}
   [junit4]   2> 136746 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.e.j.s.Server Started @136803ms
   [junit4]   2> 137770 INFO  (Block report processor) [     ] BlockStateChange BLOCK* processReport 0x10bff90e435844bc: Processing first storage report for DS-e779cbdc-1684-4cf7-8513-3d666aa25046 from datanode 5be610ec-972c-483a-b403-09634bf4222d
   [junit4]   2> 137773 INFO  (Block report processor) [     ] BlockStateChange BLOCK* processReport 0x10bff90e435844bc: from storage DS-e779cbdc-1684-4cf7-8513-3d666aa25046 node DatanodeRegistration(127.0.0.1:46373, datanodeUuid=5be610ec-972c-483a-b403-09634bf4222d, infoPort=44147, infoSecurePort=0, ipcPort=37747, storageInfo=lv=-57;cid=testClusterID;nsid=1999202858;c=1567066857702), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0
   [junit4]   2> 137773 INFO  (Block report processor) [     ] BlockStateChange BLOCK* processReport 0xe02877f1407c89aa: Processing first storage report for DS-7f38f1bf-3c73-4b4b-9e63-3bcb66513607 from datanode 9910e9f7-3d52-4fe9-b8c7-f66d8ed86041
   [junit4]   2> 137773 INFO  (Block report processor) [     ] BlockStateChange BLOCK* processReport 0xe02877f1407c89aa: from storage DS-7f38f1bf-3c73-4b4b-9e63-3bcb66513607 node DatanodeRegistration(127.0.0.1:36487, datanodeUuid=9910e9f7-3d52-4fe9-b8c7-f66d8ed86041, infoPort=46615, infoSecurePort=0, ipcPort=46713, storageInfo=lv=-57;cid=testClusterID;nsid=1999202858;c=1567066857702), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
   [junit4]   2> 137773 INFO  (Block report processor) [     ] BlockStateChange BLOCK* processReport 0xe02877f1407c89aa: Processing first storage report for DS-fae4602a-65dc-4af6-ae65-a0667812b3da from datanode 9910e9f7-3d52-4fe9-b8c7-f66d8ed86041
   [junit4]   2> 137773 INFO  (Block report processor) [     ] BlockStateChange BLOCK* processReport 0xe02877f1407c89aa: from storage DS-fae4602a-65dc-4af6-ae65-a0667812b3da node DatanodeRegistration(127.0.0.1:36487, datanodeUuid=9910e9f7-3d52-4fe9-b8c7-f66d8ed86041, infoPort=46615, infoSecurePort=0, ipcPort=46713, storageInfo=lv=-57;cid=testClusterID;nsid=1999202858;c=1567066857702), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
   [junit4]   2> 137773 INFO  (Block report processor) [     ] BlockStateChange BLOCK* processReport 0x10bff90e435844bc: Processing first storage report for DS-93c29fe2-89de-49db-970f-93a7a4502ec2 from datanode 5be610ec-972c-483a-b403-09634bf4222d
   [junit4]   2> 137773 INFO  (Block report processor) [     ] BlockStateChange BLOCK* processReport 0x10bff90e435844bc: from storage DS-93c29fe2-89de-49db-970f-93a7a4502ec2 node DatanodeRegistration(127.0.0.1:46373, datanodeUuid=5be610ec-972c-483a-b403-09634bf4222d, infoPort=44147, infoSecurePort=0, ipcPort=37747, storageInfo=lv=-57;cid=testClusterID;nsid=1999202858;c=1567066857702), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
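The block reports above are the tail end of the embedded HDFS cluster (one namenode, two datanodes) that the suite boots before Solr itself; in Solr this lives in HdfsTestUtil. Stripped to its essentials, the setup looks roughly like the sketch below; the real code sets many more options (ports, permissions, metrics):

    // Rough sketch of the suite's embedded HDFS; not the literal test code.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    Configuration conf = new Configuration();
    MiniDFSCluster dfsCluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)   // two datanodes register in the block reports above
        .format(true)      // matches "Formatting using clusterid: testClusterID"
        .build();
    // The backup repository later points at this filesystem, e.g.
    // hdfs://localhost.localdomain:<port>/solr in the repository params below.
    String hdfsUri = "hdfs://localhost:" + dfsCluster.getNameNodePort();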
   [junit4]   2> 137938 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.s.c.MiniSolrCloudCluster Starting cluster of 2 servers in /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8CA470F856C867D3-001/tempDir-002
   [junit4]   2> 137939 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
   [junit4]   2> 137939 INFO  (ZkTestServer Run Thread) [     ] o.a.s.c.ZkTestServer client port:0.0.0.0/0.0.0.0:0
   [junit4]   2> 137939 INFO  (ZkTestServer Run Thread) [     ] o.a.s.c.ZkTestServer Starting server
   [junit4]   2> 138039 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.s.c.ZkTestServer start zk server on port:41679
   [junit4]   2> 138039 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.s.c.ZkTestServer waitForServerUp: 127.0.0.1:41679
   [junit4]   2> 138039 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.s.c.ZkTestServer parse host and port list: 127.0.0.1:41679
   [junit4]   2> 138039 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.s.c.ZkTestServer connecting to 127.0.0.1 41679
   [junit4]   2> 138055 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
   [junit4]   2> 138099 INFO  (zkConnectionManagerCallback-781-thread-1) [     ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 138099 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
   [junit4]   2> 138144 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
   [junit4]   2> 138164 INFO  (zkConnectionManagerCallback-783-thread-1) [     ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 138165 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
   [junit4]   2> 138173 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
   [junit4]   2> 138185 INFO  (zkConnectionManagerCallback-785-thread-1) [     ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 138185 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
   [junit4]   2> 138292 WARN  (jetty-launcher-786-thread-1) [     ] o.e.j.s.AbstractConnector Ignoring deprecated socket close linger time
   [junit4]   2> 138300 INFO  (jetty-launcher-786-thread-1) [     ] o.a.s.c.s.e.JettySolrRunner Start Jetty (original configured port=0)
   [junit4]   2> 138300 INFO  (jetty-launcher-786-thread-1) [     ] o.a.s.c.s.e.JettySolrRunner Trying to start Jetty on port 0 try number 1 ...
   [junit4]   2> 138301 WARN  (jetty-launcher-786-thread-2) [     ] o.e.j.s.AbstractConnector Ignoring deprecated socket close linger time
   [junit4]   2> 138301 INFO  (jetty-launcher-786-thread-2) [     ] o.a.s.c.s.e.JettySolrRunner Start Jetty (original configured port=0)
   [junit4]   2> 138301 INFO  (jetty-launcher-786-thread-2) [     ] o.a.s.c.s.e.JettySolrRunner Trying to start Jetty on port 0 try number 1 ...
   [junit4]   2> 138302 INFO  (jetty-launcher-786-thread-2) [     ] o.e.j.s.Server jetty-9.4.19.v20190610; built: 2019-06-10T16:30:51.723Z; git: afcf563148970e98786327af5e07c261fda175d3; jvm 11.0.1+13-LTS
   [junit4]   2> 138300 INFO  (jetty-launcher-786-thread-1) [     ] o.e.j.s.Server jetty-9.4.19.v20190610; built: 2019-06-10T16:30:51.723Z; git: afcf563148970e98786327af5e07c261fda175d3; jvm 11.0.1+13-LTS
   [junit4]   2> 138379 INFO  (jetty-launcher-786-thread-1) [     ] o.e.j.s.session DefaultSessionIdManager workerName=node0
   [junit4]   2> 138379 INFO  (jetty-launcher-786-thread-1) [     ] o.e.j.s.session No SessionScavenger set, using defaults
   [junit4]   2> 138379 INFO  (jetty-launcher-786-thread-1) [     ] o.e.j.s.session node0 Scavenging every 660000ms
   [junit4]   2> 138382 INFO  (jetty-launcher-786-thread-1) [     ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@4f0d2b03{/solr,null,AVAILABLE}
   [junit4]   2> 138384 INFO  (jetty-launcher-786-thread-1) [     ] o.e.j.s.AbstractConnector Started ServerConnector@69bf5f30{HTTP/1.1,[http/1.1, h2c]}{127.0.0.1:43001}
   [junit4]   2> 138384 INFO  (jetty-launcher-786-thread-1) [     ] o.e.j.s.Server Started @138441ms
   [junit4]   2> 138384 INFO  (jetty-launcher-786-thread-1) [     ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr, hostPort=43001}
   [junit4]   2> 138385 ERROR (jetty-launcher-786-thread-1) [     ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
   [junit4]   2> 138385 INFO  (jetty-launcher-786-thread-1) [     ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory
   [junit4]   2> 138385 INFO  (jetty-launcher-786-thread-1) [     ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 9.0.0
   [junit4]   2> 138385 INFO  (jetty-launcher-786-thread-1) [     ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 138385 INFO  (jetty-launcher-786-thread-1) [     ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 138385 INFO  (jetty-launcher-786-thread-1) [     ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2019-08-29T08:21:02.404510Z
   [junit4]   2> 138387 INFO  (jetty-launcher-786-thread-2) [     ] o.e.j.s.session DefaultSessionIdManager workerName=node0
   [junit4]   2> 138387 INFO  (jetty-launcher-786-thread-2) [     ] o.e.j.s.session No SessionScavenger set, using defaults
   [junit4]   2> 138387 INFO  (jetty-launcher-786-thread-2) [     ] o.e.j.s.session node0 Scavenging every 660000ms
   [junit4]   2> 138395 INFO  (jetty-launcher-786-thread-2) [     ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@4d1997b2{/solr,null,AVAILABLE}
   [junit4]   2> 138396 INFO  (jetty-launcher-786-thread-2) [     ] o.e.j.s.AbstractConnector Started ServerConnector@7e27179{HTTP/1.1,[http/1.1, h2c]}{127.0.0.1:39505}
   [junit4]   2> 138396 INFO  (jetty-launcher-786-thread-2) [     ] o.e.j.s.Server Started @138453ms
   [junit4]   2> 138396 INFO  (jetty-launcher-786-thread-2) [     ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr, hostPort=39505}
   [junit4]   2> 138397 ERROR (jetty-launcher-786-thread-2) [     ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
   [junit4]   2> 138397 INFO  (jetty-launcher-786-thread-2) [     ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory
   [junit4]   2> 138397 INFO  (jetty-launcher-786-thread-2) [     ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 9.0.0
   [junit4]   2> 138397 INFO  (jetty-launcher-786-thread-2) [     ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 138397 INFO  (jetty-launcher-786-thread-2) [     ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 138397 INFO  (jetty-launcher-786-thread-2) [     ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2019-08-29T08:21:02.416599Z
   [junit4]   2> 138412 INFO  (jetty-launcher-786-thread-1) [     ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
   [junit4]   2> 138420 INFO  (jetty-launcher-786-thread-2) [     ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
   [junit4]   2> 138421 INFO  (zkConnectionManagerCallback-788-thread-1) [     ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 138421 INFO  (jetty-launcher-786-thread-1) [     ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
   [junit4]   2> 138422 INFO  (jetty-launcher-786-thread-1) [     ] o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading...
   [junit4]   2> 138423 INFO  (zkConnectionManagerCallback-790-thread-1) [     ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 138423 INFO  (jetty-launcher-786-thread-2) [     ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
   [junit4]   2> 138429 INFO  (jetty-launcher-786-thread-2) [     ] o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading...
   [junit4]   2> 138435 INFO  (jetty-launcher-786-thread-2) [     ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@a391556, but no JMX reporters were configured - adding default JMX reporter.
   [junit4]   2> 138436 INFO  (jetty-launcher-786-thread-1) [     ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@a391556, but no JMX reporters were configured - adding default JMX reporter.
   [junit4]   2> 138707 INFO  (jetty-launcher-786-thread-2) [     ] o.a.s.h.c.HttpShardHandlerFactory Host whitelist initialized: WhitelistHostChecker [whitelistHosts=null, whitelistHostCheckingEnabled=true]
   [junit4]   2> 138709 WARN  (jetty-launcher-786-thread-2) [     ] o.e.j.u.s.S.config Trusting all certificates configured for Client@648b78e0[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 138709 WARN  (jetty-launcher-786-thread-2) [     ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for Client@648b78e0[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 138734 WARN  (jetty-launcher-786-thread-2) [     ] o.e.j.u.s.S.config Trusting all certificates configured for Client@40cd170b[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 138734 WARN  (jetty-launcher-786-thread-2) [     ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for Client@40cd170b[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 138737 INFO  (jetty-launcher-786-thread-2) [     ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:41679/solr
   [junit4]   2> 138754 INFO  (jetty-launcher-786-thread-2) [     ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
   [junit4]   2> 138771 INFO  (zkConnectionManagerCallback-798-thread-1) [     ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 138771 INFO  (jetty-launcher-786-thread-2) [     ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
   [junit4]   2> 138851 INFO  (jetty-launcher-786-thread-1) [     ] o.a.s.h.c.HttpShardHandlerFactory Host whitelist initialized: WhitelistHostChecker [whitelistHosts=null, whitelistHostCheckingEnabled=true]
   [junit4]   2> 138864 WARN  (jetty-launcher-786-thread-1) [     ] o.e.j.u.s.S.config Trusting all certificates configured for Client@41fc7833[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 138864 WARN  (jetty-launcher-786-thread-1) [     ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for Client@41fc7833[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 138908 WARN  (jetty-launcher-786-thread-1) [     ] o.e.j.u.s.S.config Trusting all certificates configured for Client@419da50[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 138908 WARN  (jetty-launcher-786-thread-1) [     ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for Client@419da50[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 138910 INFO  (jetty-launcher-786-thread-1) [     ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:41679/solr
   [junit4]   2> 138919 INFO  (jetty-launcher-786-thread-2) [n:127.0.0.1:39505_solr     ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
   [junit4]   2> 138943 INFO  (jetty-launcher-786-thread-1) [     ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
   [junit4]   2> 138963 INFO  (zkConnectionManagerCallback-802-thread-1) [     ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 138963 INFO  (jetty-launcher-786-thread-2) [n:127.0.0.1:39505_solr     ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
   [junit4]   2> 138969 INFO  (zkConnectionManagerCallback-806-thread-1) [     ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 138969 INFO  (jetty-launcher-786-thread-1) [     ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
   [junit4]   2> 139092 INFO  (jetty-launcher-786-thread-1) [n:127.0.0.1:43001_solr     ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
   [junit4]   2> 139102 INFO  (zkConnectionManagerCallback-808-thread-1) [     ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 139102 INFO  (jetty-launcher-786-thread-1) [n:127.0.0.1:43001_solr     ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
   [junit4]   2> 139138 INFO  (jetty-launcher-786-thread-1) [n:127.0.0.1:43001_solr     ] o.a.s.c.OverseerElectionContext I am going to be the leader 127.0.0.1:43001_solr
   [junit4]   2> 139142 INFO  (jetty-launcher-786-thread-1) [n:127.0.0.1:43001_solr     ] o.a.s.c.Overseer Overseer (id=72286213500829704-127.0.0.1:43001_solr-n_0000000000) starting
   [junit4]   2> 139208 INFO  (jetty-launcher-786-thread-1) [n:127.0.0.1:43001_solr     ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
   [junit4]   2> 139209 INFO  (zkConnectionManagerCallback-817-thread-1) [     ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 139209 INFO  (jetty-launcher-786-thread-1) [n:127.0.0.1:43001_solr     ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
   [junit4]   2> 139219 INFO  (jetty-launcher-786-thread-1) [n:127.0.0.1:43001_solr     ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:41679/solr ready
   [junit4]   2> 139237 INFO  (OverseerStateUpdate-72286213500829704-127.0.0.1:43001_solr-n_0000000000) [n:127.0.0.1:43001_solr     ] o.a.s.c.Overseer Starting to work on the main queue : 127.0.0.1:43001_solr
   [junit4]   2> 139270 INFO  (jetty-launcher-786-thread-1) [n:127.0.0.1:43001_solr     ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:43001_solr
   [junit4]   2> 139274 INFO  (jetty-launcher-786-thread-1) [n:127.0.0.1:43001_solr     ] o.a.s.c.PackageManager clusterprops.json changed , version -1
   [junit4]   2> 139274 INFO  (jetty-launcher-786-thread-1) [n:127.0.0.1:43001_solr     ] o.a.s.c.b.r.BackupRepositoryFactory Added backup repository with configuration params {type = repository,name = hdfs,class = org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes = {name=hdfs, class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args = {location=/backup,solr.hdfs.home=hdfs://localhost.localdomain:38141/solr,solr.hdfs.confdir=}}
   [junit4]   2> 139274 INFO  (jetty-launcher-786-thread-1) [n:127.0.0.1:43001_solr     ] o.a.s.c.b.r.BackupRepositoryFactory Added backup repository with configuration params {type = repository,name = poisioned,class = org.apache.solr.cloud.api.collections.TestLocalFSCloudBackupRestore$PoinsionedRepository,attributes = {default=true, name=poisioned, class=org.apache.solr.cloud.api.collections.TestLocalFSCloudBackupRestore$PoinsionedRepository},}
   [junit4]   2> 139274 INFO  (jetty-launcher-786-thread-1) [n:127.0.0.1:43001_solr     ] o.a.s.c.b.r.BackupRepositoryFactory Default configuration for backup repository is with configuration params {type = repository,name = poisioned,class = org.apache.solr.cloud.api.collections.TestLocalFSCloudBackupRestore$PoinsionedRepository,attributes = {default=true, name=poisioned, class=org.apache.solr.cloud.api.collections.TestLocalFSCloudBackupRestore$PoinsionedRepository},}
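The repository parameters logged above are read from the <backup> section of the test's solr.xml. Reconstructed from the logged params (the HDFS port is per-run, and "poisioned"/"PoinsionedRepository" are the identifiers' actual spelling in the test source), that section would look approximately like:

    <backup>
      <repository name="hdfs"
                  class="org.apache.solr.core.backup.repository.HdfsBackupRepository">
        <str name="location">/backup</str>
        <str name="solr.hdfs.home">hdfs://localhost.localdomain:38141/solr</str>
        <str name="solr.hdfs.confdir"></str>
      </repository>
      <repository name="poisioned" default="true"
                  class="org.apache.solr.cloud.api.collections.TestLocalFSCloudBackupRestore$PoinsionedRepository"/>
    </backup>

The poisoned repository is the default on purpose: it fails mid-restore so the test can check that the partially restored collection gets cleaned up, which is exactly the check that failed above.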
   [junit4]   2> 139297 INFO  (zkCallback-807-thread-1) [     ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 139295 INFO  (jetty-launcher-786-thread-2) [n:127.0.0.1:39505_solr     ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 139308 INFO  (jetty-launcher-786-thread-2) [n:127.0.0.1:39505_solr     ] o.a.s.c.ZkController Publish node=127.0.0.1:39505_solr as DOWN
   [junit4]   2> 139310 INFO  (jetty-launcher-786-thread-2) [n:127.0.0.1:39505_solr     ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 2147483647 transient cores
   [junit4]   2> 139310 INFO  (jetty-launcher-786-thread-2) [n:127.0.0.1:39505_solr     ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:39505_solr
   [junit4]   2> 139313 INFO  (zkCallback-807-thread-1) [     ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 139321 INFO  (zkCallback-816-thread-1) [     ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
   [junit4]   2> 139325 INFO  (jetty-launcher-786-thread-2) [n:127.0.0.1:39505_solr     ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
   [junit4]   2> 139330 INFO  (zkCallback-801-thread-1) [     ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 139365 INFO  (zkConnectionManagerCallback-822-thread-1) [     ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 139365 INFO  (jetty-launcher-786-thread-2) [n:127.0.0.1:39505_solr     ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
   [junit4]   2> 139368 INFO  (jetty-launcher-786-thread-2) [n:127.0.0.1:39505_solr     ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
   [junit4]   2> 139371 INFO  (jetty-launcher-786-thread-2) [n:127.0.0.1:39505_solr     ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:41679/solr ready
   [junit4]   2> 139372 INFO  (jetty-launcher-786-thread-2) [n:127.0.0.1:39505_solr     ] o.a.s.c.PackageManager clusterprops.json changed , version -1
   [junit4]   2> 139372 INFO  (jetty-launcher-786-thread-2) [n:127.0.0.1:39505_solr     ] o.a.s.c.b.r.BackupRepositoryFactory Added backup repository with configuration params {type = repository,name = hdfs,class = org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes = {name=hdfs, class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args = {location=/backup,solr.hdfs.home=hdfs://localhost.localdomain:38141/solr,solr.hdfs.confdir=}}
   [junit4]   2> 139372 INFO  (jetty-launcher-786-thread-2) [n:127.0.0.1:39505_solr     ] o.a.s.c.b.r.BackupRepositoryFactory Added backup repository with configuration params {type = repository,name = poisioned,class = org.apache.solr.cloud.api.collections.TestLocalFSCloudBackupRestore$PoinsionedRepository,attributes = {default=true, name=poisioned, class=org.apache.solr.cloud.api.collections.TestLocalFSCloudBackupRestore$PoinsionedRepository},}
   [junit4]   2> 139372 INFO  (jetty-launcher-786-thread-2) [n:127.0.0.1:39505_solr     ] o.a.s.c.b.r.BackupRepositoryFactory Default configuration for backup repository is with configuration params {type = repository,name = poisioned,class = org.apache.solr.cloud.api.collections.TestLocalFSCloudBackupRestore$PoinsionedRepository,attributes = {default=true, name=poisioned, class=org.apache.solr.cloud.api.collections.TestLocalFSCloudBackupRestore$PoinsionedRepository},}
   [junit4]   2> 139372 INFO  (jetty-launcher-786-thread-1) [n:127.0.0.1:43001_solr     ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory.
   [junit4]   2> 139452 INFO  (jetty-launcher-786-thread-2) [n:127.0.0.1:39505_solr     ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory.
   [junit4]   2> 139462 INFO  (jetty-launcher-786-thread-1) [n:127.0.0.1:43001_solr     ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@a391556
   [junit4]   2> 139559 INFO  (jetty-launcher-786-thread-2) [n:127.0.0.1:39505_solr     ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@a391556
   [junit4]   2> 139567 INFO  (jetty-launcher-786-thread-1) [n:127.0.0.1:43001_solr     ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@a391556
   [junit4]   2> 139567 INFO  (jetty-launcher-786-thread-1) [n:127.0.0.1:43001_solr     ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@a391556
   [junit4]   2> 139569 INFO  (jetty-launcher-786-thread-1) [n:127.0.0.1:43001_solr     ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8CA470F856C867D3-001/tempDir-002/node1/.
   [junit4]   2> 139608 INFO  (jetty-launcher-786-thread-2) [n:127.0.0.1:39505_solr     ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@a391556
   [junit4]   2> 139608 INFO  (jetty-launcher-786-thread-2) [n:127.0.0.1:39505_solr     ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@a391556
   [junit4]   2> 139612 INFO  (jetty-launcher-786-thread-2) [n:127.0.0.1:39505_solr     ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8CA470F856C867D3-001/tempDir-002/node2/.
   [junit4]   2> 139745 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.s.c.MiniSolrCloudCluster waitForAllNodes: numServers=2
   [junit4]   2> 139764 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
   [junit4]   2> 139772 INFO  (zkConnectionManagerCallback-829-thread-1) [     ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 139772 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
   [junit4]   2> 139774 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
   [junit4]   2> 139776 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[8CA470F856C867D3]-worker) [     ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:41679/solr ready
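At this point the two-node MiniSolrCloudCluster and its embedded ZooKeeper (port 41679 in this run) are up. In SolrCloudTestCase terms, the bootstrap logged above corresponds roughly to the following; the configset name is an assumption inferred from the "minimal" schema the cores load later:

    // Approximate @BeforeClass bootstrap; the real test also injects the
    // solr.xml carrying the hdfs/poisioned backup repositories first.
    configureCluster(2)                                  // "Starting cluster of 2 servers"
        .addConfig("conf1", configset("cloud-minimal"))  // collection.configName=conf1 below
        .configure();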
   [junit4]   2> 139907 INFO  (TEST-TestHdfsCloudBackupRestore.testRestoreFailure-seed#[8CA470F856C867D3]) [     ] o.a.s.SolrTestCaseJ4 ###Starting testRestoreFailure
   [junit4]   2> 139919 INFO  (qtp153456173-2147) [n:127.0.0.1:39505_solr     ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params pullReplicas=0&collection.configName=conf1&maxShardsPerNode=2&name=hdfsbackuprestore_testfailure&nrtReplicas=2&action=CREATE&numShards=2&tlogReplicas=0&wt=javabin&version=2 and sendToOCPQueue=true
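The CREATE call logged above (numShards=2, nrtReplicas=2, tlogReplicas=0, pullReplicas=0, maxShardsPerNode=2) is what the SolrJ Collections API issues; as a sketch of the equivalent client-side request:

    // SolrJ equivalent of the logged Collection API request (sketch).
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    CollectionAdminRequest
        .createCollection("hdfsbackuprestore_testfailure", "conf1",
            2 /* numShards */, 2 /* nrtReplicas */,
            0 /* tlogReplicas */, 0 /* pullReplicas */)
        .setMaxShardsPerNode(2)
        .process(cluster.getSolrClient());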
   [junit4]   2> 139924 INFO  (OverseerThreadFactory-609-thread-1-processing-n:127.0.0.1:43001_solr) [n:127.0.0.1:43001_solr     ] o.a.s.c.a.c.CreateCollectionCmd Create collection hdfsbackuprestore_testfailure
   [junit4]   2> 140052 INFO  (OverseerStateUpdate-72286213500829704-127.0.0.1:43001_solr-n_0000000000) [n:127.0.0.1:43001_solr     ] o.a.s.c.o.SliceMutator createReplica() {
   [junit4]   2>   "operation":"ADDREPLICA",
   [junit4]   2>   "collection":"hdfsbackuprestore_testfailure",
   [junit4]   2>   "shard":"shard1",
   [junit4]   2>   "core":"hdfsbackuprestore_testfailure_shard1_replica_n1",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "base_url":"http://127.0.0.1:39505/solr",
   [junit4]   2>   "type":"NRT",
   [junit4]   2>   "waitForFinalState":"false"}
   [junit4]   2> 140059 INFO  (OverseerStateUpdate-72286213500829704-127.0.0.1:43001_solr-n_0000000000) [n:127.0.0.1:43001_solr     ] o.a.s.c.o.SliceMutator createReplica() {
   [junit4]   2>   "operation":"ADDREPLICA",
   [junit4]   2>   "collection":"hdfsbackuprestore_testfailure",
   [junit4]   2>   "shard":"shard1",
   [junit4]   2>   "core":"hdfsbackuprestore_testfailure_shard1_replica_n3",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "base_url":"http://127.0.0.1:43001/solr",
   [junit4]   2>   "type":"NRT",
   [junit4]   2>   "waitForFinalState":"false"}
   [junit4]   2> 140065 INFO  (OverseerStateUpdate-72286213500829704-127.0.0.1:43001_solr-n_0000000000) [n:127.0.0.1:43001_solr     ] o.a.s.c.o.SliceMutator createReplica() {
   [junit4]   2>   "operation":"ADDREPLICA",
   [junit4]   2>   "collection":"hdfsbackuprestore_testfailure",
   [junit4]   2>   "shard":"shard2",
   [junit4]   2>   "core":"hdfsbackuprestore_testfailure_shard2_replica_n5",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "base_url":"http://127.0.0.1:39505/solr",
   [junit4]   2>   "type":"NRT",
   [junit4]   2>   "waitForFinalState":"false"}
   [junit4]   2> 140072 INFO  (OverseerStateUpdate-72286213500829704-127.0.0.1:43001_solr-n_0000000000) [n:127.0.0.1:43001_solr     ] o.a.s.c.o.SliceMutator createReplica() {
   [junit4]   2>   "operation":"ADDREPLICA",
   [junit4]   2>   "collection":"hdfsbackuprestore_testfailure",
   [junit4]   2>   "shard":"shard2",
   [junit4]   2>   "core":"hdfsbackuprestore_testfailure_shard2_replica_n7",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "base_url":"http://127.0.0.1:43001/solr",
   [junit4]   2>   "type":"NRT",
   [junit4]   2>   "waitForFinalState":"false"}
   [junit4]   2> 140278 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr    x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&coreNodeName=core_node2&collection.configName=conf1&newCollection=true&name=hdfsbackuprestore_testfailure_shard1_replica_n1&action=CREATE&numShards=2&collection=hdfsbackuprestore_testfailure&shard=shard1&wt=javabin&version=2&replicaType=NRT
   [junit4]   2> 140281 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr    x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&coreNodeName=core_node6&collection.configName=conf1&newCollection=true&name=hdfsbackuprestore_testfailure_shard2_replica_n5&action=CREATE&numShards=2&collection=hdfsbackuprestore_testfailure&shard=shard2&wt=javabin&version=2&replicaType=NRT
   [junit4]   2> 140289 INFO  (qtp1735966127-2148) [n:127.0.0.1:43001_solr    x:hdfsbackuprestore_testfailure_shard1_replica_n3 ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&coreNodeName=core_node4&collection.configName=conf1&newCollection=true&name=hdfsbackuprestore_testfailure_shard1_replica_n3&action=CREATE&numShards=2&collection=hdfsbackuprestore_testfailure&shard=shard1&wt=javabin&version=2&replicaType=NRT
   [junit4]   2> 140289 INFO  (qtp1735966127-2148) [n:127.0.0.1:43001_solr    x:hdfsbackuprestore_testfailure_shard1_replica_n3 ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 2147483647 transient cores
   [junit4]   2> 140295 INFO  (qtp1735966127-2150) [n:127.0.0.1:43001_solr    x:hdfsbackuprestore_testfailure_shard2_replica_n7 ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&coreNodeName=core_node8&collection.configName=conf1&newCollection=true&name=hdfsbackuprestore_testfailure_shard2_replica_n7&action=CREATE&numShards=2&collection=hdfsbackuprestore_testfailure&shard=shard2&wt=javabin&version=2&replicaType=NRT
   [junit4]   2> 141297 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.c.SolrConfig Using Lucene MatchVersion: 9.0.0
   [junit4]   2> 141317 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.c.SolrConfig Using Lucene MatchVersion: 9.0.0
   [junit4]   2> 141318 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.s.IndexSchema [hdfsbackuprestore_testfailure_shard2_replica_n5] Schema name=minimal
   [junit4]   2> 141355 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.s.IndexSchema Loaded schema minimal/1.1 with uniqueid field id
   [junit4]   2> 141355 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.c.CoreContainer Creating SolrCore 'hdfsbackuprestore_testfailure_shard2_replica_n5' using configuration from collection hdfsbackuprestore_testfailure, trusted=true
   [junit4]   2> 141356 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.hdfsbackuprestore_testfailure.shard2.replica_n5' (registry 'solr.core.hdfsbackuprestore_testfailure.shard2.replica_n5') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@a391556
   [junit4]   2> 141359 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.c.SolrCore [[hdfsbackuprestore_testfailure_shard2_replica_n5] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8CA470F856C867D3-001/tempDir-002/node2/hdfsbackuprestore_testfailure_shard2_replica_n5], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8CA470F856C867D3-001/tempDir-002/node2/./hdfsbackuprestore_testfailure_shard2_replica_n5/data/]
   [junit4]   2> 141363 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.s.IndexSchema [hdfsbackuprestore_testfailure_shard1_replica_n1] Schema name=minimal
   [junit4]   2> 141365 INFO  (qtp1735966127-2148) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node4 x:hdfsbackuprestore_testfailure_shard1_replica_n3 ] o.a.s.c.SolrConfig Using Lucene MatchVersion: 9.0.0
   [junit4]   2> 141365 INFO  (qtp1735966127-2150) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node8 x:hdfsbackuprestore_testfailure_shard2_replica_n7 ] o.a.s.c.SolrConfig Using Lucene MatchVersion: 9.0.0
   [junit4]   2> 141376 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.s.IndexSchema Loaded schema minimal/1.1 with uniqueid field id
   [junit4]   2> 141376 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.c.CoreContainer Creating SolrCore 'hdfsbackuprestore_testfailure_shard1_replica_n1' using configuration from collection hdfsbackuprestore_testfailure, trusted=true
   [junit4]   2> 141377 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.hdfsbackuprestore_testfailure.shard1.replica_n1' (registry 'solr.core.hdfsbackuprestore_testfailure.shard1.replica_n1') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@a391556
   [junit4]   2> 141378 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.c.SolrCore [[hdfsbackuprestore_testfailure_shard1_replica_n1] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8CA470F856C867D3-001/tempDir-002/node2/hdfsbackuprestore_testfailure_shard1_replica_n1], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8CA470F856C867D3-001/tempDir-002/node2/./hdfsbackuprestore_testfailure_shard1_replica_n1/data/]
   [junit4]   2> 141381 INFO  (qtp1735966127-2148) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node4 x:hdfsbackuprestore_testfailure_shard1_replica_n3 ] o.a.s.s.IndexSchema [hdfsbackuprestore_testfailure_shard1_replica_n3] Schema name=minimal
   [junit4]   2> 141384 INFO  (qtp1735966127-2148) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node4 x:hdfsbackuprestore_testfailure_shard1_replica_n3 ] o.a.s.s.IndexSchema Loaded schema minimal/1.1 with uniqueid field id
   [junit4]   2> 141384 INFO  (qtp1735966127-2148) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node4 x:hdfsbackuprestore_testfailure_shard1_replica_n3 ] o.a.s.c.CoreContainer Creating SolrCore 'hdfsbackuprestore_testfailure_shard1_replica_n3' using configuration from collection hdfsbackuprestore_testfailure, trusted=true
   [junit4]   2> 141384 INFO  (qtp1735966127-2148) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node4 x:hdfsbackuprestore_testfailure_shard1_replica_n3 ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.hdfsbackuprestore_testfailure.shard1.replica_n3' (registry 'solr.core.hdfsbackuprestore_testfailure.shard1.replica_n3') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@a391556
   [junit4]   2> 141385 INFO  (qtp1735966127-2148) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node4 x:hdfsbackuprestore_testfailure_shard1_replica_n3 ] o.a.s.c.SolrCore [[hdfsbackuprestore_testfailure_shard1_replica_n3] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8CA470F856C867D3-001/tempDir-002/node1/hdfsbackuprestore_testfailure_shard1_replica_n3], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8CA470F856C867D3-001/tempDir-002/node1/./hdfsbackuprestore_testfailure_shard1_replica_n3/data/]
   [junit4]   2> 141385 INFO  (qtp1735966127-2150) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node8 x:hdfsbackuprestore_testfailure_shard2_replica_n7 ] o.a.s.s.IndexSchema [hdfsbackuprestore_testfailure_shard2_replica_n7] Schema name=minimal
   [junit4]   2> 141388 INFO  (qtp1735966127-2150) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node8 x:hdfsbackuprestore_testfailure_shard2_replica_n7 ] o.a.s.s.IndexSchema Loaded schema minimal/1.1 with uniqueid field id
   [junit4]   2> 141389 INFO  (qtp1735966127-2150) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node8 x:hdfsbackuprestore_testfailure_shard2_replica_n7 ] o.a.s.c.CoreContainer Creating SolrCore 'hdfsbackuprestore_testfailure_shard2_replica_n7' using configuration from collection hdfsbackuprestore_testfailure, trusted=true
   [junit4]   2> 141389 INFO  (qtp1735966127-2150) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node8 x:hdfsbackuprestore_testfailure_shard2_replica_n7 ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.hdfsbackuprestore_testfailure.shard2.replica_n7' (registry 'solr.core.hdfsbackuprestore_testfailure.shard2.replica_n7') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@a391556
   [junit4]   2> 141390 INFO  (qtp1735966127-2150) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node8 x:hdfsbackuprestore_testfailure_shard2_replica_n7 ] o.a.s.c.SolrCore [[hdfsbackuprestore_testfailure_shard2_replica_n7] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8CA470F856C867D3-001/tempDir-002/node1/hdfsbackuprestore_testfailure_shard2_replica_n7], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8CA470F856C867D3-001/tempDir-002/node1/./hdfsbackuprestore_testfailure_shard2_replica_n7/data/]
   [junit4]   2> 141525 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 141525 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 141526 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 141526 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 141530 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.s.SolrIndexSearcher Opening [Searcher@32867733[hdfsbackuprestore_testfailure_shard2_replica_n5] main]
   [junit4]   2> 141538 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 141541 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 141541 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
   [junit4]   2> 141542 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1643188705621442560
   [junit4]   2> 141546 INFO  (searcherExecutor-618-thread-1-processing-n:127.0.0.1:39505_solr x:hdfsbackuprestore_testfailure_shard2_replica_n5 c:hdfsbackuprestore_testfailure s:shard2 r:core_node6) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.c.SolrCore [hdfsbackuprestore_testfailure_shard2_replica_n5] Registered new searcher Searcher@32867733[hdfsbackuprestore_testfailure_shard2_replica_n5] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 141552 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.c.ZkShardTerms Successful update of terms at /collections/hdfsbackuprestore_testfailure/terms/shard2 to Terms{values={core_node6=0}, version=0}
   [junit4]   2> 141552 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.c.ShardLeaderElectionContextBase make sure parent is created /collections/hdfsbackuprestore_testfailure/leaders/shard2
   [junit4]   2> 141579 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.c.ShardLeaderElectionContext Waiting until we see more replicas up for shard shard2: total=2 found=1 timeoutin=9998ms
   [junit4]   2> 141583 INFO  (qtp1735966127-2148) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node4 x:hdfsbackuprestore_testfailure_shard1_replica_n3 ] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 141584 INFO  (qtp1735966127-2148) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node4 x:hdfsbackuprestore_testfailure_shard1_replica_n3 ] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 141586 INFO  (qtp1735966127-2148) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node4 x:hdfsbackuprestore_testfailure_shard1_replica_n3 ] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 141586 INFO  (qtp1735966127-2148) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node4 x:hdfsbackuprestore_testfailure_shard1_replica_n3 ] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 141595 INFO  (qtp1735966127-2148) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node4 x:hdfsbackuprestore_testfailure_shard1_replica_n3 ] o.a.s.s.SolrIndexSearcher Opening [Searcher@413dfcaf[hdfsbackuprestore_testfailure_shard1_replica_n3] main]
   [junit4]   2> 141600 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 141600 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 141601 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 141601 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 141602 INFO  (qtp1735966127-2150) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node8 x:hdfsbackuprestore_testfailure_shard2_replica_n7 ] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 141602 INFO  (qtp1735966127-2150) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node8 x:hdfsbackuprestore_testfailure_shard2_replica_n7 ] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 141604 INFO  (qtp1735966127-2150) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node8 x:hdfsbackuprestore_testfailure_shard2_replica_n7 ] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 141604 INFO  (qtp1735966127-2150) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node8 x:hdfsbackuprestore_testfailure_shard2_replica_n7 ] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 141605 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.s.SolrIndexSearcher Opening [Searcher@273ef3ff[hdfsbackuprestore_testfailure_shard1_replica_n1] main]
   [junit4]   2> 141608 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 141609 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 141609 INFO  (qtp1735966127-2148) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node4 x:hdfsbackuprestore_testfailure_shard1_replica_n3 ] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 141609 INFO  (qtp1735966127-2150) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node8 x:hdfsbackuprestore_testfailure_shard2_replica_n7 ] o.a.s.s.SolrIndexSearcher Opening [Searcher@49083f3c[hdfsbackuprestore_testfailure_shard2_replica_n7] main]
   [junit4]   2> 141610 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
   [junit4]   2> 141610 INFO  (qtp1735966127-2148) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node4 x:hdfsbackuprestore_testfailure_shard1_replica_n3 ] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 141611 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1643188705694842880
   [junit4]   2> 141612 INFO  (qtp1735966127-2148) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node4 x:hdfsbackuprestore_testfailure_shard1_replica_n3 ] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
   [junit4]   2> 141612 INFO  (qtp1735966127-2148) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node4 x:hdfsbackuprestore_testfailure_shard1_replica_n3 ] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1643188705695891456
   [junit4]   2> 141618 INFO  (qtp1735966127-2150) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node8 x:hdfsbackuprestore_testfailure_shard2_replica_n7 ] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 141619 INFO  (qtp1735966127-2150) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node8 x:hdfsbackuprestore_testfailure_shard2_replica_n7 ] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 141621 INFO  (qtp1735966127-2150) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node8 x:hdfsbackuprestore_testfailure_shard2_replica_n7 ] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
   [junit4]   2> 141621 INFO  (qtp1735966127-2150) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node8 x:hdfsbackuprestore_testfailure_shard2_replica_n7 ] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1643188705705328640
   [junit4]   2> 141621 INFO  (searcherExecutor-619-thread-1-processing-n:127.0.0.1:39505_solr x:hdfsbackuprestore_testfailure_shard1_replica_n1 c:hdfsbackuprestore_testfailure s:shard1 r:core_node2) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.c.SolrCore [hdfsbackuprestore_testfailure_shard1_replica_n1] Registered new searcher Searcher@273ef3ff[hdfsbackuprestore_testfailure_shard1_replica_n1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 141622 INFO  (searcherExecutor-620-thread-1-processing-n:127.0.0.1:43001_solr x:hdfsbackuprestore_testfailure_shard1_replica_n3 c:hdfsbackuprestore_testfailure s:shard1 r:core_node4) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node4 x:hdfsbackuprestore_testfailure_shard1_replica_n3 ] o.a.s.c.SolrCore [hdfsbackuprestore_testfailure_shard1_replica_n3] Registered new searcher Searcher@413dfcaf[hdfsbackuprestore_testfailure_shard1_replica_n3] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 141634 INFO  (searcherExecutor-621-thread-1-processing-n:127.0.0.1:43001_solr x:hdfsbackuprestore_testfailure_shard2_replica_n7 c:hdfsbackuprestore_testfailure s:shard2 r:core_node8) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node8 x:hdfsbackuprestore_testfailure_shard2_replica_n7 ] o.a.s.c.SolrCore [hdfsbackuprestore_testfailure_shard2_replica_n7] Registered new searcher Searcher@49083f3c[hdfsbackuprestore_testfailure_shard2_replica_n7] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 141635 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.c.ZkShardTerms Successful update of terms at /collections/hdfsbackuprestore_testfailure/terms/shard1 to Terms{values={core_node2=0}, version=0}
   [junit4]   2> 141636 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContextBase make sure parent is created /collections/hdfsbackuprestore_testfailure/leaders/shard1
   [junit4]   2> 141637 INFO  (qtp1735966127-2148) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node4 x:hdfsbackuprestore_testfailure_shard1_replica_n3 ] o.a.s.c.ZkShardTerms Failed to save terms, version is not a match, retrying
   [junit4]   2> 141640 INFO  (qtp1735966127-2148) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node4 x:hdfsbackuprestore_testfailure_shard1_replica_n3 ] o.a.s.c.ZkShardTerms Successful update of terms at /collections/hdfsbackuprestore_testfailure/terms/shard1 to Terms{values={core_node2=0, core_node4=0}, version=1}
   [junit4]   2> 141640 INFO  (qtp1735966127-2148) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node4 x:hdfsbackuprestore_testfailure_shard1_replica_n3 ] o.a.s.c.ShardLeaderElectionContextBase make sure parent is created /collections/hdfsbackuprestore_testfailure/leaders/shard1
   [junit4]   2> 141646 INFO  (qtp1735966127-2150) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node8 x:hdfsbackuprestore_testfailure_shard2_replica_n7 ] o.a.s.c.ZkShardTerms Successful update of terms at /collections/hdfsbackuprestore_testfailure/terms/shard2 to Terms{values={core_node6=0, core_node8=0}, version=1}
   [junit4]   2> 141647 INFO  (qtp1735966127-2150) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node8 x:hdfsbackuprestore_testfailure_shard2_replica_n7 ] o.a.s.c.ShardLeaderElectionContextBase make sure parent is created /collections/hdfsbackuprestore_testfailure/leaders/shard2
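The "Failed to save terms, version is not a match, retrying" line a few entries above is ZkShardTerms losing an optimistic-concurrency race: two replicas read the terms znode at the same version and only one setData wins. A minimal sketch of that versioned read-modify-write loop against raw ZooKeeper (ZkShardTerms itself is internal Solr code; the helper class below is hypothetical):

    import java.util.function.UnaryOperator;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    class ZkCasUpdate {
        // Versioned read-modify-write against a znode: the same optimistic loop
        // that makes ZkShardTerms log "version is not a match, retrying" when
        // two replicas race to update /collections/<coll>/terms/<shard>.
        static void update(ZooKeeper zk, String path, UnaryOperator<byte[]> modify)
                throws KeeperException, InterruptedException {
            while (true) {
                Stat stat = new Stat();
                byte[] current = zk.getData(path, false, stat); // read data + version
                try {
                    zk.setData(path, modify.apply(current), stat.getVersion());
                    return; // version matched, write applied
                } catch (KeeperException.BadVersionException e) {
                    // another writer bumped the version first; re-read and retry
                }
            }
        }
    }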
   [junit4]   2> 141653 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 141653 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 141653 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:39505/solr/hdfsbackuprestore_testfailure_shard1_replica_n1/
   [junit4]   2> 141654 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.u.PeerSync PeerSync: core=hdfsbackuprestore_testfailure_shard1_replica_n1 url=http://127.0.0.1:39505/solr START replicas=[http://127.0.0.1:43001/solr/hdfsbackuprestore_testfailure_shard1_replica_n3/] nUpdates=100
   [junit4]   2> 141656 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.u.PeerSync PeerSync: core=hdfsbackuprestore_testfailure_shard1_replica_n1 url=http://127.0.0.1:39505/solr DONE.  We have no versions.  sync failed.
   [junit4]   2> 141662 INFO  (qtp1735966127-2146) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node4 x:hdfsbackuprestore_testfailure_shard1_replica_n3 ] o.a.s.c.S.Request [hdfsbackuprestore_testfailure_shard1_replica_n3]  webapp=/solr path=/get params={distrib=false&qt=/get&fingerprint=false&getVersions=100&wt=javabin&version=2} status=0 QTime=1
   [junit4]   2> 141663 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.c.SyncStrategy Leader's attempt to sync with shard failed, moving to the next candidate
   [junit4]   2> 141663 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContext We failed sync, but we have no versions - we can't sync in that case - we were active before, so become leader anyway
   [junit4]   2> 141663 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContextBase Creating leader registration node /collections/hdfsbackuprestore_testfailure/leaders/shard1/leader after winning as /collections/hdfsbackuprestore_testfailure/leader_elect/shard1/election/72286213500829702-core_node2-n_0000000000
   [junit4]   2> 141667 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:39505/solr/hdfsbackuprestore_testfailure_shard1_replica_n1/ shard1
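At this point shard1 has a registered leader (terms registration, failed PeerSync with no versions, leader znode creation, "I am the new leader"). A short SolrJ sketch, assuming SolrJ 8.x and a reachable test ZooKeeper (the zkHost below is a placeholder; the real test used an ephemeral port), that reads the elected leader back out of the cluster state:

    import java.util.Collections;
    import java.util.Optional;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.common.cloud.DocCollection;
    import org.apache.solr.common.cloud.Replica;

    public class LeaderLookup {
        public static void main(String[] args) throws Exception {
            String zkHost = "127.0.0.1:2181"; // placeholder; not the port from this run
            try (CloudSolrClient client = new CloudSolrClient.Builder(
                    Collections.singletonList(zkHost), Optional.empty()).build()) {
                client.connect();
                DocCollection coll = client.getZkStateReader()
                        .getClusterState().getCollection("hdfsbackuprestore_testfailure");
                Replica leader = coll.getLeader("shard1"); // elected leader, or null
                System.out.println(leader == null ? "no leader" : leader.getCoreUrl());
            }
        }
    }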
   [junit4]   2> 141772 INFO  (zkCallback-801-thread-1) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testfailure/state.json] for collection [hdfsbackuprestore_testfailure] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 141773 INFO  (zkCallback-801-thread-3) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testfailure/state.json] for collection [hdfsbackuprestore_testfailure] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 141773 INFO  (zkCallback-801-thread-2) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testfailure/state.json] for collection [hdfsbackuprestore_testfailure] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 141778 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 141783 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&coreNodeName=core_node2&collection.configName=conf1&newCollection=true&name=hdfsbackuprestore_testfailure_shard1_replica_n1&action=CREATE&numShards=2&collection=hdfsbackuprestore_testfailure&shard=shard1&wt=javabin&version=2&replicaType=NRT} status=0 QTime=1505
   [junit4]   2> 141883 INFO  (zkCallback-801-thread-1) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testfailure/state.json] for collection [hdfsbackuprestore_testfailure] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 141883 INFO  (zkCallback-801-thread-4) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testfailure/state.json] for collection [hdfsbackuprestore_testfailure] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 141883 INFO  (zkCallback-801-thread-3) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testfailure/state.json] for collection [hdfsbackuprestore_testfailure] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 141883 INFO  (zkCallback-801-thread-2) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testfailure/state.json] for collection [hdfsbackuprestore_testfailure] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 142080 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 142080 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 142080 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:39505/solr/hdfsbackuprestore_testfailure_shard2_replica_n5/
   [junit4]   2> 142081 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.u.PeerSync PeerSync: core=hdfsbackuprestore_testfailure_shard2_replica_n5 url=http://127.0.0.1:39505/solr START replicas=[http://127.0.0.1:43001/solr/hdfsbackuprestore_testfailure_shard2_replica_n7/] nUpdates=100
   [junit4]   2> 142081 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.u.PeerSync PeerSync: core=hdfsbackuprestore_testfailure_shard2_replica_n5 url=http://127.0.0.1:39505/solr DONE.  We have no versions.  sync failed.
   [junit4]   2> 142084 INFO  (qtp1735966127-2149) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node8 x:hdfsbackuprestore_testfailure_shard2_replica_n7 ] o.a.s.c.S.Request [hdfsbackuprestore_testfailure_shard2_replica_n7]  webapp=/solr path=/get params={distrib=false&qt=/get&fingerprint=false&getVersions=100&wt=javabin&version=2} status=0 QTime=1
   [junit4]   2> 142084 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.c.SyncStrategy Leader's attempt to sync with shard failed, moving to the next candidate
   [junit4]   2> 142084 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.c.ShardLeaderElectionContext We failed sync, but we have no versions - we can't sync in that case - we were active before, so become leader anyway
   [junit4]   2> 142084 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.c.ShardLeaderElectionContextBase Creating leader registration node /collections/hdfsbackuprestore_testfailure/leaders/shard2/leader after winning as /collections/hdfsbackuprestore_testfailure/leader_elect/shard2/election/72286213500829702-core_node6-n_0000000000
   [junit4]   2> 142087 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:39505/solr/hdfsbackuprestore_testfailure_shard2_replica_n5/ shard2
   [junit4]   2> 142190 INFO  (zkCallback-801-thread-1) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testfailure/state.json] for collection [hdfsbackuprestore_testfailure] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 142191 INFO  (zkCallback-801-thread-2) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testfailure/state.json] for collection [hdfsbackuprestore_testfailure] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 142192 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 142192 INFO  (zkCallback-801-thread-4) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testfailure/state.json] for collection [hdfsbackuprestore_testfailure] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 142193 INFO  (zkCallback-801-thread-3) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testfailure/state.json] for collection [hdfsbackuprestore_testfailure] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 142195 INFO  (qtp153456173-2145) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&coreNodeName=core_node6&collection.configName=conf1&newCollection=true&name=hdfsbackuprestore_testfailure_shard2_replica_n5&action=CREATE&numShards=2&collection=hdfsbackuprestore_testfailure&shard=shard2&wt=javabin&version=2&replicaType=NRT} status=0 QTime=1914
   [junit4]   2> 142296 INFO  (zkCallback-801-thread-1) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testfailure/state.json] for collection [hdfsbackuprestore_testfailure] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 142297 INFO  (zkCallback-801-thread-2) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testfailure/state.json] for collection [hdfsbackuprestore_testfailure] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 142297 INFO  (zkCallback-801-thread-4) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testfailure/state.json] for collection [hdfsbackuprestore_testfailure] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 142297 INFO  (zkCallback-801-thread-3) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testfailure/state.json] for collection [hdfsbackuprestore_testfailure] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 142654 INFO  (qtp1735966127-2148) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node4 x:hdfsbackuprestore_testfailure_shard1_replica_n3 ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&coreNodeName=core_node4&collection.configName=conf1&newCollection=true&name=hdfsbackuprestore_testfailure_shard1_replica_n3&action=CREATE&numShards=2&collection=hdfsbackuprestore_testfailure&shard=shard1&wt=javabin&version=2&replicaType=NRT} status=0 QTime=2368
   [junit4]   2> 142657 INFO  (qtp1735966127-2150) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node8 x:hdfsbackuprestore_testfailure_shard2_replica_n7 ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&coreNodeName=core_node8&collection.configName=conf1&newCollection=true&name=hdfsbackuprestore_testfailure_shard2_replica_n7&action=CREATE&numShards=2&collection=hdfsbackuprestore_testfailure&shard=shard2&wt=javabin&version=2&replicaType=NRT} status=0 QTime=2362
   [junit4]   2> 142664 INFO  (qtp153456173-2147) [n:127.0.0.1:39505_solr     ] o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most 45 seconds. Check all shard replicas
   [junit4]   2> 142761 INFO  (zkCallback-807-thread-2) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testfailure/state.json] for collection [hdfsbackuprestore_testfailure] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 142761 INFO  (zkCallback-807-thread-1) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testfailure/state.json] for collection [hdfsbackuprestore_testfailure] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 142761 INFO  (zkCallback-801-thread-1) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testfailure/state.json] for collection [hdfsbackuprestore_testfailure] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 142762 INFO  (zkCallback-801-thread-2) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testfailure/state.json] for collection [hdfsbackuprestore_testfailure] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 142762 INFO  (zkCallback-801-thread-4) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testfailure/state.json] for collection [hdfsbackuprestore_testfailure] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 142763 INFO  (zkCallback-801-thread-3) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testfailure/state.json] for collection [hdfsbackuprestore_testfailure] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 142764 INFO  (qtp153456173-2147) [n:127.0.0.1:39505_solr     ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={pullReplicas=0&collection.configName=conf1&maxShardsPerNode=2&name=hdfsbackuprestore_testfailure&nrtReplicas=2&action=CREATE&numShards=2&tlogReplicas=0&wt=javabin&version=2} status=0 QTime=2845
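That CREATE call took 2845 ms end-to-end. The logged /admin/collections params correspond to the standard SolrJ collection-admin request; a hedged client-side equivalent (assuming SolrJ 8.x and an already-built SolrClient):

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    class CreateBackupTestCollection {
        // Mirrors the /admin/collections CREATE params logged above:
        // name=hdfsbackuprestore_testfailure, conf1, numShards=2,
        // nrtReplicas=2, maxShardsPerNode=2.
        static void create(SolrClient client) throws Exception {
            CollectionAdminRequest
                .createCollection("hdfsbackuprestore_testfailure", "conf1",
                        2 /* numShards */, 2 /* nrtReplicas */)
                .setMaxShardsPerNode(2)
                .process(client);
        }
    }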
   [junit4]   2> 142829 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.c.ZkShardTerms Successful update of terms at /collections/hdfsbackuprestore_testfailure/terms/shard2 to Terms{values={core_node6=1, core_node8=1}, version=2}
   [junit4]   2> 142850 INFO  (qtp153456173-2153) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.c.ZkShardTerms Successful update of terms at /collections/hdfsbackuprestore_testfailure/terms/shard1 to Terms{values={core_node2=1, core_node4=1}, version=2}
   [junit4]   2> 142869 INFO  (qtp1735966127-2149) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node8 x:hdfsbackuprestore_testfailure_shard2_replica_n7 ] o.a.s.u.p.LogUpdateProcessorFactory [hdfsbackuprestore_testfailure_shard2_replica_n7]  webapp=/solr path=/update params={update.distrib=FROMLEADER&distrib.from=http://127.0.0.1:39505/solr/hdfsbackuprestore_testfailure_shard2_replica_n5/&wt=javabin&version=2}{add=[2 (1643188706932162560), 3 (1643188706953134080), 5 (1643188706953134081), 6 (1643188706954182656), 7 (1643188706954182657), 9 (1643188706955231232), 17 (1643188706955231233), 18 (1643188706957328384), 19 (1643188706957328385), 21 (1643188706958376960), ... (17 adds)]} 0 19
   [junit4]   2> 142870 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.u.p.LogUpdateProcessorFactory [hdfsbackuprestore_testfailure_shard2_replica_n5]  webapp=/solr path=/update params={wt=javabin&version=2}{add=[2 (1643188706932162560), 3 (1643188706953134080), 5 (1643188706953134081), 6 (1643188706954182656), 7 (1643188706954182657), 9 (1643188706955231232), 17 (1643188706955231233), 18 (1643188706957328384), 19 (1643188706957328385), 21 (1643188706958376960), ... (17 adds)]} 0 83
   [junit4]   2> 142882 INFO  (qtp1735966127-2148) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node4 x:hdfsbackuprestore_testfailure_shard1_replica_n3 ] o.a.s.u.p.LogUpdateProcessorFactory [hdfsbackuprestore_testfailure_shard1_replica_n3]  webapp=/solr path=/update params={update.distrib=FROMLEADER&distrib.from=http://127.0.0.1:39505/solr/hdfsbackuprestore_testfailure_shard1_replica_n1/&wt=javabin&version=2}{add=[0 (1643188706917482496), 1 (1643188706967814144), 4 (1643188706968862720), 8 (1643188706968862721), 10 (1643188706968862722), 11 (1643188706969911296), 12 (1643188706969911297), 13 (1643188706970959872), 14 (1643188706970959873), 15 (1643188706972008448), ... (20 adds)]} 0 32
   [junit4]   2> 142883 INFO  (qtp153456173-2153) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard1 r:core_node2 x:hdfsbackuprestore_testfailure_shard1_replica_n1 ] o.a.s.u.p.LogUpdateProcessorFactory [hdfsbackuprestore_testfailure_shard1_replica_n1]  webapp=/solr path=/update params={wt=javabin&version=2}{add=[0 (1643188706917482496), 1 (1643188706967814144), 4 (1643188706968862720), 8 (1643188706968862721), 10 (1643188706968862722), 11 (1643188706969911296), 12 (1643188706969911297), 13 (1643188706970959872), 14 (1643188706970959873), 15 (1643188706972008448), ... (20 adds)]} 0 113
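The two FROMLEADER batches above (17 + 20 adds) are each shard leader forwarding an ordinary client update to its replica. The client side of that traffic is plain SolrJ; a minimal sketch (collection name and doc count taken from the log, the helper class itself is hypothetical):

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.common.SolrInputDocument;

    class IndexTestDocs {
        // Adds a handful of docs and hard-commits: the client-side counterpart
        // of the /update batches logged above.
        static void index(SolrClient client, String collection) throws Exception {
            for (int i = 0; i < 37; i++) { // 17 + 20 adds in the log above
                SolrInputDocument doc = new SolrInputDocument();
                doc.addField("id", Integer.toString(i));
                client.add(collection, doc);
            }
            client.commit(collection); // triggers the DirectUpdateHandler2 commit
        }
    }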
   [junit4]   2> 142920 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1643188707067428864,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 142920 INFO  (qtp153456173-2154) [n:127.0.0.1:39505_solr c:hdfsbackuprestore_testfailure s:shard2 r:core_node6 x:hdfsbackuprestore_testfailure_shard2_replica_n5 ] o.a.s.u.SolrIndexWriter Calling setCommitData with IW:org.apache.solr.update.SolrIndexWriter@2f9a42b1 commitCommandVersion:1643188707067428864
   [junit4]   2> 142925 INFO  (qtp1735966127-2150) [n:127.0.0.1:43001_solr c:hdfsbackuprestore_testfailure s:shard2 r:core

[...truncated too long message...]

e/workspace/Lucene-Solr-Tests-master/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/lucene/top-level-ivy-settings.xml

[...truncated: the same resolve / ivy-availability-check / -ivy-fail-disallowed-ivy-version / ivy-fail / ivy-configure cycle repeats near-verbatim for each remaining module...]

resolve:

jar-checksums:
    [mkdir] Created dir: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/null506038717
     [copy] Copying 249 files to /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/null506038717
   [delete] Deleting directory /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/null506038717

check-working-copy:
[ivy:cachepath] :: resolving dependencies :: #;working@lucene1-us-west
[ivy:cachepath] confs: [default]
[ivy:cachepath] found org.eclipse.jgit#org.eclipse.jgit;5.3.0.201903130848-r in public
[ivy:cachepath] found com.jcraft#jsch;0.1.54 in public
[ivy:cachepath] found com.jcraft#jzlib;1.1.1 in public
[ivy:cachepath] found com.googlecode.javaewah#JavaEWAH;1.1.6 in public
[ivy:cachepath] found org.slf4j#slf4j-api;1.7.2 in public
[ivy:cachepath] found org.bouncycastle#bcpg-jdk15on;1.60 in public
[ivy:cachepath] found org.bouncycastle#bcprov-jdk15on;1.60 in public
[ivy:cachepath] found org.bouncycastle#bcpkix-jdk15on;1.60 in public
[ivy:cachepath] found org.slf4j#slf4j-nop;1.7.2 in public
[ivy:cachepath] :: resolution report :: resolve 33ms :: artifacts dl 2ms
        ---------------------------------------------------------------------
        |                  |            modules            ||   artifacts   |
        |       conf       | number| search|dwnlded|evicted|| number|dwnlded|
        ---------------------------------------------------------------------
        |      default     |   9   |   0   |   0   |   0   ||   9   |   0   |
        ---------------------------------------------------------------------
[wc-checker] Initializing working copy...
[wc-checker] Checking working copy status...

-jenkins-base:

BUILD SUCCESSFUL
Total time: 110 minutes 20 seconds
Archiving artifacts
java.lang.InterruptedException: no matches found within 10000
        at hudson.FilePath$ValidateAntFileMask.hasMatch(FilePath.java:2847)
        at hudson.FilePath$ValidateAntFileMask.invoke(FilePath.java:2726)
        at hudson.FilePath$ValidateAntFileMask.invoke(FilePath.java:2707)
        at hudson.FilePath$FileCallableWrapper.call(FilePath.java:3086)
Also:   hudson.remoting.Channel$CallSiteStackTrace: Remote call to lucene
                at hudson.remoting.Channel.attachCallSiteStackTrace(Channel.java:1741)
                at hudson.remoting.UserRequest$ExceptionResponse.retrieve(UserRequest.java:357)
                at hudson.remoting.Channel.call(Channel.java:955)
                at hudson.FilePath.act(FilePath.java:1072)
                at hudson.FilePath.act(FilePath.java:1061)
                at hudson.FilePath.validateAntFileMask(FilePath.java:2705)
                at hudson.tasks.ArtifactArchiver.perform(ArtifactArchiver.java:243)
                at hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:81)
                at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
                at hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:744)
                at hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:690)
                at hudson.model.Build$BuildExecution.post2(Build.java:186)
                at hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:635)
                at hudson.model.Run.execute(Run.java:1835)
                at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43)
                at hudson.model.ResourceController.execute(ResourceController.java:97)
                at hudson.model.Executor.run(Executor.java:429)
Caused: hudson.FilePath$TunneledInterruptedException
        at hudson.FilePath$FileCallableWrapper.call(FilePath.java:3088)
        at hudson.remoting.UserRequest.perform(UserRequest.java:212)
        at hudson.remoting.UserRequest.perform(UserRequest.java:54)
        at hudson.remoting.Request$2.run(Request.java:369)
        at hudson.remoting.InterceptingExecutorService$1.call(InterceptingExecutorService.java:72)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:744)
Caused: java.lang.InterruptedException: java.lang.InterruptedException: no matches found within 10000
        at hudson.FilePath.act(FilePath.java:1074)
        at hudson.FilePath.act(FilePath.java:1061)
        at hudson.FilePath.validateAntFileMask(FilePath.java:2705)
        at hudson.tasks.ArtifactArchiver.perform(ArtifactArchiver.java:243)
        at hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:81)
        at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
        at hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:744)
        at hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:690)
        at hudson.model.Build$BuildExecution.post2(Build.java:186)
        at hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:635)
        at hudson.model.Run.execute(Run.java:1835)
        at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43)
        at hudson.model.ResourceController.execute(ResourceController.java:97)
        at hudson.model.Executor.run(Executor.java:429)
No artifacts found that match the file pattern "**/*.events,heapdumps/**,**/hs_err_pid*". Configuration error?
Recording test results
Build step 'Publish JUnit test result report' changed build result to UNSTABLE
Email was triggered for: Unstable (Test Failures)
Sending email for trigger: Unstable (Test Failures)



[JENKINS] Lucene-Solr-Tests-master - Build # 3643 - Still Unstable

Apache Jenkins Server-2
Build: https://builds.apache.org/job/Lucene-Solr-Tests-master/3643/

1 test failed.
FAILED:  org.apache.solr.cloud.api.collections.TestHdfsCloudBackupRestore.testRestoreFailure

Error Message:
Failed collection is still in the clusterstate: DocCollection(hdfsbackuprestore_testfailure_restored//collections/hdfsbackuprestore_testfailure_restored/state.json/2)={   "pullReplicas":0,   "replicationFactor":2,   "shards":{     "shard2":{       "range":"0-7fffffff",       "state":"construction",       "replicas":{"core_node2":{           "core":"hdfsbackuprestore_testfailure_restored_shard2_replica_n1",           "base_url":"http://127.0.0.1:39491/solr",           "node_name":"127.0.0.1:39491_solr",           "state":"down",           "type":"NRT",           "force_set_state":"false"}},       "stateTimestamp":"1567077343098341196"},     "shard1":{       "range":"80000000-ffffffff",       "state":"construction",       "replicas":{},       "stateTimestamp":"1567077343098356703"}},   "router":{"name":"compositeId"},   "maxShardsPerNode":"2",   "autoAddReplicas":"false",   "nrtReplicas":2,   "tlogReplicas":0} Expected: not a collection containing "hdfsbackuprestore_testfailure_restored"      but: was <[hdfsbackuprestore_testok, hdfsbackuprestore_testfailure_restored, hdfsbackuprestore_testfailure, hdfsbackuprestore_testok_restored]>

Stack Trace:
java.lang.AssertionError: Failed collection is still in the clusterstate: DocCollection(hdfsbackuprestore_testfailure_restored//collections/hdfsbackuprestore_testfailure_restored/state.json/2)={
  "pullReplicas":0,
  "replicationFactor":2,
  "shards":{
    "shard2":{
      "range":"0-7fffffff",
      "state":"construction",
      "replicas":{"core_node2":{
          "core":"hdfsbackuprestore_testfailure_restored_shard2_replica_n1",
          "base_url":"http://127.0.0.1:39491/solr",
          "node_name":"127.0.0.1:39491_solr",
          "state":"down",
          "type":"NRT",
          "force_set_state":"false"}},
      "stateTimestamp":"1567077343098341196"},
    "shard1":{
      "range":"80000000-ffffffff",
      "state":"construction",
      "replicas":{},
      "stateTimestamp":"1567077343098356703"}},
  "router":{"name":"compositeId"},
  "maxShardsPerNode":"2",
  "autoAddReplicas":"false",
  "nrtReplicas":2,
  "tlogReplicas":0}
Expected: not a collection containing "hdfsbackuprestore_testfailure_restored"
     but: was <[hdfsbackuprestore_testok, hdfsbackuprestore_testfailure_restored, hdfsbackuprestore_testfailure, hdfsbackuprestore_testok_restored]>
        at __randomizedtesting.SeedInfo.seed([67B07A2A706FB781:4ECCE40F5836B4AC]:0)
        at org.hamcrest.MatcherAssert.assertThat(MatcherAssert.java:20)
        at org.junit.Assert.assertThat(Assert.java:956)
        at org.apache.solr.cloud.api.collections.AbstractCloudBackupRestoreTestCase.testRestoreFailure(AbstractCloudBackupRestoreTestCase.java:211)
        at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.base/java.lang.reflect.Method.invoke(Method.java:566)
        at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1750)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:938)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:974)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:988)
        at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
        at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
        at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
        at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:947)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:832)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:883)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:894)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
        at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
        at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
        at java.base/java.lang.Thread.run(Thread.java:834)
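The "Expected: not a collection containing ... but: was <[...]>" wording is Hamcrest's description of a not(hasItem(...)) matcher, consistent with the MatcherAssert.assertThat frame at the top of the trace. A plausible reconstruction of the failing check at AbstractCloudBackupRestoreTestCase.java:211 (the actual source may differ):

    import static org.hamcrest.CoreMatchers.hasItem;
    import static org.hamcrest.CoreMatchers.not;
    import static org.hamcrest.MatcherAssert.assertThat;

    import java.util.Set;
    import org.apache.solr.common.cloud.ClusterState;

    class RestoreFailureCheck {
        // After a deliberately failed restore, the half-built restored
        // collection is expected to have been cleaned out of the cluster
        // state; in both builds here it was still present.
        static void assertCleanedUp(ClusterState clusterState, String restoreCollection) {
            Set<String> collections = clusterState.getCollectionsMap().keySet();
            assertThat("Failed collection is still in the clusterstate: "
                            + clusterState.getCollectionOrNull(restoreCollection),
                    collections, not(hasItem(restoreCollection)));
        }
    }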




Build Log:
[...truncated 14547 lines...]
   [junit4] Suite: org.apache.solr.cloud.api.collections.TestHdfsCloudBackupRestore
   [junit4]   2> 1336930 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.a.s.SolrTestCaseJ4 SecureRandom sanity checks: test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom
   [junit4]   2> 1336931 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.a.s.SolrTestCaseJ4 Created dataDir: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_67B07A2A706FB781-001/data-dir-156-001
   [junit4]   2> 1336932 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.a.s.SolrTestCaseJ4 Using PointFields (NUMERIC_POINTS_SYSPROP=true) w/NUMERIC_DOCVALUES_SYSPROP=true
   [junit4]   2> 1336934 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.a.s.SolrTestCaseJ4 Randomized ssl (false) and clientAuth (true) via: @org.apache.solr.util.RandomizeSSL(reason="", value=0.0/0.0, ssl=0.0/0.0, clientAuth=0.0/0.0)
   [junit4]   1> Formatting using clusterid: testClusterID
   [junit4]   2> 1337037 WARN  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.a.h.m.i.MetricsConfig Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
   [junit4]   2> 1337077 WARN  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
   [junit4]   2> 1337086 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.e.j.s.Server jetty-9.4.19.v20190610; built: 2019-06-10T16:30:51.723Z; git: afcf563148970e98786327af5e07c261fda175d3; jvm 11.0.1+13-LTS
   [junit4]   2> 1337088 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.e.j.s.session DefaultSessionIdManager workerName=node0
   [junit4]   2> 1337088 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.e.j.s.session No SessionScavenger set, using defaults
   [junit4]   2> 1337088 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.e.j.s.session node0 Scavenging every 600000ms
   [junit4]   2> 1337089 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@67649fbd{static,/static,jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-3.2.0-tests.jar!/webapps/static,AVAILABLE}
   [junit4]   2> 1337280 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.e.j.s.h.ContextHandler Started o.e.j.w.WebAppContext@28305a35{hdfs,/,file:///home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/jetty-localhost.localdomain-33411-hdfs-_-any-14928515097089339045.dir/webapp/,AVAILABLE}{/hdfs}
   [junit4]   2> 1337280 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.e.j.s.AbstractConnector Started ServerConnector@3468008b{HTTP/1.1,[http/1.1]}{localhost.localdomain:33411}
   [junit4]   2> 1337280 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.e.j.s.Server Started @1337406ms
   [junit4]   2> 1337466 WARN  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
   [junit4]   2> 1337468 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.e.j.s.Server jetty-9.4.19.v20190610; built: 2019-06-10T16:30:51.723Z; git: afcf563148970e98786327af5e07c261fda175d3; jvm 11.0.1+13-LTS
   [junit4]   2> 1337469 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.e.j.s.session DefaultSessionIdManager workerName=node0
   [junit4]   2> 1337469 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.e.j.s.session No SessionScavenger set, using defaults
   [junit4]   2> 1337469 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.e.j.s.session node0 Scavenging every 660000ms
   [junit4]   2> 1337471 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@3c12c0b4{static,/static,jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-3.2.0-tests.jar!/webapps/static,AVAILABLE}
   [junit4]   2> 1337651 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.e.j.s.h.ContextHandler Started o.e.j.w.WebAppContext@45f8d630{datanode,/,file:///home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/jetty-localhost-36363-datanode-_-any-4787108586927148340.dir/webapp/,AVAILABLE}{/datanode}
   [junit4]   2> 1337651 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.e.j.s.AbstractConnector Started ServerConnector@35129a86{HTTP/1.1,[http/1.1]}{localhost:36363}
   [junit4]   2> 1337651 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.e.j.s.Server Started @1337777ms
   [junit4]   2> 1337866 INFO  (Block report processor) [     ] BlockStateChange BLOCK* processReport 0x653b30c9e854ae74: Processing first storage report for DS-5a2be94c-8339-4584-bbdf-da255ec91a86 from datanode 86c6b3be-7aa8-44ea-a012-5e32ef5e5cc1
   [junit4]   2> 1337868 INFO  (Block report processor) [     ] BlockStateChange BLOCK* processReport 0x653b30c9e854ae74: from storage DS-5a2be94c-8339-4584-bbdf-da255ec91a86 node DatanodeRegistration(127.0.0.1:46511, datanodeUuid=86c6b3be-7aa8-44ea-a012-5e32ef5e5cc1, infoPort=37677, infoSecurePort=0, ipcPort=40953, storageInfo=lv=-57;cid=testClusterID;nsid=1470178773;c=1567077315782), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
   [junit4]   2> 1337868 INFO  (Block report processor) [     ] BlockStateChange BLOCK* processReport 0x653b30c9e854ae74: Processing first storage report for DS-33297746-4fd6-4a9d-bfdd-896261436b77 from datanode 86c6b3be-7aa8-44ea-a012-5e32ef5e5cc1
   [junit4]   2> 1337868 INFO  (Block report processor) [     ] BlockStateChange BLOCK* processReport 0x653b30c9e854ae74: from storage DS-33297746-4fd6-4a9d-bfdd-896261436b77 node DatanodeRegistration(127.0.0.1:46511, datanodeUuid=86c6b3be-7aa8-44ea-a012-5e32ef5e5cc1, infoPort=37677, infoSecurePort=0, ipcPort=40953, storageInfo=lv=-57;cid=testClusterID;nsid=1470178773;c=1567077315782), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
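The block reports above mark the point where the embedded HDFS cluster (one NameNode, one DataNode with two storage directories) is fully up. A minimal sketch of how such a mini-cluster is brought up with Hadoop's test utilities; Solr wraps this in HdfsTestUtil, so the exact wiring here is an assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Sketch only: in-process NameNode plus one DataNode on ephemeral ports.
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/hdfs-test"); // hypothetical scratch dir
    MiniDFSCluster dfsCluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .build();
    dfsCluster.waitActive();                          // returns once the DataNode has registered
    String hdfsUri = dfsCluster.getURI().toString();  // e.g. hdfs://localhost:38187
    // ... run the test against hdfsUri ...
    dfsCluster.shutdown();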
   [junit4]   2> 1337932 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.a.s.c.MiniSolrCloudCluster Starting cluster of 2 servers in /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_67B07A2A706FB781-001/tempDir-002
   [junit4]   2> 1337932 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
   [junit4]   2> 1337933 INFO  (ZkTestServer Run Thread) [     ] o.a.s.c.ZkTestServer client port:0.0.0.0/0.0.0.0:0
   [junit4]   2> 1337933 INFO  (ZkTestServer Run Thread) [     ] o.a.s.c.ZkTestServer Starting server
   [junit4]   2> 1338033 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.a.s.c.ZkTestServer start zk server on port:42359
   [junit4]   2> 1338033 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.a.s.c.ZkTestServer waitForServerUp: 127.0.0.1:42359
   [junit4]   2> 1338033 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.a.s.c.ZkTestServer parse host and port list: 127.0.0.1:42359
   [junit4]   2> 1338033 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.a.s.c.ZkTestServer connecting to 127.0.0.1 42359
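Next the suite starts an embedded ZooKeeper on a free port (42359 here) and polls it until it answers. With the solr-test-framework helper this is roughly the following; the ZkTestServer constructor signature has varied across versions, so treat it as a sketch:

    import org.apache.solr.cloud.ZkTestServer;
    import java.nio.file.Path;

    Path zkDir = Path.of("/tmp/zkData");              // hypothetical scratch dir
    ZkTestServer zkServer = new ZkTestServer(zkDir);  // port 0: pick any free port
    zkServer.run();                                   // start the server and wait for it to come up
    String zkAddr = zkServer.getZkAddress();          // e.g. 127.0.0.1:42359/solr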
   [junit4]   2> 1338035 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
   [junit4]   2> 1338040 INFO  (zkConnectionManagerCallback-5275-thread-1) [     ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 1338040 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
   [junit4]   2> 1338047 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
   [junit4]   2> 1338051 INFO  (zkConnectionManagerCallback-5277-thread-1) [     ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 1338051 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
   [junit4]   2> 1338055 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
   [junit4]   2> 1338059 INFO  (zkConnectionManagerCallback-5279-thread-1) [     ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 1338059 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
   [junit4]   2> 1338164 WARN  (jetty-launcher-5280-thread-2) [     ] o.e.j.s.AbstractConnector Ignoring deprecated socket close linger time
   [junit4]   2> 1338164 WARN  (jetty-launcher-5280-thread-1) [     ] o.e.j.s.AbstractConnector Ignoring deprecated socket close linger time
   [junit4]   2> 1338164 INFO  (jetty-launcher-5280-thread-1) [     ] o.a.s.c.s.e.JettySolrRunner Start Jetty (original configured port=0)
   [junit4]   2> 1338164 INFO  (jetty-launcher-5280-thread-2) [     ] o.a.s.c.s.e.JettySolrRunner Start Jetty (original configured port=0)
   [junit4]   2> 1338164 INFO  (jetty-launcher-5280-thread-1) [     ] o.a.s.c.s.e.JettySolrRunner Trying to start Jetty on port 0 try number 1 ...
   [junit4]   2> 1338164 INFO  (jetty-launcher-5280-thread-2) [     ] o.a.s.c.s.e.JettySolrRunner Trying to start Jetty on port 0 try number 1 ...
   [junit4]   2> 1338164 INFO  (jetty-launcher-5280-thread-1) [     ] o.e.j.s.Server jetty-9.4.19.v20190610; built: 2019-06-10T16:30:51.723Z; git: afcf563148970e98786327af5e07c261fda175d3; jvm 11.0.1+13-LTS
   [junit4]   2> 1338164 INFO  (jetty-launcher-5280-thread-2) [     ] o.e.j.s.Server jetty-9.4.19.v20190610; built: 2019-06-10T16:30:51.723Z; git: afcf563148970e98786327af5e07c261fda175d3; jvm 11.0.1+13-LTS
   [junit4]   2> 1338167 INFO  (jetty-launcher-5280-thread-2) [     ] o.e.j.s.session DefaultSessionIdManager workerName=node0
   [junit4]   2> 1338167 INFO  (jetty-launcher-5280-thread-2) [     ] o.e.j.s.session No SessionScavenger set, using defaults
   [junit4]   2> 1338167 INFO  (jetty-launcher-5280-thread-2) [     ] o.e.j.s.session node0 Scavenging every 660000ms
   [junit4]   2> 1338167 INFO  (jetty-launcher-5280-thread-1) [     ] o.e.j.s.session DefaultSessionIdManager workerName=node0
   [junit4]   2> 1338167 INFO  (jetty-launcher-5280-thread-1) [     ] o.e.j.s.session No SessionScavenger set, using defaults
   [junit4]   2> 1338167 INFO  (jetty-launcher-5280-thread-1) [     ] o.e.j.s.session node0 Scavenging every 660000ms
   [junit4]   2> 1338168 INFO  (jetty-launcher-5280-thread-2) [     ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@2f2f1656{/solr,null,AVAILABLE}
   [junit4]   2> 1338168 INFO  (jetty-launcher-5280-thread-1) [     ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@6f401437{/solr,null,AVAILABLE}
   [junit4]   2> 1338169 INFO  (jetty-launcher-5280-thread-2) [     ] o.e.j.s.AbstractConnector Started ServerConnector@7d4e6a7b{HTTP/1.1,[http/1.1, h2c]}{127.0.0.1:45897}
   [junit4]   2> 1338169 INFO  (jetty-launcher-5280-thread-2) [     ] o.e.j.s.Server Started @1338294ms
   [junit4]   2> 1338169 INFO  (jetty-launcher-5280-thread-2) [     ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr, hostPort=45897}
   [junit4]   2> 1338169 ERROR (jetty-launcher-5280-thread-2) [     ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
   [junit4]   2> 1338169 INFO  (jetty-launcher-5280-thread-2) [     ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory
   [junit4]   2> 1338169 INFO  (jetty-launcher-5280-thread-2) [     ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 9.0.0
   [junit4]   2> 1338169 INFO  (jetty-launcher-5280-thread-2) [     ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 1338169 INFO  (jetty-launcher-5280-thread-2) [     ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 1338169 INFO  (jetty-launcher-5280-thread-2) [     ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2019-08-29T11:15:16.974782Z
   [junit4]   2> 1338171 INFO  (jetty-launcher-5280-thread-1) [     ] o.e.j.s.AbstractConnector Started ServerConnector@3e2263a5{HTTP/1.1,[http/1.1, h2c]}{127.0.0.1:39491}
   [junit4]   2> 1338171 INFO  (jetty-launcher-5280-thread-1) [     ] o.e.j.s.Server Started @1338297ms
   [junit4]   2> 1338171 INFO  (jetty-launcher-5280-thread-1) [     ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr, hostPort=39491}
   [junit4]   2> 1338171 ERROR (jetty-launcher-5280-thread-1) [     ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
   [junit4]   2> 1338171 INFO  (jetty-launcher-5280-thread-1) [     ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory
   [junit4]   2> 1338171 INFO  (jetty-launcher-5280-thread-1) [     ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 9.0.0
   [junit4]   2> 1338171 INFO  (jetty-launcher-5280-thread-1) [     ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 1338171 INFO  (jetty-launcher-5280-thread-1) [     ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 1338171 INFO  (jetty-launcher-5280-thread-1) [     ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2019-08-29T11:15:16.976770Z
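Two oddities in the banners above are expected under the test runner: the port prints as "null" most likely because each JettySolrRunner is configured with port 0 and the usual port property is never set (the real ports, 39491 and 45897, appear once Jetty binds), and the solr.log.dir ERROR is harmless here because the nodes start without the bin/solr environment that normally supplies -Dsolr.log.dir, so logging falls back to the test's log4j2 configuration.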
   [junit4]   2> 1338172 INFO  (jetty-launcher-5280-thread-2) [     ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
   [junit4]   2> 1338172 INFO  (jetty-launcher-5280-thread-1) [     ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
   [junit4]   2> 1338173 INFO  (zkConnectionManagerCallback-5282-thread-1) [     ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 1338174 INFO  (jetty-launcher-5280-thread-2) [     ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
   [junit4]   2> 1338174 INFO  (zkConnectionManagerCallback-5284-thread-1) [     ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 1338174 INFO  (jetty-launcher-5280-thread-1) [     ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
   [junit4]   2> 1338174 INFO  (jetty-launcher-5280-thread-2) [     ] o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading...
   [junit4]   2> 1338174 INFO  (jetty-launcher-5280-thread-1) [     ] o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading...
   [junit4]   2> 1338182 INFO  (jetty-launcher-5280-thread-2) [     ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@629bbab8, but no JMX reporters were configured - adding default JMX reporter.
   [junit4]   2> 1338184 INFO  (jetty-launcher-5280-thread-1) [     ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@629bbab8, but no JMX reporters were configured - adding default JMX reporter.
   [junit4]   2> 1338445 INFO  (jetty-launcher-5280-thread-2) [     ] o.a.s.h.c.HttpShardHandlerFactory Host whitelist initialized: WhitelistHostChecker [whitelistHosts=null, whitelistHostCheckingEnabled=true]
   [junit4]   2> 1338447 WARN  (jetty-launcher-5280-thread-2) [     ] o.e.j.u.s.S.config Trusting all certificates configured for Client@1cad7576[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 1338447 WARN  (jetty-launcher-5280-thread-2) [     ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for Client@1cad7576[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 1338450 WARN  (jetty-launcher-5280-thread-2) [     ] o.e.j.u.s.S.config Trusting all certificates configured for Client@3c60741a[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 1338450 WARN  (jetty-launcher-5280-thread-2) [     ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for Client@3c60741a[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 1338452 INFO  (jetty-launcher-5280-thread-2) [     ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:42359/solr
   [junit4]   2> 1338454 INFO  (jetty-launcher-5280-thread-2) [     ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
   [junit4]   2> 1338458 INFO  (zkConnectionManagerCallback-5292-thread-1) [     ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 1338458 INFO  (jetty-launcher-5280-thread-2) [     ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
   [junit4]   2> 1338503 INFO  (jetty-launcher-5280-thread-1) [     ] o.a.s.h.c.HttpShardHandlerFactory Host whitelist initialized: WhitelistHostChecker [whitelistHosts=null, whitelistHostCheckingEnabled=true]
   [junit4]   2> 1338504 WARN  (jetty-launcher-5280-thread-1) [     ] o.e.j.u.s.S.config Trusting all certificates configured for Client@7e6312ff[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 1338504 WARN  (jetty-launcher-5280-thread-1) [     ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for Client@7e6312ff[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 1338508 WARN  (jetty-launcher-5280-thread-1) [     ] o.e.j.u.s.S.config Trusting all certificates configured for Client@2ab844a7[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 1338508 WARN  (jetty-launcher-5280-thread-1) [     ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for Client@2ab844a7[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 1338509 INFO  (jetty-launcher-5280-thread-1) [     ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:42359/solr
   [junit4]   2> 1338510 INFO  (jetty-launcher-5280-thread-1) [     ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
   [junit4]   2> 1338512 INFO  (zkConnectionManagerCallback-5298-thread-1) [     ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 1338512 INFO  (jetty-launcher-5280-thread-1) [     ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
   [junit4]   2> 1338561 INFO  (jetty-launcher-5280-thread-2) [n:127.0.0.1:45897_solr     ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
   [junit4]   2> 1338562 INFO  (zkConnectionManagerCallback-5300-thread-1) [     ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 1338562 INFO  (jetty-launcher-5280-thread-2) [n:127.0.0.1:45897_solr     ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
   [junit4]   2> 1338662 INFO  (jetty-launcher-5280-thread-1) [n:127.0.0.1:39491_solr     ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
   [junit4]   2> 1338664 INFO  (zkConnectionManagerCallback-5304-thread-1) [     ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 1338664 INFO  (jetty-launcher-5280-thread-1) [n:127.0.0.1:39491_solr     ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
   [junit4]   2> 1338683 INFO  (jetty-launcher-5280-thread-1) [n:127.0.0.1:39491_solr     ] o.a.s.c.OverseerElectionContext I am going to be the leader 127.0.0.1:39491_solr
   [junit4]   2> 1338684 INFO  (jetty-launcher-5280-thread-1) [n:127.0.0.1:39491_solr     ] o.a.s.c.Overseer Overseer (id=72286898665095176-127.0.0.1:39491_solr-n_0000000000) starting
   [junit4]   2> 1338687 INFO  (jetty-launcher-5280-thread-2) [n:127.0.0.1:45897_solr     ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:45897_solr
   [junit4]   2> 1338689 INFO  (zkCallback-5299-thread-1) [     ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 1338689 INFO  (zkCallback-5303-thread-1) [     ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 1338696 INFO  (jetty-launcher-5280-thread-2) [n:127.0.0.1:45897_solr     ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
   [junit4]   2> 1338697 INFO  (jetty-launcher-5280-thread-1) [n:127.0.0.1:39491_solr     ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
   [junit4]   2> 1338697 INFO  (zkConnectionManagerCallback-5311-thread-1) [     ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 1338697 INFO  (jetty-launcher-5280-thread-2) [n:127.0.0.1:45897_solr     ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
   [junit4]   2> 1338698 INFO  (zkConnectionManagerCallback-5316-thread-1) [     ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 1338698 INFO  (jetty-launcher-5280-thread-1) [n:127.0.0.1:39491_solr     ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
   [junit4]   2> 1338699 INFO  (jetty-launcher-5280-thread-2) [n:127.0.0.1:45897_solr     ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 1338701 INFO  (jetty-launcher-5280-thread-1) [n:127.0.0.1:39491_solr     ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 1338701 INFO  (jetty-launcher-5280-thread-2) [n:127.0.0.1:45897_solr     ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:42359/solr ready
   [junit4]   2> 1338702 INFO  (jetty-launcher-5280-thread-2) [n:127.0.0.1:45897_solr     ] o.a.s.c.PackageManager clusterprops.json changed , version -1
   [junit4]   2> 1338702 INFO  (jetty-launcher-5280-thread-2) [n:127.0.0.1:45897_solr     ] o.a.s.c.b.r.BackupRepositoryFactory Added backup repository with configuration params {type = repository,name = hdfs,class = org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes = {name=hdfs, class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args = {location=/backup,solr.hdfs.home=hdfs://localhost.localdomain:38187/solr,solr.hdfs.confdir=}}
   [junit4]   2> 1338702 INFO  (jetty-launcher-5280-thread-2) [n:127.0.0.1:45897_solr     ] o.a.s.c.b.r.BackupRepositoryFactory Added backup repository with configuration params {type = repository,name = poisioned,class = org.apache.solr.cloud.api.collections.TestLocalFSCloudBackupRestore$PoinsionedRepository,attributes = {default=true, name=poisioned, class=org.apache.solr.cloud.api.collections.TestLocalFSCloudBackupRestore$PoinsionedRepository},}
   [junit4]   2> 1338702 INFO  (jetty-launcher-5280-thread-2) [n:127.0.0.1:45897_solr     ] o.a.s.c.b.r.BackupRepositoryFactory Default configuration for backup repository is with configuration params {type = repository,name = poisioned,class = org.apache.solr.cloud.api.collections.TestLocalFSCloudBackupRestore$PoinsionedRepository,attributes = {default=true, name=poisioned, class=org.apache.solr.cloud.api.collections.TestLocalFSCloudBackupRestore$PoinsionedRepository},}
   [junit4]   2> 1338703 INFO  (jetty-launcher-5280-thread-1) [n:127.0.0.1:39491_solr     ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:42359/solr ready
   [junit4]   2> 1338712 INFO  (OverseerStateUpdate-72286898665095176-127.0.0.1:39491_solr-n_0000000000) [n:127.0.0.1:39491_solr     ] o.a.s.c.Overseer Starting to work on the main queue : 127.0.0.1:39491_solr
   [junit4]   2> 1338714 INFO  (jetty-launcher-5280-thread-1) [n:127.0.0.1:39491_solr     ] o.a.s.c.ZkController Publish node=127.0.0.1:39491_solr as DOWN
   [junit4]   2> 1338717 INFO  (jetty-launcher-5280-thread-1) [n:127.0.0.1:39491_solr     ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 2147483647 transient cores
   [junit4]   2> 1338717 INFO  (jetty-launcher-5280-thread-1) [n:127.0.0.1:39491_solr     ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:39491_solr
   [junit4]   2> 1338719 INFO  (zkCallback-5299-thread-1) [     ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 1338719 INFO  (zkCallback-5303-thread-1) [     ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 1338720 INFO  (jetty-launcher-5280-thread-1) [n:127.0.0.1:39491_solr     ] o.a.s.c.PackageManager clusterprops.json changed , version -1
   [junit4]   2> 1338720 INFO  (jetty-launcher-5280-thread-1) [n:127.0.0.1:39491_solr     ] o.a.s.c.b.r.BackupRepositoryFactory Added backup repository with configuration params {type = repository,name = hdfs,class = org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes = {name=hdfs, class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args = {location=/backup,solr.hdfs.home=hdfs://localhost.localdomain:38187/solr,solr.hdfs.confdir=}}
   [junit4]   2> 1338720 INFO  (jetty-launcher-5280-thread-1) [n:127.0.0.1:39491_solr     ] o.a.s.c.b.r.BackupRepositoryFactory Added backup repository with configuration params {type = repository,name = poisioned,class = org.apache.solr.cloud.api.collections.TestLocalFSCloudBackupRestore$PoinsionedRepository,attributes = {default=true, name=poisioned, class=org.apache.solr.cloud.api.collections.TestLocalFSCloudBackupRestore$PoinsionedRepository},}
   [junit4]   2> 1338721 INFO  (jetty-launcher-5280-thread-1) [n:127.0.0.1:39491_solr     ] o.a.s.c.b.r.BackupRepositoryFactory Default configuration for backup repository is with configuration params {type = repository,name = poisioned,class = org.apache.solr.cloud.api.collections.TestLocalFSCloudBackupRestore$PoinsionedRepository,attributes = {default=true, name=poisioned, class=org.apache.solr.cloud.api.collections.TestLocalFSCloudBackupRestore$PoinsionedRepository},}
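Both nodes register the same two backup repositories from the test's solr.xml: "hdfs" (HdfsBackupRepository, pointing at the mini HDFS cluster and the /backup location) and a deliberately broken "poisioned" repository marked default, so any request that forgets repository=hdfs fails fast instead of silently writing elsewhere. The misspellings "poisioned" and "PoinsionedRepository" are the actual identifiers in the test code, not log corruption. Reconstructed from the logged parameters, the <backup> section looks roughly like the string constant such tests embed in their solr.xml (the XML layout itself is an assumption):

    // Sketch of the <backup> section of the test solr.xml; values are taken
    // from the log above, the exact XML layout is assumed.
    String backupRepoXml =
        "<backup>\n"
        + "  <repository name=\"hdfs\" class=\"org.apache.solr.core.backup.repository.HdfsBackupRepository\">\n"
        + "    <str name=\"location\">/backup</str>\n"
        + "    <str name=\"solr.hdfs.home\">hdfs://localhost.localdomain:38187/solr</str>\n"
        + "    <str name=\"solr.hdfs.confdir\"></str>\n"
        + "  </repository>\n"
        + "  <repository name=\"poisioned\" default=\"true\" class=\"org.apache.solr.cloud.api.collections.TestLocalFSCloudBackupRestore$PoinsionedRepository\"/>\n"
        + "</backup>\n";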
   [junit4]   2> 1338731 INFO  (zkCallback-5310-thread-1) [     ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 1338736 INFO  (jetty-launcher-5280-thread-2) [n:127.0.0.1:45897_solr     ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory.
   [junit4]   2> 1338741 INFO  (zkCallback-5315-thread-1) [     ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 1338743 INFO  (jetty-launcher-5280-thread-1) [n:127.0.0.1:39491_solr     ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory.
   [junit4]   2> 1338776 INFO  (jetty-launcher-5280-thread-2) [n:127.0.0.1:45897_solr     ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@629bbab8
   [junit4]   2> 1338782 INFO  (jetty-launcher-5280-thread-1) [n:127.0.0.1:39491_solr     ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@629bbab8
   [junit4]   2> 1338802 INFO  (jetty-launcher-5280-thread-2) [n:127.0.0.1:45897_solr     ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@629bbab8
   [junit4]   2> 1338802 INFO  (jetty-launcher-5280-thread-2) [n:127.0.0.1:45897_solr     ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@629bbab8
   [junit4]   2> 1338805 INFO  (jetty-launcher-5280-thread-2) [n:127.0.0.1:45897_solr     ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_67B07A2A706FB781-001/tempDir-002/node2/.
   [junit4]   2> 1338811 INFO  (jetty-launcher-5280-thread-1) [n:127.0.0.1:39491_solr     ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@629bbab8
   [junit4]   2> 1338811 INFO  (jetty-launcher-5280-thread-1) [n:127.0.0.1:39491_solr     ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@629bbab8
   [junit4]   2> 1338814 INFO  (jetty-launcher-5280-thread-1) [n:127.0.0.1:39491_solr     ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_67B07A2A706FB781-001/tempDir-002/node1/.
   [junit4]   2> 1338917 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.a.s.c.MiniSolrCloudCluster waitForAllNodes: numServers=2
   [junit4]   2> 1338918 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
   [junit4]   2> 1338922 INFO  (zkConnectionManagerCallback-5323-thread-1) [     ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 1338922 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
   [junit4]   2> 1338925 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
   [junit4]   2> 1338927 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[67B07A2A706FB781]-worker) [     ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:42359/solr ready
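With both nodes live in ZooKeeper and a control client connected, the cluster is ready. In a SolrCloudTestCase subclass, everything from the ZooKeeper start down to this point is usually driven by the cluster builder, roughly as follows (the configset path is illustrative):

    // Sketch, inside a SolrCloudTestCase subclass: start embedded ZK plus
    // two Jetty/Solr nodes and upload the "conf1" configset used below.
    configureCluster(2)
        .addConfig("conf1", Path.of("src/test-files/solr/configsets/minimal/conf")) // hypothetical path
        .configure();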
   [junit4]   2> 1339054 INFO  (TEST-TestHdfsCloudBackupRestore.test-seed#[67B07A2A706FB781]) [     ] o.a.s.SolrTestCaseJ4 ###Starting test
   [junit4]   2> 1339057 INFO  (qtp820349897-14662) [n:127.0.0.1:39491_solr     ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params pullReplicas=0&property.customKey=customValue&collection.configName=conf1&maxShardsPerNode=-1&name=hdfsbackuprestore_testok&nrtReplicas=2&action=CREATE&numShards=2&tlogReplicas=0&wt=javabin&version=2 and sendToOCPQueue=true
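The logged CREATE parameters map onto a SolrJ request along these lines; the builder methods shown existed in SolrJ at the time of this build, though the test's exact call chain may differ:

    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    // 2 shards x 2 NRT replicas, no TLOG/PULL replicas, plus a custom
    // collection property (surfaces as property.customKey in the log).
    // "cluster" is the SolrCloudTestCase field for the mini cluster.
    CollectionAdminRequest.createCollection("hdfsbackuprestore_testok", "conf1", 2, 2)
        .setMaxShardsPerNode(-1)                  // -1: no per-node shard limit
        .withProperty("customKey", "customValue")
        .process(cluster.getSolrClient());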
   [junit4]   2> 1339063 INFO  (OverseerThreadFactory-4926-thread-1-processing-n:127.0.0.1:39491_solr) [n:127.0.0.1:39491_solr     ] o.a.s.c.a.c.CreateCollectionCmd Create collection hdfsbackuprestore_testok
   [junit4]   2> 1339182 INFO  (OverseerStateUpdate-72286898665095176-127.0.0.1:39491_solr-n_0000000000) [n:127.0.0.1:39491_solr     ] o.a.s.c.o.SliceMutator createReplica() {
   [junit4]   2>   "operation":"ADDREPLICA",
   [junit4]   2>   "collection":"hdfsbackuprestore_testok",
   [junit4]   2>   "shard":"shard1",
   [junit4]   2>   "core":"hdfsbackuprestore_testok_shard1_replica_n1",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "base_url":"http://127.0.0.1:39491/solr",
   [junit4]   2>   "type":"NRT",
   [junit4]   2>   "waitForFinalState":"false"}
   [junit4]   2> 1339186 INFO  (OverseerStateUpdate-72286898665095176-127.0.0.1:39491_solr-n_0000000000) [n:127.0.0.1:39491_solr     ] o.a.s.c.o.SliceMutator createReplica() {
   [junit4]   2>   "operation":"ADDREPLICA",
   [junit4]   2>   "collection":"hdfsbackuprestore_testok",
   [junit4]   2>   "shard":"shard1",
   [junit4]   2>   "core":"hdfsbackuprestore_testok_shard1_replica_n2",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "base_url":"http://127.0.0.1:45897/solr",
   [junit4]   2>   "type":"NRT",
   [junit4]   2>   "waitForFinalState":"false"}
   [junit4]   2> 1339189 INFO  (OverseerStateUpdate-72286898665095176-127.0.0.1:39491_solr-n_0000000000) [n:127.0.0.1:39491_solr     ] o.a.s.c.o.SliceMutator createReplica() {
   [junit4]   2>   "operation":"ADDREPLICA",
   [junit4]   2>   "collection":"hdfsbackuprestore_testok",
   [junit4]   2>   "shard":"shard2",
   [junit4]   2>   "core":"hdfsbackuprestore_testok_shard2_replica_n4",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "base_url":"http://127.0.0.1:39491/solr",
   [junit4]   2>   "type":"NRT",
   [junit4]   2>   "waitForFinalState":"false"}
   [junit4]   2> 1339193 INFO  (OverseerStateUpdate-72286898665095176-127.0.0.1:39491_solr-n_0000000000) [n:127.0.0.1:39491_solr     ] o.a.s.c.o.SliceMutator createReplica() {
   [junit4]   2>   "operation":"ADDREPLICA",
   [junit4]   2>   "collection":"hdfsbackuprestore_testok",
   [junit4]   2>   "shard":"shard2",
   [junit4]   2>   "core":"hdfsbackuprestore_testok_shard2_replica_n6",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "base_url":"http://127.0.0.1:45897/solr",
   [junit4]   2>   "type":"NRT",
   [junit4]   2>   "waitForFinalState":"false"}
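The four ADDREPLICA operations above are the Overseer laying out 2 shards x 2 NRT replicas, alternating replicas across the two nodes (ports 39491 and 45897). Tests normally block until all four report ACTIVE before indexing; a sketch:

    // Wait until hdfsbackuprestore_testok has 2 shards and 4 active replicas.
    cluster.waitForActiveCollection("hdfsbackuprestore_testok", 2, 4);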
   [junit4]   2> 1339398 INFO  (qtp820349897-14666) [n:127.0.0.1:39491_solr    x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore_testok&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node3&name=hdfsbackuprestore_testok_shard1_replica_n1&action=CREATE&numShards=2&shard=shard1&wt=javabin
   [junit4]   2> 1339398 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr    x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore_testok&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node8&name=hdfsbackuprestore_testok_shard2_replica_n6&action=CREATE&numShards=2&shard=shard2&wt=javabin
   [junit4]   2> 1339398 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr    x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 2147483647 transient cores
   [junit4]   2> 1339399 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr    x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore_testok&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node5&name=hdfsbackuprestore_testok_shard1_replica_n2&action=CREATE&numShards=2&shard=shard1&wt=javabin
   [junit4]   2> 1339399 INFO  (qtp820349897-14660) [n:127.0.0.1:39491_solr    x:hdfsbackuprestore_testok_shard2_replica_n4 ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore_testok&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node7&name=hdfsbackuprestore_testok_shard2_replica_n4&action=CREATE&numShards=2&shard=shard2&wt=javabin
   [junit4]   2> 1340425 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.c.SolrConfig Using Lucene MatchVersion: 9.0.0
   [junit4]   2> 1340440 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.c.SolrConfig Using Lucene MatchVersion: 9.0.0
   [junit4]   2> 1340443 INFO  (qtp820349897-14660) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard2 r:core_node7 x:hdfsbackuprestore_testok_shard2_replica_n4 ] o.a.s.c.SolrConfig Using Lucene MatchVersion: 9.0.0
   [junit4]   2> 1340443 INFO  (qtp820349897-14666) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.c.SolrConfig Using Lucene MatchVersion: 9.0.0
   [junit4]   2> 1340450 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.s.IndexSchema [hdfsbackuprestore_testok_shard1_replica_n2] Schema name=minimal
   [junit4]   2> 1340452 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.s.IndexSchema [hdfsbackuprestore_testok_shard2_replica_n6] Schema name=minimal
   [junit4]   2> 1340453 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.s.IndexSchema Loaded schema minimal/1.1 with uniqueid field id
   [junit4]   2> 1340453 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.c.CoreContainer Creating SolrCore 'hdfsbackuprestore_testok_shard1_replica_n2' using configuration from collection hdfsbackuprestore_testok, trusted=true
   [junit4]   2> 1340454 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.hdfsbackuprestore_testok.shard1.replica_n2' (registry 'solr.core.hdfsbackuprestore_testok.shard1.replica_n2') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@629bbab8
   [junit4]   2> 1340454 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.c.SolrCore [[hdfsbackuprestore_testok_shard1_replica_n2] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_67B07A2A706FB781-001/tempDir-002/node2/hdfsbackuprestore_testok_shard1_replica_n2], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_67B07A2A706FB781-001/tempDir-002/node2/./hdfsbackuprestore_testok_shard1_replica_n2/data/]
   [junit4]   2> 1340455 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.s.IndexSchema Loaded schema minimal/1.1 with uniqueid field id
   [junit4]   2> 1340455 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.c.CoreContainer Creating SolrCore 'hdfsbackuprestore_testok_shard2_replica_n6' using configuration from collection hdfsbackuprestore_testok, trusted=true
   [junit4]   2> 1340455 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.hdfsbackuprestore_testok.shard2.replica_n6' (registry 'solr.core.hdfsbackuprestore_testok.shard2.replica_n6') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@629bbab8
   [junit4]   2> 1340455 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.c.SolrCore [[hdfsbackuprestore_testok_shard2_replica_n6] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_67B07A2A706FB781-001/tempDir-002/node2/hdfsbackuprestore_testok_shard2_replica_n6], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_67B07A2A706FB781-001/tempDir-002/node2/./hdfsbackuprestore_testok_shard2_replica_n6/data/]
   [junit4]   2> 1340463 INFO  (qtp820349897-14660) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard2 r:core_node7 x:hdfsbackuprestore_testok_shard2_replica_n4 ] o.a.s.s.IndexSchema [hdfsbackuprestore_testok_shard2_replica_n4] Schema name=minimal
   [junit4]   2> 1340464 INFO  (qtp820349897-14666) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.s.IndexSchema [hdfsbackuprestore_testok_shard1_replica_n1] Schema name=minimal
   [junit4]   2> 1340467 INFO  (qtp820349897-14666) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.s.IndexSchema Loaded schema minimal/1.1 with uniqueid field id
   [junit4]   2> 1340467 INFO  (qtp820349897-14666) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.c.CoreContainer Creating SolrCore 'hdfsbackuprestore_testok_shard1_replica_n1' using configuration from collection hdfsbackuprestore_testok, trusted=true
   [junit4]   2> 1340468 INFO  (qtp820349897-14666) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.hdfsbackuprestore_testok.shard1.replica_n1' (registry 'solr.core.hdfsbackuprestore_testok.shard1.replica_n1') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@629bbab8
   [junit4]   2> 1340470 INFO  (qtp820349897-14666) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.c.SolrCore [[hdfsbackuprestore_testok_shard1_replica_n1] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_67B07A2A706FB781-001/tempDir-002/node1/hdfsbackuprestore_testok_shard1_replica_n1], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_67B07A2A706FB781-001/tempDir-002/node1/./hdfsbackuprestore_testok_shard1_replica_n1/data/]
   [junit4]   2> 1340470 INFO  (qtp820349897-14660) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard2 r:core_node7 x:hdfsbackuprestore_testok_shard2_replica_n4 ] o.a.s.s.IndexSchema Loaded schema minimal/1.1 with uniqueid field id
   [junit4]   2> 1340470 INFO  (qtp820349897-14660) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard2 r:core_node7 x:hdfsbackuprestore_testok_shard2_replica_n4 ] o.a.s.c.CoreContainer Creating SolrCore 'hdfsbackuprestore_testok_shard2_replica_n4' using configuration from collection hdfsbackuprestore_testok, trusted=true
   [junit4]   2> 1340471 INFO  (qtp820349897-14660) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard2 r:core_node7 x:hdfsbackuprestore_testok_shard2_replica_n4 ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.hdfsbackuprestore_testok.shard2.replica_n4' (registry 'solr.core.hdfsbackuprestore_testok.shard2.replica_n4') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@629bbab8
   [junit4]   2> 1340471 INFO  (qtp820349897-14660) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard2 r:core_node7 x:hdfsbackuprestore_testok_shard2_replica_n4 ] o.a.s.c.SolrCore [[hdfsbackuprestore_testok_shard2_replica_n4] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_67B07A2A706FB781-001/tempDir-002/node1/hdfsbackuprestore_testok_shard2_replica_n4], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_67B07A2A706FB781-001/tempDir-002/node1/./hdfsbackuprestore_testok_shard2_replica_n4/data/]
   [junit4]   2> 1340590 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 1340590 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 1340594 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 1340594 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 1340600 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 1340600 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 1340601 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 1340601 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 1340605 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.s.SolrIndexSearcher Opening [Searcher@4138a384[hdfsbackuprestore_testok_shard2_replica_n6] main]
   [junit4]   2> 1340605 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.s.SolrIndexSearcher Opening [Searcher@2531807a[hdfsbackuprestore_testok_shard1_replica_n2] main]
   [junit4]   2> 1340608 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 1340608 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 1340608 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 1340609 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 1340609 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
   [junit4]   2> 1340609 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1643199667281854464
   [junit4]   2> 1340610 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
   [junit4]   2> 1340610 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1643199667282903040
   [junit4]   2> 1340616 INFO  (searcherExecutor-4936-thread-1-processing-n:127.0.0.1:45897_solr x:hdfsbackuprestore_testok_shard2_replica_n6 c:hdfsbackuprestore_testok s:shard2 r:core_node8) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.c.SolrCore [hdfsbackuprestore_testok_shard2_replica_n6] Registered new searcher Searcher@4138a384[hdfsbackuprestore_testok_shard2_replica_n6] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 1340616 INFO  (searcherExecutor-4935-thread-1-processing-n:127.0.0.1:45897_solr x:hdfsbackuprestore_testok_shard1_replica_n2 c:hdfsbackuprestore_testok s:shard1 r:core_node5) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.c.SolrCore [hdfsbackuprestore_testok_shard1_replica_n2] Registered new searcher Searcher@2531807a[hdfsbackuprestore_testok_shard1_replica_n2] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 1340621 INFO  (qtp820349897-14666) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 1340621 INFO  (qtp820349897-14666) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 1340621 INFO  (qtp820349897-14660) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard2 r:core_node7 x:hdfsbackuprestore_testok_shard2_replica_n4 ] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 1340621 INFO  (qtp820349897-14660) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard2 r:core_node7 x:hdfsbackuprestore_testok_shard2_replica_n4 ] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 1340623 INFO  (qtp820349897-14666) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 1340623 INFO  (qtp820349897-14666) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 1340623 INFO  (qtp820349897-14660) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard2 r:core_node7 x:hdfsbackuprestore_testok_shard2_replica_n4 ] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 1340623 INFO  (qtp820349897-14660) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard2 r:core_node7 x:hdfsbackuprestore_testok_shard2_replica_n4 ] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 1340626 INFO  (qtp820349897-14666) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.s.SolrIndexSearcher Opening [Searcher@3951f7f0[hdfsbackuprestore_testok_shard1_replica_n1] main]
   [junit4]   2> 1340627 INFO  (qtp820349897-14660) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard2 r:core_node7 x:hdfsbackuprestore_testok_shard2_replica_n4 ] o.a.s.s.SolrIndexSearcher Opening [Searcher@5176bf3[hdfsbackuprestore_testok_shard2_replica_n4] main]
   [junit4]   2> 1340627 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.c.ZkShardTerms Successful update of terms at /collections/hdfsbackuprestore_testok/terms/shard1 to Terms{values={core_node5=0}, version=0}
   [junit4]   2> 1340627 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.c.ShardLeaderElectionContextBase make sure parent is created /collections/hdfsbackuprestore_testok/leaders/shard1
   [junit4]   2> 1340629 INFO  (qtp820349897-14666) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 1340630 INFO  (qtp820349897-14660) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard2 r:core_node7 x:hdfsbackuprestore_testok_shard2_replica_n4 ] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 1340630 INFO  (qtp820349897-14666) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 1340631 INFO  (qtp820349897-14660) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard2 r:core_node7 x:hdfsbackuprestore_testok_shard2_replica_n4 ] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 1340631 INFO  (qtp820349897-14666) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
   [junit4]   2> 1340631 INFO  (qtp820349897-14666) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1643199667304923136
   [junit4]   2> 1340631 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.c.ZkShardTerms Successful update of terms at /collections/hdfsbackuprestore_testok/terms/shard2 to Terms{values={core_node8=0}, version=0}
   [junit4]   2> 1340631 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.c.ShardLeaderElectionContextBase make sure parent is created /collections/hdfsbackuprestore_testok/leaders/shard2
   [junit4]   2> 1340632 INFO  (qtp820349897-14660) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard2 r:core_node7 x:hdfsbackuprestore_testok_shard2_replica_n4 ] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
   [junit4]   2> 1340632 INFO  (qtp820349897-14660) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard2 r:core_node7 x:hdfsbackuprestore_testok_shard2_replica_n4 ] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1643199667305971712
   [junit4]   2> 1340636 INFO  (searcherExecutor-4937-thread-1-processing-n:127.0.0.1:39491_solr x:hdfsbackuprestore_testok_shard1_replica_n1 c:hdfsbackuprestore_testok s:shard1 r:core_node3) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.c.SolrCore [hdfsbackuprestore_testok_shard1_replica_n1] Registered new searcher Searcher@3951f7f0[hdfsbackuprestore_testok_shard1_replica_n1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 1340636 INFO  (searcherExecutor-4938-thread-1-processing-n:127.0.0.1:39491_solr x:hdfsbackuprestore_testok_shard2_replica_n4 c:hdfsbackuprestore_testok s:shard2 r:core_node7) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard2 r:core_node7 x:hdfsbackuprestore_testok_shard2_replica_n4 ] o.a.s.c.SolrCore [hdfsbackuprestore_testok_shard2_replica_n4] Registered new searcher Searcher@5176bf3[hdfsbackuprestore_testok_shard2_replica_n4] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 1340637 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.c.ShardLeaderElectionContext Waiting until we see more replicas up for shard shard1: total=2 found=1 timeoutin=9998ms
   [junit4]   2> 1340640 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.c.ShardLeaderElectionContext Waiting until we see more replicas up for shard shard2: total=2 found=1 timeoutin=9998ms
   [junit4]   2> 1340642 INFO  (qtp820349897-14666) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.c.ZkShardTerms Successful update of terms at /collections/hdfsbackuprestore_testok/terms/shard1 to Terms{values={core_node3=0, core_node5=0}, version=1}
   [junit4]   2> 1340642 INFO  (qtp820349897-14666) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContextBase make sure parent is created /collections/hdfsbackuprestore_testok/leaders/shard1
   [junit4]   2> 1340645 INFO  (qtp820349897-14660) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard2 r:core_node7 x:hdfsbackuprestore_testok_shard2_replica_n4 ] o.a.s.c.ZkShardTerms Successful update of terms at /collections/hdfsbackuprestore_testok/terms/shard2 to Terms{values={core_node7=0, core_node8=0}, version=1}
   [junit4]   2> 1340645 INFO  (qtp820349897-14660) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard2 r:core_node7 x:hdfsbackuprestore_testok_shard2_replica_n4 ] o.a.s.c.ShardLeaderElectionContextBase make sure parent is created /collections/hdfsbackuprestore_testok/leaders/shard2
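The "Successful update of terms" lines come from ZkShardTerms, which keeps a per-replica term counter in ZooKeeper; here every replica of both shards sits at term 0 (the znode version ticks from 0 to 1 as the second replica registers), meaning all replicas are equally current and any of them may legitimately become leader in the election that follows.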
   [junit4]   2> 1341139 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 1341139 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 1341139 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:45897/solr/hdfsbackuprestore_testok_shard1_replica_n2/
   [junit4]   2> 1341144 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.u.PeerSync PeerSync: core=hdfsbackuprestore_testok_shard1_replica_n2 url=http://127.0.0.1:45897/solr START replicas=[http://127.0.0.1:39491/solr/hdfsbackuprestore_testok_shard1_replica_n1/] nUpdates=100
   [junit4]   2> 1341144 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.u.PeerSync PeerSync: core=hdfsbackuprestore_testok_shard1_replica_n2 url=http://127.0.0.1:45897/solr DONE.  We have no versions.  sync failed.
   [junit4]   2> 1341145 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 1341146 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 1341146 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:45897/solr/hdfsbackuprestore_testok_shard2_replica_n6/
   [junit4]   2> 1341147 INFO  (qtp820349897-14668) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.c.S.Request [hdfsbackuprestore_testok_shard1_replica_n1]  webapp=/solr path=/get params={distrib=false&qt=/get&fingerprint=false&getVersions=100&wt=javabin&version=2} status=0 QTime=1
   [junit4]   2> 1341148 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.u.PeerSync PeerSync: core=hdfsbackuprestore_testok_shard2_replica_n6 url=http://127.0.0.1:45897/solr START replicas=[http://127.0.0.1:39491/solr/hdfsbackuprestore_testok_shard2_replica_n4/] nUpdates=100
   [junit4]   2> 1341148 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.u.PeerSync PeerSync: core=hdfsbackuprestore_testok_shard2_replica_n6 url=http://127.0.0.1:45897/solr DONE.  We have no versions.  sync failed.
   [junit4]   2> 1341152 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.c.SyncStrategy Leader's attempt to sync with shard failed, moving to the next candidate
   [junit4]   2> 1341152 INFO  (qtp820349897-14664) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard2 r:core_node7 x:hdfsbackuprestore_testok_shard2_replica_n4 ] o.a.s.c.S.Request [hdfsbackuprestore_testok_shard2_replica_n4]  webapp=/solr path=/get params={distrib=false&qt=/get&fingerprint=false&getVersions=100&wt=javabin&version=2} status=0 QTime=1
   [junit4]   2> 1341152 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.c.ShardLeaderElectionContext We failed sync, but we have no versions - we can't sync in that case - we were active before, so become leader anyway
   [junit4]   2> 1341152 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.c.ShardLeaderElectionContextBase Creating leader registration node /collections/hdfsbackuprestore_testok/leaders/shard1/leader after winning as /collections/hdfsbackuprestore_testok/leader_elect/shard1/election/72286898665095175-core_node5-n_0000000000
   [junit4]   2> 1341152 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.c.SyncStrategy Leader's attempt to sync with shard failed, moving to the next candidate
   [junit4]   2> 1341152 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.c.ShardLeaderElectionContext We failed sync, but we have no versions - we can't sync in that case - we were active before, so become leader anyway
   [junit4]   2> 1341152 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.c.ShardLeaderElectionContextBase Creating leader registration node /collections/hdfsbackuprestore_testok/leaders/shard2/leader after winning as /collections/hdfsbackuprestore_testok/leader_elect/shard2/election/72286898665095175-core_node8-n_0000000000
   [junit4]   2> 1341156 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:45897/solr/hdfsbackuprestore_testok_shard1_replica_n2/ shard1
   [junit4]   2> 1341156 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:45897/solr/hdfsbackuprestore_testok_shard2_replica_n6/ shard2
   [junit4]   2> 1341260 INFO  (zkCallback-5299-thread-2) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testok/state.json] for collection [hdfsbackuprestore_testok] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1341261 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 1341262 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.c.ZkController I am the leader, no recovery necessary
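
The election sequence above is the normal first-leader path: PeerSync starts, finds no versions on either replica, reports "sync failed", and the candidate becomes leader anyway because, as the log says, there is no history to reconcile. To verify the resulting leaders from a client, the cluster state that the zkCallback threads keep current can be read via SolrJ. A minimal sketch, assuming a hypothetical ZooKeeper address (the test cluster here uses randomized ports; "127.0.0.1:9983" is a placeholder) and the collection name from this log:

    import java.util.Collections;
    import java.util.Optional;

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.common.cloud.DocCollection;
    import org.apache.solr.common.cloud.Replica;

    public class LeaderCheck {
      public static void main(String[] args) throws Exception {
        // Placeholder ZooKeeper address; not the randomized port from this log.
        String zkHost = "127.0.0.1:9983";
        try (CloudSolrClient client =
            new CloudSolrClient.Builder(Collections.singletonList(zkHost), Optional.empty()).build()) {
          client.connect();
          // Read the cached cluster state that the ZkStateReader callbacks above keep updated.
          DocCollection coll = client.getZkStateReader()
              .getClusterState().getCollection("hdfsbackuprestore_testok");
          for (String shard : coll.getSlicesMap().keySet()) {
            Replica leader = coll.getLeader(shard);
            System.out.println(shard + " -> " + (leader == null ? "no leader" : leader.getCoreUrl()));
          }
        }
      }
    }
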
   [junit4]   2> 1341265 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore_testok&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node8&name=hdfsbackuprestore_testok_shard2_replica_n6&action=CREATE&numShards=2&shard=shard2&wt=javabin} status=0 QTime=1867
   [junit4]   2> 1341266 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore_testok&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node5&name=hdfsbackuprestore_testok_shard1_replica_n2&action=CREATE&numShards=2&shard=shard1&wt=javabin} status=0 QTime=1867
   [junit4]   2> 1341366 INFO  (zkCallback-5299-thread-2) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testok/state.json] for collection [hdfsbackuprestore_testok] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1341366 INFO  (zkCallback-5299-thread-1) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testok/state.json] for collection [hdfsbackuprestore_testok] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1341651 INFO  (qtp820349897-14666) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore_testok&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node3&name=hdfsbackuprestore_testok_shard1_replica_n1&action=CREATE&numShards=2&shard=shard1&wt=javabin} status=0 QTime=2254
   [junit4]   2> 1341652 INFO  (qtp820349897-14660) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard2 r:core_node7 x:hdfsbackuprestore_testok_shard2_replica_n4 ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore_testok&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node7&name=hdfsbackuprestore_testok_shard2_replica_n4&action=CREATE&numShards=2&shard=shard2&wt=javabin} status=0 QTime=2253
   [junit4]   2> 1341657 INFO  (qtp820349897-14662) [n:127.0.0.1:39491_solr     ] o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most 45 seconds. Check all shard replicas
   [junit4]   2> 1341754 INFO  (zkCallback-5299-thread-2) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testok/state.json] for collection [hdfsbackuprestore_testok] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1341754 INFO  (zkCallback-5299-thread-1) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testok/state.json] for collection [hdfsbackuprestore_testok] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1341754 INFO  (zkCallback-5303-thread-1) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testok/state.json] for collection [hdfsbackuprestore_testok] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1341754 INFO  (zkCallback-5303-thread-2) [     ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore_testok/state.json] for collection [hdfsbackuprestore_testok] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1341756 INFO  (qtp820349897-14662) [n:127.0.0.1:39491_solr     ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={pullReplicas=0&property.customKey=customValue&collection.configName=conf1&maxShardsPerNode=-1&name=hdfsbackuprestore_testok&nrtReplicas=2&action=CREATE&numShards=2&tlogReplicas=0&wt=javabin&version=2} status=0 QTime=2698
   [junit4]   2> 1341768 INFO  (qtp1938397818-14661) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.c.ZkShardTerms Successful update of terms at /collections/hdfsbackuprestore_testok/terms/shard2 to Terms{values={core_node7=1, core_node8=1}, version=2}
   [junit4]   2> 1341783 INFO  (qtp820349897-14666) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard2 r:core_node7 x:hdfsbackuprestore_testok_shard2_replica_n4 ] o.a.s.u.p.LogUpdateProcessorFactory [hdfsbackuprestore_testok_shard2_replica_n4]  webapp=/solr path=/update params={update.distrib=FROMLEADER&distrib.from=http://127.0.0.1:45897/solr/hdfsbackuprestore_testok_shard2_replica_n6/&wt=javabin&version=2}{add=[2 (1643199668489814016), 3 (1643199668492959744), 5 (1643199668492959745), 6 (1643199668492959746), 7 (1643199668492959747), 9 (1643199668492959748), 17 (1643199668494008320), 18 (1643199668494008321), 19 (1643199668494008322), 21 (1643199668494008323), ... (32 adds)]} 0 12
   [junit4]   2> 1341783 ERROR (updateExecutor-5288-thread-1-processing-x:hdfsbackuprestore_testok_shard2_replica_n6 r:core_node8 null n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.c.s.i.ConcurrentUpdateHttp2SolrClient Error consuming and closing http response stream.
   [junit4]   2>           => java.nio.channels.AsynchronousCloseException
   [junit4]   2> at org.eclipse.jetty.client.util.InputStreamResponseListener$Input.read(InputStreamResponseListener.java:316)
   [junit4]   2> java.nio.channels.AsynchronousCloseException: null
   [junit4]   2> at org.eclipse.jetty.client.util.InputStreamResponseListener$Input.read(InputStreamResponseListener.java:316) ~[jetty-client-9.4.19.v20190610.jar:9.4.19.v20190610]
   [junit4]   2> at java.io.InputStream.read(InputStream.java:205) ~[?:?]
   [junit4]   2> at org.eclipse.jetty.client.util.InputStreamResponseListener$Input.read(InputStreamResponseListener.java:287) ~[jetty-client-9.4.19.v20190610.jar:9.4.19.v20190610]
   [junit4]   2> at org.apache.solr.client.solrj.impl.ConcurrentUpdateHttp2SolrClient$Runner.sendUpdateStream(ConcurrentUpdateHttp2SolrClient.java:283) ~[java/:?]
   [junit4]   2> at org.apache.solr.client.solrj.impl.ConcurrentUpdateHttp2SolrClient$Runner.run(ConcurrentUpdateHttp2SolrClient.java:176) ~[java/:?]
   [junit4]   2> at com.codahale.metrics.InstrumentedExecutorService$InstrumentedRunnable.run(InstrumentedExecutorService.java:181) ~[metrics-core-4.0.5.jar:4.0.5]
   [junit4]   2> at org.apache.solr.common.util.ExecutorUtil$MDCAwareThreadPoolExecutor.lambda$execute$0(ExecutorUtil.java:210) ~[java/:?]
   [junit4]   2> at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) ~[?:?]
   [junit4]   2> at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) ~[?:?]
   [junit4]   2> at java.lang.Thread.run(Thread.java:834) [?:?]
   [junit4]   2> 1341784 INFO  (qtp1938397818-14661) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.u.p.LogUpdateProcessorFactory [hdfsbackuprestore_testok_shard2_replica_n6]  webapp=/solr path=/update params={wt=javabin&version=2}{add=[2 (1643199668489814016), 3 (1643199668492959744), 5 (1643199668492959745), 6 (1643199668492959746), 7 (1643199668492959747), 9 (1643199668492959748), 17 (1643199668494008320), 18 (1643199668494008321), 19 (1643199668494008322), 21 (1643199668494008323), ... (32 adds)]} 0 24
   [junit4]   2> 1341794 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.c.ZkShardTerms Successful update of terms at /collections/hdfsbackuprestore_testok/terms/shard1 to Terms{values={core_node3=1, core_node5=1}, version=2}
   [junit4]   2> 1341794 INFO  (qtp820349897-14660) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.u.p.LogUpdateProcessorFactory [hdfsbackuprestore_testok_shard1_replica_n1]  webapp=/solr path=/update params={update.distrib=FROMLEADER&distrib.from=http://127.0.0.1:45897/solr/hdfsbackuprestore_testok_shard1_replica_n2/&wt=javabin&version=2}{add=[0 (1643199668489814016), 1 (1643199668492959744), 4 (1643199668492959745), 8 (1643199668492959746), 10 (1643199668494008320), 11 (1643199668494008321), 12 (1643199668494008322), 13 (1643199668494008323), 14 (1643199668494008324), 15 (1643199668494008325), ... (49 adds)]} 0 24
   [junit4]   2> 1341795 INFO  (qtp1938397818-14663) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.u.p.LogUpdateProcessorFactory [hdfsbackuprestore_testok_shard1_replica_n2]  webapp=/solr path=/update params={wt=javabin&version=2}{add=[0 (1643199668489814016), 1 (1643199668492959744), 4 (1643199668492959745), 8 (1643199668492959746), 10 (1643199668494008320), 11 (1643199668494008321), 12 (1643199668494008322), 13 (1643199668494008323), 14 (1643199668494008324), 15 (1643199668494008325), ... (49 adds)]} 0 36
   [junit4]   2> 1341802 INFO  (qtp1938397818-14661) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1643199668532805632,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 1341802 INFO  (qtp1938397818-14661) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.u.SolrIndexWriter Calling setCommitData with IW:org.apache.solr.update.SolrIndexWriter@3c8ca0c3 commitCommandVersion:1643199668532805632
   [junit4]   2> 1341803 INFO  (qtp820349897-14662) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1643199668533854208,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 1341803 INFO  (qtp820349897-14662) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.u.SolrIndexWriter Calling setCommitData with IW:org.apache.solr.update.SolrIndexWriter@67978936 commitCommandVersion:1643199668533854208
   [junit4]   2> 1341989 INFO  (qtp820349897-14662) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.s.SolrIndexSearcher Opening [Searcher@223eefa6[hdfsbackuprestore_testok_shard1_replica_n1] main]
   [junit4]   2> 1341990 INFO  (qtp820349897-14662) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 1341991 INFO  (qtp1938397818-14661) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.s.SolrIndexSearcher Opening [Searcher@19da051f[hdfsbackuprestore_testok_shard1_replica_n2] main]
   [junit4]   2> 1341992 INFO  (qtp1938397818-14661) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 1341992 INFO  (searcherExecutor-4937-thread-1-processing-n:127.0.0.1:39491_solr x:hdfsbackuprestore_testok_shard1_replica_n1 c:hdfsbackuprestore_testok s:shard1 r:core_node3) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.c.SolrCore [hdfsbackuprestore_testok_shard1_replica_n1] Registered new searcher Searcher@223eefa6[hdfsbackuprestore_testok_shard1_replica_n1] main{ExitableDirectoryReader(UninvertingDirectoryReader(Uninverting(_0(9.0.0):C49:[diagnostics={java.version=11.0.1, java.vm.version=11.0.1+13-LTS, lucene.version=9.0.0, source=flush, os.arch=amd64, java.runtime.version=11.0.1+13-LTS, os.version=4.15.0-54-generic, os=Linux, java.vendor=Oracle Corporation, timestamp=1567077320616}]:[attributes={Lucene50StoredFieldsFormat.mode=BEST_SPEED}])))}
   [junit4]   2> 1341992 INFO  (qtp820349897-14662) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard1 r:core_node3 x:hdfsbackuprestore_testok_shard1_replica_n1 ] o.a.s.u.p.LogUpdateProcessorFactory [hdfsbackuprestore_testok_shard1_replica_n1]  webapp=/solr path=/update params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:45897/solr/hdfsbackuprestore_testok_shard1_replica_n2/&commit_end_point=replicas&wt=javabin&version=2&expungeDeletes=false}{commit=} 0 190
   [junit4]   2> 1341994 INFO  (searcherExecutor-4935-thread-1-processing-n:127.0.0.1:45897_solr x:hdfsbackuprestore_testok_shard1_replica_n2 c:hdfsbackuprestore_testok s:shard1 r:core_node5) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.c.SolrCore [hdfsbackuprestore_testok_shard1_replica_n2] Registered new searcher Searcher@19da051f[hdfsbackuprestore_testok_shard1_replica_n2] main{ExitableDirectoryReader(UninvertingDirectoryReader(Uninverting(_0(9.0.0):C49:[diagnostics={java.version=11.0.1, java.vm.version=11.0.1+13-LTS, lucene.version=9.0.0, source=flush, os.arch=amd64, java.runtime.version=11.0.1+13-LTS, os.version=4.15.0-54-generic, os=Linux, java.vendor=Oracle Corporation, timestamp=1567077320616}]:[attributes={Lucene50StoredFieldsFormat.mode=BEST_SPEED}])))}
   [junit4]   2> 1341995 INFO  (qtp1938397818-14661) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard1 r:core_node5 x:hdfsbackuprestore_testok_shard1_replica_n2 ] o.a.s.u.p.LogUpdateProcessorFactory [hdfsbackuprestore_testok_shard1_replica_n2]  webapp=/solr path=/update params={update.distrib=TOLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:45897/solr/hdfsbackuprestore_testok_shard2_replica_n6/&commit_end_point=leaders&wt=javabin&version=2&expungeDeletes=false}{commit=} 0 193
   [junit4]   2> 1341996 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1643199668736229376,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 1341996 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.u.SolrIndexWriter Calling setCommitData with IW:org.apache.solr.update.SolrIndexWriter@2cab4c15 commitCommandVersion:1643199668736229376
   [junit4]   2> 1341999 INFO  (qtp820349897-14668) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard2 r:core_node7 x:hdfsbackuprestore_testok_shard2_replica_n4 ] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1643199668739375104,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 1341999 INFO  (qtp820349897-14668) [n:127.0.0.1:39491_solr c:hdfsbackuprestore_testok s:shard2 r:core_node7 x:hdfsbackuprestore_testok_shard2_replica_n4 ] o.a.s.u.SolrIndexWriter Calling setCommitData with IW:org.apache.solr.update.SolrIndexWriter@5006905b commitCommandVersion:1643199668739375104
   [junit4]   2> 1342337 INFO  (qtp1938397818-14665) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.s.SolrIndexSearcher Opening [Searcher@79973452[hdfsbackuprestore_testok_shard2_replica_n6] main]
   [junit4]   2> 1342340 INFO  (searcherExecutor-4936-thread-1-processing-n:127.0.0.1:45897_solr x:hdfsbackuprestore_testok_shard2_replica_n6 c:hdfsbackuprestore_testok s:shard2 r:core_node8) [n:127.0.0.1:45897_solr c:hdfsbackuprestore_testok s:shard2 r:core_node8 x:hdfsbackuprestore_testok_shard2_replica_n6 ] o.a.s.c.SolrCore [hdfsbackuprestore_testok_shard2_replica_n6] Registered new searcher Searcher@79973452[hdfsbackuprestore_testok_shard2_replica_n6] main{ExitableDirectoryReader(UninvertingDirectoryReader(Uninverting(_0(9.0.0):C32:[diagnostics={java.version=11.0.1, java.vm.version=11.0.1+13-LTS, lucene.version=9.0.0, source=flush, os.arch=amd64, java.runtime.version=11.0.1+13-LTS, os.version=4.15.0-54-generic, os=Linux, java.vendor=Oracle Corp

[...truncated too long message...]
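
Before the truncation, the log shows a distributed hard commit: the leader receives the commit, fans it out to the other leader and to the replicas (commit_end_point=leaders, then commit_end_point=replicas), and each core opens and registers a new searcher. The originating client call is a single commit request; a hedged SolrJ sketch with a placeholder base URL (the test nodes listen on randomized ports), where waitSearcher=true and softCommit=false match the parameters logged above:

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.response.UpdateResponse;

    public class CommitExample {
      public static void main(String[] args) throws Exception {
        // Placeholder base URL; the nodes in this log use randomized ports.
        try (SolrClient client = new HttpSolrClient.Builder("http://127.0.0.1:8983/solr").build()) {
          // commit(collection, waitFlush, waitSearcher, softCommit)
          UpdateResponse rsp = client.commit("hdfsbackuprestore_testok", true, true, false);
          System.out.println("commit status: " + rsp.getStatus());
        }
      }
    }
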

e/workspace/Lucene-Solr-Tests-master/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/lucene/top-level-ivy-settings.xml

[...repeated ivy resolve/configure output omitted...]

resolve:

jar-checksums:
    [mkdir] Created dir: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/null517261056
     [copy] Copying 249 files to /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/null517261056
   [delete] Deleting directory /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/null517261056

check-working-copy:
[ivy:cachepath] :: resolving dependencies :: #;working@lucene1-us-west
[ivy:cachepath] confs: [default]
[ivy:cachepath] found org.eclipse.jgit#org.eclipse.jgit;5.3.0.201903130848-r in public
[ivy:cachepath] found com.jcraft#jsch;0.1.54 in public
[ivy:cachepath] found com.jcraft#jzlib;1.1.1 in public
[ivy:cachepath] found com.googlecode.javaewah#JavaEWAH;1.1.6 in public
[ivy:cachepath] found org.slf4j#slf4j-api;1.7.2 in public
[ivy:cachepath] found org.bouncycastle#bcpg-jdk15on;1.60 in public
[ivy:cachepath] found org.bouncycastle#bcprov-jdk15on;1.60 in public
[ivy:cachepath] found org.bouncycastle#bcpkix-jdk15on;1.60 in public
[ivy:cachepath] found org.slf4j#slf4j-nop;1.7.2 in public
[ivy:cachepath] :: resolution report :: resolve 39ms :: artifacts dl 3ms
        ---------------------------------------------------------------------
        |                  |            modules            ||   artifacts   |
        |       conf       | number| search|dwnlded|evicted|| number|dwnlded|
        ---------------------------------------------------------------------
        |      default     |   9   |   0   |   0   |   0   ||   9   |   0   |
        ---------------------------------------------------------------------
[wc-checker] Initializing working copy...
[wc-checker] Checking working copy status...

-jenkins-base:

BUILD SUCCESSFUL
Total time: 107 minutes 56 seconds
Archiving artifacts
java.lang.InterruptedException: no matches found within 10000
        at hudson.FilePath$ValidateAntFileMask.hasMatch(FilePath.java:2847)
        at hudson.FilePath$ValidateAntFileMask.invoke(FilePath.java:2726)
        at hudson.FilePath$ValidateAntFileMask.invoke(FilePath.java:2707)
        at hudson.FilePath$FileCallableWrapper.call(FilePath.java:3086)
Also:   hudson.remoting.Channel$CallSiteStackTrace: Remote call to lucene
                at hudson.remoting.Channel.attachCallSiteStackTrace(Channel.java:1741)
                at hudson.remoting.UserRequest$ExceptionResponse.retrieve(UserRequest.java:357)
                at hudson.remoting.Channel.call(Channel.java:955)
                at hudson.FilePath.act(FilePath.java:1072)
                at hudson.FilePath.act(FilePath.java:1061)
                at hudson.FilePath.validateAntFileMask(FilePath.java:2705)
                at hudson.tasks.ArtifactArchiver.perform(ArtifactArchiver.java:243)
                at hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:81)
                at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
                at hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:744)
                at hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:690)
                at hudson.model.Build$BuildExecution.post2(Build.java:186)
                at hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:635)
                at hudson.model.Run.execute(Run.java:1835)
                at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43)
                at hudson.model.ResourceController.execute(ResourceController.java:97)
                at hudson.model.Executor.run(Executor.java:429)
Caused: hudson.FilePath$TunneledInterruptedException
        at hudson.FilePath$FileCallableWrapper.call(FilePath.java:3088)
        at hudson.remoting.UserRequest.perform(UserRequest.java:212)
        at hudson.remoting.UserRequest.perform(UserRequest.java:54)
        at hudson.remoting.Request$2.run(Request.java:369)
        at hudson.remoting.InterceptingExecutorService$1.call(InterceptingExecutorService.java:72)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:744)
Caused: java.lang.InterruptedException: java.lang.InterruptedException: no matches found within 10000
        at hudson.FilePath.act(FilePath.java:1074)
        at hudson.FilePath.act(FilePath.java:1061)
        at hudson.FilePath.validateAntFileMask(FilePath.java:2705)
        at hudson.tasks.ArtifactArchiver.perform(ArtifactArchiver.java:243)
        at hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:81)
        at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
        at hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:744)
        at hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:690)
        at hudson.model.Build$BuildExecution.post2(Build.java:186)
        at hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:635)
        at hudson.model.Run.execute(Run.java:1835)
        at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43)
        at hudson.model.ResourceController.execute(ResourceController.java:97)
        at hudson.model.Executor.run(Executor.java:429)
No artifacts found that match the file pattern "**/*.events,heapdumps/**,**/hs_err_pid*". Configuration error?
Recording test results
Build step 'Publish JUnit test result report' changed build result to UNSTABLE
Email was triggered for: Unstable (Test Failures)
Sending email for trigger: Unstable (Test Failures)

