hadoop-common-issues mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From GitBox <...@apache.org>
Subject [GitHub] [hadoop] goiri commented on a change in pull request #2296: HDFS-15568. NameNode failed to start when dfs.namenode.max.snapshot.limit is set.
Date Sat, 12 Sep 2020 21:32:55 GMT

goiri commented on a change in pull request #2296:
URL: https://github.com/apache/hadoop/pull/2296#discussion_r487238144



##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,58 @@ public void testValidateSnapshotIDWidth() throws Exception {
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
+  @Test
+  public void testSnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+            DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(0).build();
+    cluster.waitActive();
+    DistributedFileSystem hdfs = cluster.getFileSystem();
+    hdfs.mkdirs(snapshottableDir);
+    hdfs.allowSnapshot(snapshottableDir);
+    int i = 0;
+    for (; i < numSnapshots; i++) {
+      hdfs.createSnapshot(snapshottableDir, "s" + i);
+    }
+    LambdaTestUtils.intercept(SnapshotException.class,
+        "snapshot limit",
+        () -> hdfs.createSnapshot(snapshottableDir, "s5"));
+
+    // now change max snapshot directory limit to 2 and restart namenode
+    cluster.getNameNode().getConf().setInt(DFSConfigKeys.
+        DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, 2);
+    cluster.restartNameNodes();
+    SnapshotManager snapshotManager = cluster.getNamesystem().
+        getSnapshotManager();
+
+    // make sure edits of all previous 5 create snapshots are replayed
+    Assert.assertEquals(numSnapshots, snapshotManager.getNumSnapshots());
+
+    // make sure namenode has the new snapshot limit configured as 2
+    Assert.assertEquals(2, snapshotManager.getMaxSnapshotLimit());
+
+    // Any new snapshot creation should still fail
+    LambdaTestUtils.intercept(SnapshotException.class,
+        "snapshot limit",
+        () -> hdfs.createSnapshot(snapshottableDir, "s5"));
+    // now change max snapshot FS limit to 2 and restart namenode
+    cluster.getNameNode().getConf().setInt(DFSConfigKeys.
+        DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT, 2);
+    cluster.restartNameNodes();
+    snapshotManager = cluster.getNamesystem().

Review comment:
       Could this statement fit on 1 line?

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,58 @@ public void testValidateSnapshotIDWidth() throws Exception {
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
+  @Test
+  public void testSnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+            DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(0).build();
+    cluster.waitActive();
+    DistributedFileSystem hdfs = cluster.getFileSystem();
+    hdfs.mkdirs(snapshottableDir);
+    hdfs.allowSnapshot(snapshottableDir);
+    int i = 0;

Review comment:
       Who uses this "i" later? Declaring it in the for-loop header should be good.

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,58 @@ public void testValidateSnapshotIDWidth() throws Exception {
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
+  @Test
+  public void testSnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+            DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).

Review comment:
       Should we shut down (clean up) this cluster when the test finishes?

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,58 @@ public void testValidateSnapshotIDWidth() throws Exception {
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
+  @Test
+  public void testSnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+            DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(0).build();
+    cluster.waitActive();
+    DistributedFileSystem hdfs = cluster.getFileSystem();
+    hdfs.mkdirs(snapshottableDir);
+    hdfs.allowSnapshot(snapshottableDir);
+    int i = 0;
+    for (; i < numSnapshots; i++) {
+      hdfs.createSnapshot(snapshottableDir, "s" + i);
+    }
+    LambdaTestUtils.intercept(SnapshotException.class,
+        "snapshot limit",
+        () -> hdfs.createSnapshot(snapshottableDir, "s5"));
+
+    // now change max snapshot directory limit to 2 and restart namenode
+    cluster.getNameNode().getConf().setInt(DFSConfigKeys.
+        DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, 2);
+    cluster.restartNameNodes();
+    SnapshotManager snapshotManager = cluster.getNamesystem().
+        getSnapshotManager();
+
+    // make sure edits of all previous 5 create snapshots are replayed
+    Assert.assertEquals(numSnapshots, snapshotManager.getNumSnapshots());
+
+    // make sure namenode has the new snapshot limit configured as 2
+    Assert.assertEquals(2, snapshotManager.getMaxSnapshotLimit());
+
+    // Any new snapshot creation should still fail
+    LambdaTestUtils.intercept(SnapshotException.class,
+        "snapshot limit",
+        () -> hdfs.createSnapshot(snapshottableDir, "s5"));
+    // now change max snapshot FS limit to 2 and restart namenode
+    cluster.getNameNode().getConf().setInt(DFSConfigKeys.
+        DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT, 2);
+    cluster.restartNameNodes();
+    snapshotManager = cluster.getNamesystem().

Review comment:
       Could this statement fit on 1 line?

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,58 @@ public void testValidateSnapshotIDWidth() throws Exception {
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
+  @Test
+  public void testSnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+            DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(0).build();
+    cluster.waitActive();
+    DistributedFileSystem hdfs = cluster.getFileSystem();
+    hdfs.mkdirs(snapshottableDir);
+    hdfs.allowSnapshot(snapshottableDir);
+    int i = 0;

Review comment:
       Who uses this "i" later? Declaring it in the for-loop header should be good.

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,58 @@ public void testValidateSnapshotIDWidth() throws Exception {
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
+  @Test
+  public void testSnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+            DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).

Review comment:
       Should we shut down (clean up) this cluster when the test finishes?

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,58 @@ public void testValidateSnapshotIDWidth() throws Exception {
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
+  @Test
+  public void testSnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+            DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(0).build();
+    cluster.waitActive();
+    DistributedFileSystem hdfs = cluster.getFileSystem();
+    hdfs.mkdirs(snapshottableDir);
+    hdfs.allowSnapshot(snapshottableDir);
+    int i = 0;
+    for (; i < numSnapshots; i++) {
+      hdfs.createSnapshot(snapshottableDir, "s" + i);
+    }
+    LambdaTestUtils.intercept(SnapshotException.class,
+        "snapshot limit",
+        () -> hdfs.createSnapshot(snapshottableDir, "s5"));
+
+    // now change max snapshot directory limit to 2 and restart namenode
+    cluster.getNameNode().getConf().setInt(DFSConfigKeys.
+        DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, 2);
+    cluster.restartNameNodes();
+    SnapshotManager snapshotManager = cluster.getNamesystem().
+        getSnapshotManager();
+
+    // make sure edits of all previous 5 create snapshots are replayed
+    Assert.assertEquals(numSnapshots, snapshotManager.getNumSnapshots());
+
+    // make sure namenode has the new snapshot limit configured as 2
+    Assert.assertEquals(2, snapshotManager.getMaxSnapshotLimit());
+
+    // Any new snapshot creation should still fail
+    LambdaTestUtils.intercept(SnapshotException.class,
+        "snapshot limit",
+        () -> hdfs.createSnapshot(snapshottableDir, "s5"));
+    // now change max snapshot FS limit to 2 and restart namenode
+    cluster.getNameNode().getConf().setInt(DFSConfigKeys.
+        DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT, 2);
+    cluster.restartNameNodes();
+    snapshotManager = cluster.getNamesystem().

Review comment:
       Could this statement fit on 1 line?

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,58 @@ public void testValidateSnapshotIDWidth() throws Exception {
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
+  @Test
+  public void testSnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+            DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(0).build();
+    cluster.waitActive();
+    DistributedFileSystem hdfs = cluster.getFileSystem();
+    hdfs.mkdirs(snapshottableDir);
+    hdfs.allowSnapshot(snapshottableDir);
+    int i = 0;

Review comment:
       Who uses this "i" later? Declaring it in the for-loop header should be good.

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,58 @@ public void testValidateSnapshotIDWidth() throws Exception {
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
+  @Test
+  public void testSnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+            DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).

Review comment:
       Should we shut down (clean up) this cluster when the test finishes?

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,58 @@ public void testValidateSnapshotIDWidth() throws Exception {
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
+  @Test
+  public void testSnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+            DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(0).build();
+    cluster.waitActive();
+    DistributedFileSystem hdfs = cluster.getFileSystem();
+    hdfs.mkdirs(snapshottableDir);
+    hdfs.allowSnapshot(snapshottableDir);
+    int i = 0;
+    for (; i < numSnapshots; i++) {
+      hdfs.createSnapshot(snapshottableDir, "s" + i);
+    }
+    LambdaTestUtils.intercept(SnapshotException.class,
+        "snapshot limit",
+        () -> hdfs.createSnapshot(snapshottableDir, "s5"));
+
+    // now change max snapshot directory limit to 2 and restart namenode
+    cluster.getNameNode().getConf().setInt(DFSConfigKeys.
+        DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, 2);
+    cluster.restartNameNodes();
+    SnapshotManager snapshotManager = cluster.getNamesystem().
+        getSnapshotManager();
+
+    // make sure edits of all previous 5 create snapshots are replayed
+    Assert.assertEquals(numSnapshots, snapshotManager.getNumSnapshots());
+
+    // make sure namenode has the new snapshot limit configured as 2
+    Assert.assertEquals(2, snapshotManager.getMaxSnapshotLimit());
+
+    // Any new snapshot creation should still fail
+    LambdaTestUtils.intercept(SnapshotException.class,
+        "snapshot limit",
+        () -> hdfs.createSnapshot(snapshottableDir, "s5"));
+    // now change max snapshot FS limit to 2 and restart namenode
+    cluster.getNameNode().getConf().setInt(DFSConfigKeys.
+        DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT, 2);
+    cluster.restartNameNodes();
+    snapshotManager = cluster.getNamesystem().

Review comment:
       Could this statement fit on 1 line?

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,58 @@ public void testValidateSnapshotIDWidth() throws Exception {
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
+  @Test
+  public void testSnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+            DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(0).build();
+    cluster.waitActive();
+    DistributedFileSystem hdfs = cluster.getFileSystem();
+    hdfs.mkdirs(snapshottableDir);
+    hdfs.allowSnapshot(snapshottableDir);
+    int i = 0;

Review comment:
       Who uses this "i" later? Declaring it in the for-loop header should be good.

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,58 @@ public void testValidateSnapshotIDWidth() throws Exception {
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
+  @Test
+  public void testSnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+            DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).

Review comment:
       Should we shut down (clean up) this cluster when the test finishes?

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,58 @@ public void testValidateSnapshotIDWidth() throws Exception {
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
+  @Test
+  public void testSnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+            DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(0).build();
+    cluster.waitActive();
+    DistributedFileSystem hdfs = cluster.getFileSystem();
+    hdfs.mkdirs(snapshottableDir);
+    hdfs.allowSnapshot(snapshottableDir);
+    int i = 0;
+    for (; i < numSnapshots; i++) {
+      hdfs.createSnapshot(snapshottableDir, "s" + i);
+    }
+    LambdaTestUtils.intercept(SnapshotException.class,
+        "snapshot limit",
+        () -> hdfs.createSnapshot(snapshottableDir, "s5"));
+
+    // now change max snapshot directory limit to 2 and restart namenode
+    cluster.getNameNode().getConf().setInt(DFSConfigKeys.
+        DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, 2);
+    cluster.restartNameNodes();
+    SnapshotManager snapshotManager = cluster.getNamesystem().
+        getSnapshotManager();
+
+    // make sure edits of all previous 5 create snapshots are replayed
+    Assert.assertEquals(numSnapshots, snapshotManager.getNumSnapshots());
+
+    // make sure namenode has the new snapshot limit configured as 2
+    Assert.assertEquals(2, snapshotManager.getMaxSnapshotLimit());
+
+    // Any new snapshot creation should still fail
+    LambdaTestUtils.intercept(SnapshotException.class,
+        "snapshot limit",
+        () -> hdfs.createSnapshot(snapshottableDir, "s5"));
+    // now change max snapshot FS limit to 2 and restart namenode
+    cluster.getNameNode().getConf().setInt(DFSConfigKeys.
+        DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT, 2);
+    cluster.restartNameNodes();
+    snapshotManager = cluster.getNamesystem().

Review comment:
       Could this statement fit on 1 line?

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,58 @@ public void testValidateSnapshotIDWidth() throws Exception {
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
+  @Test
+  public void testSnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+            DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(0).build();
+    cluster.waitActive();
+    DistributedFileSystem hdfs = cluster.getFileSystem();
+    hdfs.mkdirs(snapshottableDir);
+    hdfs.allowSnapshot(snapshottableDir);
+    int i = 0;

Review comment:
       Who uses this "i" later? Declaring it in the for-loop header should be good.

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,58 @@ public void testValidateSnapshotIDWidth() throws Exception {
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
+  @Test
+  public void testSnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+            DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).

Review comment:
       Should we shut down (clean up) this cluster when the test finishes?

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,58 @@ public void testValidateSnapshotIDWidth() throws Exception {
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
+  @Test
+  public void testSnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+            DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(0).build();
+    cluster.waitActive();
+    DistributedFileSystem hdfs = cluster.getFileSystem();
+    hdfs.mkdirs(snapshottableDir);
+    hdfs.allowSnapshot(snapshottableDir);
+    int i = 0;
+    for (; i < numSnapshots; i++) {
+      hdfs.createSnapshot(snapshottableDir, "s" + i);
+    }
+    LambdaTestUtils.intercept(SnapshotException.class,
+        "snapshot limit",
+        () -> hdfs.createSnapshot(snapshottableDir, "s5"));
+
+    // now change max snapshot directory limit to 2 and restart namenode
+    cluster.getNameNode().getConf().setInt(DFSConfigKeys.
+        DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, 2);
+    cluster.restartNameNodes();
+    SnapshotManager snapshotManager = cluster.getNamesystem().
+        getSnapshotManager();
+
+    // make sure edits of all previous 5 create snapshots are replayed
+    Assert.assertEquals(numSnapshots, snapshotManager.getNumSnapshots());
+
+    // make sure namenode has the new snapshot limit configured as 2
+    Assert.assertEquals(2, snapshotManager.getMaxSnapshotLimit());
+
+    // Any new snapshot creation should still fail
+    LambdaTestUtils.intercept(SnapshotException.class,
+        "snapshot limit",
+        () -> hdfs.createSnapshot(snapshottableDir, "s5"));
+    // now change max snapshot FS limit to 2 and restart namenode
+    cluster.getNameNode().getConf().setInt(DFSConfigKeys.
+        DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT, 2);
+    cluster.restartNameNodes();
+    snapshotManager = cluster.getNamesystem().

Review comment:
       Could this statement fit on 1 line?

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,58 @@ public void testValidateSnapshotIDWidth() throws Exception {
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
+  @Test
+  public void testSnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+            DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(0).build();
+    cluster.waitActive();
+    DistributedFileSystem hdfs = cluster.getFileSystem();
+    hdfs.mkdirs(snapshottableDir);
+    hdfs.allowSnapshot(snapshottableDir);
+    int i = 0;

Review comment:
       Who uses this "i" later? Declaring it in the for-loop header should be good.

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,58 @@ public void testValidateSnapshotIDWidth() throws Exception {
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
+  @Test
+  public void testSnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+            DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).

Review comment:
       Should we shut down (clean up) this cluster when the test finishes?

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,58 @@ public void testValidateSnapshotIDWidth() throws Exception {
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
+  @Test
+  public void testSnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+            DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(0).build();
+    cluster.waitActive();
+    DistributedFileSystem hdfs = cluster.getFileSystem();
+    hdfs.mkdirs(snapshottableDir);
+    hdfs.allowSnapshot(snapshottableDir);
+    int i = 0;
+    for (; i < numSnapshots; i++) {
+      hdfs.createSnapshot(snapshottableDir, "s" + i);
+    }
+    LambdaTestUtils.intercept(SnapshotException.class,
+        "snapshot limit",
+        () -> hdfs.createSnapshot(snapshottableDir, "s5"));
+
+    // now change max snapshot directory limit to 2 and restart namenode
+    cluster.getNameNode().getConf().setInt(DFSConfigKeys.
+        DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, 2);
+    cluster.restartNameNodes();
+    SnapshotManager snapshotManager = cluster.getNamesystem().
+        getSnapshotManager();
+
+    // make sure edits of all previous 5 create snapshots are replayed
+    Assert.assertEquals(numSnapshots, snapshotManager.getNumSnapshots());
+
+    // make sure namenode has the new snapshot limit configured as 2
+    Assert.assertEquals(2, snapshotManager.getMaxSnapshotLimit());
+
+    // Any new snapshot creation should still fail
+    LambdaTestUtils.intercept(SnapshotException.class,
+        "snapshot limit",
+        () -> hdfs.createSnapshot(snapshottableDir, "s5"));
+    // now change max snapshot FS limit to 2 and restart namenode
+    cluster.getNameNode().getConf().setInt(DFSConfigKeys.
+        DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT, 2);
+    cluster.restartNameNodes();
+    snapshotManager = cluster.getNamesystem().

Review comment:
       Could this statement fit on 1 line?

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,58 @@ public void testValidateSnapshotIDWidth() throws Exception {
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
+  @Test
+  public void testSnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+            DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(0).build();
+    cluster.waitActive();
+    DistributedFileSystem hdfs = cluster.getFileSystem();
+    hdfs.mkdirs(snapshottableDir);
+    hdfs.allowSnapshot(snapshottableDir);
+    int i = 0;

Review comment:
       Who uses this "i" later? Declaring it in the for-loop header should be good.

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,58 @@ public void testValidateSnapshotIDWidth() throws Exception {
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
+  @Test
+  public void testSnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+            DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).

Review comment:
       Should we shut down (clean up) this cluster when the test finishes?




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



---------------------------------------------------------------------
To unsubscribe, e-mail: common-issues-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-issues-help@hadoop.apache.org


Mime
View raw message