drill-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From sudhe...@apache.org
Subject [2/4] drill git commit: DRILL-5126: Provide simplified, unified "cluster fixture" for test
Date Mon, 30 Jan 2017 19:42:39 GMT
http://git-wip-us.apache.org/repos/asf/drill/blob/5c3924c9/exec/java-exec/src/test/java/org/apache/drill/test/LogFixture.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/LogFixture.java b/exec/java-exec/src/test/java/org/apache/drill/test/LogFixture.java
new file mode 100644
index 0000000..d2242a1
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/LogFixture.java
@@ -0,0 +1,255 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.slf4j.LoggerFactory;
+
+import ch.qos.logback.classic.Level;
+import ch.qos.logback.classic.Logger;
+import ch.qos.logback.classic.LoggerContext;
+import ch.qos.logback.classic.encoder.PatternLayoutEncoder;
+import ch.qos.logback.classic.spi.ILoggingEvent;
+import ch.qos.logback.core.ConsoleAppender;
+
+/**
+ * Establishes test-specific logging without having to alter the global
+ * <tt>logback-test.xml</tt> file. Allows directing output to the console
+ * (if not already configured) and setting the log level on specific loggers
+ * of interest in the test. The fixture automatically restores the original
+ * log configuration on exit.
+ * <p>
+ * Typical usage: <pre><code>
+ * {@literal @}Test
+ * public void myTest() {
+ *   LogFixtureBuilder logBuilder = LogFixture.builder()
+ *          .toConsole()
+ *          .disable() // Silence all other loggers
+ *          .logger(ExternalSortBatch.class, Level.DEBUG);
+ *   try (LogFixture logs = logBuilder.build()) {
+ *     // Test code here
+ *   }
+ * }</code></pre>
+ *  <p>
+ * You can &ndash; and should &ndash; combine the log fixture with the
+ * cluster and client fixtures to have complete control over your test-time
+ * Drill environment.
+ */
+
+public class LogFixture implements AutoCloseable {
+
+  // Elapsed time in ms, log level, thread, logger, message.
+
+  public static final String DEFAULT_CONSOLE_FORMAT = "%r %level [%thread] [%logger] - %msg%n";
+  private static final String DRILL_PACKAGE_NAME = "org.apache.drill";
+
+  /**
+   * Memento for a logger name and level.
+   */
+  public static class LogSpec {
+    String loggerName;
+    Level logLevel;
+
+    public LogSpec(String loggerName, Level level) {
+      this.loggerName = loggerName;
+      this.logLevel = level;
+    }
+  }
+
+  /**
+   * Builds the log settings to be used for a test. The log settings here
+   * add to those specified in a <tt>logback.xml</tt> or
+   * <tt>logback-test.xml</tt> file on your class path. In particular, if
+   * the logging configuration already redirects the Drill logger to the
+   * console, setting console logging here does nothing.
+   */
+
+  public static class LogFixtureBuilder {
+
+    private String consoleFormat = DEFAULT_CONSOLE_FORMAT;
+    private boolean logToConsole;
+    private List<LogSpec> loggers = new ArrayList<>();
+
+    /**
+     * Send all enabled logging to the console (if not already configured.) Some
+     * Drill log configuration files send the root to the console (or file), but
+     * the Drill loggers to Lilith. In that case, Lilith "hides" the console
+     * logger. Using this call adds a console logger to the Drill logger so that
+     * output does, in fact, go to the console regardless of the configuration
+     * in the Logback configuration file.
+     *
+     * @return this builder
+     */
+    public LogFixtureBuilder toConsole() {
+      logToConsole = true;
+      return this;
+    }
+
+    /**
+     * Send logging to the console using the defined format.
+     *
+     * @param format valid Logback log format
+     * @return this builder
+     */
+
+    public LogFixtureBuilder toConsole(String format) {
+      consoleFormat = format;
+      return toConsole();
+    }
+
+    /**
+     * Set a specific logger to the given level.
+     *
+     * @param loggerName name of the logger (typically used for package-level
+     * loggers)
+     * @param level the desired Logback-defined level
+     * @return this builder
+     */
+    public LogFixtureBuilder logger(String loggerName, Level level) {
+      loggers.add(new LogSpec(loggerName, level));
+      return this;
+    }
+
+    /**
+     * Set a specific logger to the given level.
+     *
+     * @param loggerClass class that defines the logger (typically used for
+     * class-specific loggers)
+     * @param level the desired Logback-defined level
+     * @return this builder
+     */
+    public LogFixtureBuilder logger(Class<?> loggerClass, Level level) {
+      loggers.add(new LogSpec(loggerClass.getName(), level));
+      return this;
+    }
+
+    /**
+     * Turns off all logging. If called first, you can set disable as your
+     * general policy, then turn back on loggers selectively for those
+     * of interest.
+     * @return this builder
+     */
+    public LogFixtureBuilder disable() {
+      return rootLogger(Level.OFF);
+    }
+
+    /**
+     * Set the desired log level on the root logger.
+     * @param level the desired Logback log level
+     * @return this builder
+     */
+
+    public LogFixtureBuilder rootLogger(Level level) {
+      loggers.add(new LogSpec(Logger.ROOT_LOGGER_NAME, level));
+      return this;
+    }
+
+    /**
+     * Apply the log levels and output, then return a fixture to be used
+     * in a try-with-resources block. The fixture automatically restores
+     * the original configuration on completion of the try block.
+     * @return the log fixture
+     */
+    public LogFixture build() {
+      return new LogFixture(this);
+    }
+  }
+
+  private PatternLayoutEncoder ple;
+  private ConsoleAppender<ILoggingEvent> appender;
+  private List<LogSpec> loggers = new ArrayList<>();
+  private Logger drillLogger;
+
+  public LogFixture(LogFixtureBuilder builder) {
+    if (builder.logToConsole) {
+      setupConsole(builder);
+    }
+    setupLoggers(builder);
+  }
+
+  /**
+   * Creates a new log fixture builder.
+   * @return the log fixture builder
+   */
+
+  public static LogFixtureBuilder builder() {
+    return new LogFixtureBuilder();
+  }
+
+  private void setupConsole(LogFixtureBuilder builder) {
+    Logger drillLogger = (Logger)LoggerFactory.getLogger(DRILL_PACKAGE_NAME);
+    if (drillLogger.getAppender("STDOUT") != null) {
+      return;
+    }
+    LoggerContext lc = (LoggerContext) LoggerFactory.getILoggerFactory();
+    ple = new PatternLayoutEncoder();
+    ple.setPattern(builder.consoleFormat);
+    ple.setContext(lc);
+    ple.start();
+
+    appender = new ConsoleAppender<>( );
+    appender.setContext(lc);
+    appender.setName("Console");
+    appender.setEncoder( ple );
+    appender.start();
+
+    Logger root = (Logger)LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME);
+    root.addAppender(appender);
+    drillLogger.addAppender(appender);
+  }
+
+  private void setupLoggers(LogFixtureBuilder builder) {
+    for (LogSpec spec : builder.loggers) {
+      setupLogger(spec);
+    }
+  }
+
+  private void setupLogger(LogSpec spec) {
+    Logger logger = (Logger)LoggerFactory.getLogger(spec.loggerName);
+    Level oldLevel = logger.getLevel();
+    logger.setLevel(spec.logLevel);
+    loggers.add(new LogSpec(spec.loggerName, oldLevel));
+  }
+
+  @Override
+  public void close() {
+    restoreLoggers();
+    restoreConsole();
+  }
+
+  private void restoreLoggers() {
+    for (LogSpec spec : loggers) {
+      Logger logger = (Logger)LoggerFactory.getLogger(spec.loggerName);
+      logger.setLevel(spec.logLevel);
+    }
+  }
+
+  private void restoreConsole() {
+    if (appender == null) {
+      return;
+    }
+    Logger root = (Logger)LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME);
+    root.detachAppender(appender);
+    drillLogger.detachAppender(appender);
+    appender.stop();
+    ple.stop();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/5c3924c9/exec/java-exec/src/test/java/org/apache/drill/test/ProfileParser.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/ProfileParser.java b/exec/java-exec/src/test/java/org/apache/drill/test/ProfileParser.java
new file mode 100644
index 0000000..f9df768
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/ProfileParser.java
@@ -0,0 +1,219 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ ******************************************************************************/
+package org.apache.drill.test;
+
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import javax.json.Json;
+import javax.json.JsonArray;
+import javax.json.JsonNumber;
+import javax.json.JsonObject;
+import javax.json.JsonReader;
+import javax.json.JsonValue;
+
+/**
+ * Parses a query profile and provides access to various bits of the profile
+ * for diagnostic purposes during tests.
+ */
+
+public class ProfileParser {
+
+  JsonObject profile;
+  List<String> plans;
+
+  public ProfileParser( File file ) throws IOException {
+    try (FileReader fileReader = new FileReader(file);
+         JsonReader reader = Json.createReader(fileReader)) {
+      profile = (JsonObject) reader.read();
+    }
+  }
+
+  public String getQuery( ) {
+    return profile.get("query").toString();
+  }
+
+  public String getPlan() {
+    return profile.get("plan").toString();
+  }
+
+  public List<String> getPlans() {
+    if ( plans != null ) {
+      return plans; }
+    String plan = getPlan( );
+    Pattern p = Pattern.compile( "(\\d\\d-\\d+[^\\\\]*)\\\\n", Pattern.MULTILINE );
+    Matcher m = p.matcher(plan);
+    plans = new ArrayList<>( );
+    while ( m.find() ) {
+      plans.add(m.group(1));
+    }
+    return plans;
+  }
+
+  public List<String> getScans( ) {
+    List<String> scans = new ArrayList<>();
+    int n = getPlans( ).size();
+//    Pattern p = Pattern.compile( "\\d+-\\d+\\s+(\\w+)\\(" );
+    for ( int i = n-1; i >= 0;  i-- ) {
+      String plan = plans.get( i );
+//      Matcher m = p.matcher( plan );
+//      if ( ! m.find() ) { continue; }
+      if ( plan.contains( " Scan(" ) ) {
+        scans.add( plan );
+      }
+    }
+    return scans;
+  }
+
+  public List<FieldDef> getColumns( String plan ) {
+    Pattern p = Pattern.compile( "RecordType\\((.*)\\):" );
+    Matcher m = p.matcher(plan);
+    if ( ! m.find() ) { return null; }
+    String frag = m.group(1);
+    String parts[] = frag.split( ", " );
+    List<FieldDef> fields = new ArrayList<>( );
+    for ( String part : parts ) {
+      String halves[] = part.split( " " );
+      fields.add( new FieldDef( halves[1], halves[0] ) );
+    }
+    return fields;
+  }
+
+  public Map<Integer,String> getOperators( ) {
+    Map<Integer,String> ops = new HashMap<>();
+    int n = getPlans( ).size();
+    Pattern p = Pattern.compile( "\\d+-(\\d+)\\s+(\\w+)" );
+    for ( int i = n-1; i >= 0;  i-- ) {
+      String plan = plans.get( i );
+      Matcher m = p.matcher( plan );
+      if ( ! m.find() ) { continue; }
+      int index = Integer.parseInt(m.group(1));
+      String op = m.group(2);
+      ops.put(index,op);
+    }
+    return ops;
+  }
+
+  public JsonArray getFragmentProfile( ) {
+    return profile.getJsonArray("fragmentProfile");
+  }
+
+  public static class OpInfo {
+    int opId;
+    int type;
+    String name;
+    long processMs;
+    long waitMs;
+    long setupMs;
+    long peakMem;
+    Map<Integer,JsonValue> metrics = new HashMap<>();
+
+    public long getMetric(int id) {
+      JsonValue value = metrics.get(id);
+      if (value == null) {
+        return 0; }
+      return ((JsonNumber) value).longValue();
+    }
+  }
+
+  public Map<Integer,OpInfo> getOpInfo( ) {
+    Map<Integer,String> ops = getOperators( );
+    Map<Integer,OpInfo> info = new HashMap<>( );
+    JsonArray frags = getFragmentProfile( );
+    JsonObject fragProfile = frags.getJsonObject(0).getJsonArray("minorFragmentProfile").getJsonObject(0);
+    JsonArray opList = fragProfile.getJsonArray("operatorProfile");
+    for ( JsonObject opProfile : opList.getValuesAs(JsonObject.class) ) {
+      parseOpProfile( ops, info, opProfile );
+    }
+    return info;
+  }
+
+  private void parseOpProfile(Map<Integer, String> ops,
+      Map<Integer, OpInfo> info, JsonObject opProfile) {
+    OpInfo opInfo = new OpInfo( );
+    opInfo.opId = opProfile.getInt("operatorId");
+    opInfo.type = opProfile.getInt("operatorType");
+    opInfo.name = ops.get(opInfo.opId);
+    opInfo.processMs = opProfile.getJsonNumber("processNanos").longValue() / 1_000_000;
+    opInfo.waitMs = opProfile.getJsonNumber("waitNanos").longValue() / 1_000_000;
+    opInfo.setupMs = opProfile.getJsonNumber("setupNanos").longValue() / 1_000_000;
+    opInfo.peakMem = opProfile.getJsonNumber("peakLocalMemoryAllocated").longValue() / (1024
* 1024);
+    JsonArray array = opProfile.getJsonArray("metric");
+    if (array != null) {
+      for (int i = 0; i < array.size(); i++) {
+        JsonObject metric = array.getJsonObject(i);
+        opInfo.metrics.put(metric.getJsonNumber("metricId").intValue(), metric.get("longValue"));
+      }
+    }
+    info.put(opInfo.opId, opInfo);
+  }
+
+  public void print() {
+    Map<Integer, OpInfo> opInfo = getOpInfo();
+    int n = opInfo.size();
+    long totalSetup = 0;
+    long totalProcess = 0;
+    for ( int i = 0;  i <= n;  i++ ) {
+      OpInfo op = opInfo.get(i);
+      if ( op == null ) { continue; }
+      totalSetup += op.setupMs;
+      totalProcess += op.processMs;
+    }
+    long total = totalSetup + totalProcess;
+    for ( int i = 0;  i <= n;  i++ ) {
+      OpInfo op = opInfo.get(i);
+      if ( op == null ) { continue; }
+      System.out.print( "Op: " );
+      System.out.print( op.opId );
+      System.out.println( " " + op.name );
+      System.out.print( "  Setup:   " + op.setupMs );
+      System.out.print( " - " + percent(op.setupMs, totalSetup ) + "%" );
+      System.out.println( ", " + percent(op.setupMs, total ) + "%" );
+      System.out.print( "  Process: " + op.processMs );
+      System.out.print( " - " + percent(op.processMs, totalProcess ) + "%" );
+      System.out.println( ", " + percent(op.processMs, total ) + "%" );
+      if (op.type == 17) {
+        long value = op.getMetric(0);
+        System.out.println( "  Spills: " + value );
+      }
+      if (op.waitMs > 0) {
+        System.out.println( "  Wait:    " + op.waitMs );
+      }
+      if ( op.peakMem > 0) {
+        System.out.println( "  Memory: " + op.peakMem );
+      }
+    }
+    System.out.println( "Total:" );
+    System.out.println( "  Setup:   " + totalSetup );
+    System.out.println( "  Process: " + totalProcess );
+  }
+
+  public static long percent( long value, long total ) {
+    if ( total == 0 ) {
+      return 0; }
+    return Math.round(value * 100 / total );
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/5c3924c9/exec/java-exec/src/test/java/org/apache/drill/test/QueryBuilder.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/QueryBuilder.java b/exec/java-exec/src/test/java/org/apache/drill/test/QueryBuilder.java
new file mode 100644
index 0000000..084c28a
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/QueryBuilder.java
@@ -0,0 +1,455 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.test;
+
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.drill.PlanTestBase;
+import org.apache.drill.QueryTestUtil;
+import org.apache.drill.common.config.DrillConfig;
+import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.exec.client.PrintingResultsListener;
+import org.apache.drill.exec.client.QuerySubmitter.Format;
+import org.apache.drill.exec.proto.UserBitShared.QueryId;
+import org.apache.drill.exec.proto.UserBitShared.QueryResult.QueryState;
+import org.apache.drill.exec.proto.UserBitShared.QueryType;
+import org.apache.drill.exec.proto.helper.QueryIdHelper;
+import org.apache.drill.exec.record.RecordBatchLoader;
+import org.apache.drill.exec.record.VectorWrapper;
+import org.apache.drill.exec.rpc.ConnectionThrottle;
+import org.apache.drill.exec.rpc.RpcException;
+import org.apache.drill.exec.rpc.user.AwaitableUserResultsListener;
+import org.apache.drill.exec.rpc.user.QueryDataBatch;
+import org.apache.drill.exec.rpc.user.UserResultsListener;
+import org.apache.drill.exec.util.VectorUtil;
+import org.apache.drill.exec.vector.NullableVarCharVector;
+import org.apache.drill.exec.vector.ValueVector;
+import org.apache.drill.test.BufferingQueryEventListener.QueryEvent;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Builder for a Drill query. Provides all types of query formats,
+ * and a variety of ways to run the query.
+ */
+
+public class QueryBuilder {
+
+  /**
+   * Listener used to retrieve the query summary (only) asynchronously
+   * using a {@link QuerySummaryFuture}.
+   */
+
+  public class SummaryOnlyQueryEventListener implements UserResultsListener {
+
+    private final QuerySummaryFuture future;
+    private QueryId queryId;
+    private int recordCount;
+    private int batchCount;
+    private long startTime;
+
+    public SummaryOnlyQueryEventListener(QuerySummaryFuture future) {
+      this.future = future;
+      startTime = System.currentTimeMillis();
+    }
+
+    @Override
+    public void queryIdArrived(QueryId queryId) {
+      this.queryId = queryId;
+    }
+
+    @Override
+    public void submissionFailed(UserException ex) {
+      future.completed(
+          new QuerySummary(queryId, recordCount, batchCount,
+                           System.currentTimeMillis() - startTime, ex));
+    }
+
+    @Override
+    public void dataArrived(QueryDataBatch result, ConnectionThrottle throttle) {
+      batchCount++;
+      recordCount += result.getHeader().getRowCount();
+      result.release();
+    }
+
+    @Override
+    public void queryCompleted(QueryState state) {
+      future.completed(
+          new QuerySummary(queryId, recordCount, batchCount,
+                           System.currentTimeMillis() - startTime, state));
+    }
+  }
+
+  /**
+   * The future used to wait for the completion of an async query. Returns
+   * just the summary of the query.
+   */
+
+  public class QuerySummaryFuture implements Future<QuerySummary> {
+
+    /**
+     * Synchronizes the listener thread and the test thread that
+     * launched the query.
+     */
+
+    private CountDownLatch lock = new CountDownLatch(1);
+    private QuerySummary summary;
+
+    /**
+     * Unsupported at present.
+     */
+
+    @Override
+    public boolean cancel(boolean mayInterruptIfRunning) {
+      throw new UnsupportedOperationException();
+    }
+
+    /**
+     * Always returns false.
+     */
+
+    @Override
+    public boolean isCancelled() { return false; }
+
+    @Override
+    public boolean isDone() { return summary != null; }
+
+    @Override
+    public QuerySummary get() throws InterruptedException, ExecutionException {
+      lock.await();
+      return summary;
+    }
+
+    /**
+     * Not supported at present, just does a non-timeout get.
+     */
+
+    @Override
+    public QuerySummary get(long timeout, TimeUnit unit)
+        throws InterruptedException, ExecutionException, TimeoutException {
+      return get();
+    }
+
+    protected void completed(QuerySummary querySummary) {
+      summary = querySummary;
+      lock.countDown();
+    }
+  }
+
+  /**
+   * Summary results of a query: records, batches, run time.
+   */
+
+  public static class QuerySummary {
+    private final QueryId queryId;
+    private final int records;
+    private final int batches;
+    private final long ms;
+    private final QueryState finalState;
+    private final Exception error;
+
+    public QuerySummary(QueryId queryId, int recordCount, int batchCount, long elapsed, QueryState
state) {
+      this.queryId = queryId;
+      records = recordCount;
+      batches = batchCount;
+      ms = elapsed;
+      finalState = state;
+      error = null;
+    }
+
+    public QuerySummary(QueryId queryId, int recordCount, int batchCount, long elapsed, Exception
ex) {
+      this.queryId = queryId;
+      records = recordCount;
+      batches = batchCount;
+      ms = elapsed;
+      finalState = null;
+      error = ex;
+    }
+
+    public boolean failed() { return error != null; }
+    public boolean succeeded() { return error == null; }
+    public long recordCount() { return records; }
+    public int batchCount() { return batches; }
+    public long runTimeMs() { return ms; }
+    public QueryId queryId() { return queryId; }
+    public String queryIdString() { return QueryIdHelper.getQueryId(queryId); }
+    public Exception error() { return error; }
+    public QueryState finalState() { return finalState; }
+  }
+
+  private final ClientFixture client;
+  private QueryType queryType;
+  private String queryText;
+
+  QueryBuilder(ClientFixture client) {
+    this.client = client;
+  }
+
+  public QueryBuilder query(QueryType type, String text) {
+    queryType = type;
+    queryText = text;
+    return this;
+  }
+
+  public QueryBuilder sql(String sql) {
+    return query(QueryType.SQL, sql);
+  }
+
+  public QueryBuilder sql(String query, Object... args) {
+    return sql(String.format(query, args));
+  }
+
+  public QueryBuilder physical(String plan) {
+    return query(QueryType.PHYSICAL, plan);
+  }
+
+  public QueryBuilder sqlResource(String resource) {
+    sql(ClusterFixture.loadResource(resource));
+    return this;
+  }
+
+  public QueryBuilder sqlResource(String resource, Object... args) {
+    sql(ClusterFixture.loadResource(resource), args);
+    return this;
+  }
+
+  public QueryBuilder physicalResource(String resource) {
+    physical(ClusterFixture.loadResource(resource));
+    return this;
+  }
+
+  /**
+   * Run the query returning just a summary of the results: record count,
+   * batch count and run time. Handy when doing performance tests when the
+   * validity of the results is verified in some other test.
+   *
+   * @return the query summary
+   * @throws Exception if anything goes wrong anywhere in the execution
+   */
+
+  public QuerySummary run() throws Exception {
+    return produceSummary(withEventListener());
+  }
+
+  /**
+   * Run the query and return a list of the result batches. Use
+   * if the batch count is small and you want to work with them.
+   * @return a list of batches resulting from the query
+   * @throws RpcException
+   */
+
+  public List<QueryDataBatch> results() throws RpcException {
+    Preconditions.checkNotNull(queryType, "Query not provided.");
+    Preconditions.checkNotNull(queryText, "Query not provided.");
+    return client.client().runQuery(queryType, queryText);
+  }
+
+  /**
+   * Run the query with the listener provided. Use when the result
+   * count will be large, or you don't need the results.
+   *
+   * @param listener the Drill listener
+   */
+
+  public void withListener(UserResultsListener listener) {
+    Preconditions.checkNotNull(queryType, "Query not provided.");
+    Preconditions.checkNotNull(queryText, "Query not provided.");
+    client.client().runQuery(queryType, queryText, listener);
+  }
+
+  /**
+   * Run the query, return an easy-to-use event listener to process
+   * the query results. Use when the result set is large. The listener
+   * allows the caller to iterate over results in the test thread.
+   * (The listener implements a producer-consumer model to hide the
+   * details of Drill listeners.)
+   *
+   * @return the query event listener
+   */
+
+  public BufferingQueryEventListener withEventListener() {
+    BufferingQueryEventListener listener = new BufferingQueryEventListener();
+    withListener(listener);
+    return listener;
+  }
+
+  public long printCsv() {
+    return print(Format.CSV);
+  }
+
+  public long print(Format format) {
+    return print(format,20);
+  }
+
+  public long print(Format format, int colWidth) {
+    return runAndWait(new PrintingResultsListener(client.cluster().config(), format, colWidth));
+  }
+
+  /**
+   * Run the query asynchronously, returning a future to be used
+   * to check for query completion, wait for completion, and obtain
+   * the result summary.
+   */
+
+  public QuerySummaryFuture futureSummary() {
+    QuerySummaryFuture future = new QuerySummaryFuture();
+    withListener(new SummaryOnlyQueryEventListener(future));
+    return future;
+  }
+
+  /**
+   * Run a query and optionally print the output in TSV format.
+   * Similar to {@link QueryTestUtil#test} with one query. Output is printed
+   * only if the tests are running as verbose.
+   *
+   * @return the number of rows returned
+   * @throws Exception if anything goes wrong with query execution
+   */
+  public long print() throws Exception {
+    DrillConfig config = client.cluster().config( );
+
+    // Note: verbose check disabled until that change is
+    // committed.
+
+    boolean verbose = ! config.getBoolean(QueryTestUtil.TEST_QUERY_PRINTING_SILENT) /* ||
+                      DrillTest.verbose() */;
+    if (verbose) {
+      return print(Format.TSV, VectorUtil.DEFAULT_COLUMN_WIDTH);
+    } else {
+      return run().recordCount();
+    }
+  }
+
+  public long runAndWait(UserResultsListener listener) {
+    AwaitableUserResultsListener resultListener =
+        new AwaitableUserResultsListener(listener);
+    withListener(resultListener);
+    try {
+      return resultListener.await();
+    } catch (Exception e) {
+      throw new IllegalStateException(e);
+    }
+  }
+
+  /**
+   * Submit an "EXPLAIN" statement, and return text form of the
+   * plan.
+   * @throws Exception if the query fails
+   */
+
+  public String explainText() throws Exception {
+    return explain(ClusterFixture.EXPLAIN_PLAN_TEXT);
+  }
+
+  /**
+   * Submit an "EXPLAIN" statement, and return the JSON form of the
+   * plan.
+   * @throws Exception if the query fails
+   */
+
+  public String explainJson() throws Exception {
+    return explain(ClusterFixture.EXPLAIN_PLAN_JSON);
+  }
+
+  public String explain(String format) throws Exception {
+    queryText = "EXPLAIN PLAN FOR " + queryText;
+    return queryPlan(format);
+  }
+
+  private QuerySummary produceSummary(BufferingQueryEventListener listener) throws Exception
{
+    long start = System.currentTimeMillis();
+    int recordCount = 0;
+    int batchCount = 0;
+    QueryId queryId = null;
+    QueryState state = null;
+    loop:
+    for (;;) {
+      QueryEvent event = listener.get();
+      switch (event.type)
+      {
+      case BATCH:
+        batchCount++;
+        recordCount += event.batch.getHeader().getRowCount();
+        event.batch.release();
+        break;
+      case EOF:
+        state = event.state;
+        break loop;
+      case ERROR:
+        throw event.error;
+      case QUERY_ID:
+        queryId = event.queryId;
+        break;
+      default:
+        throw new IllegalStateException("Unexpected event: " + event.type);
+      }
+    }
+    long end = System.currentTimeMillis();
+    long elapsed = end - start;
+    return new QuerySummary(queryId, recordCount, batchCount, elapsed, state);
+  }
+
+  /**
+   * Submit an "EXPLAIN" statement, and return the column value which
+   * contains the plan's string.
+   * <p>
+   * Cribbed from {@link PlanTestBase#getPlanInString(String, String)}
+   * @throws Exception if anything goes wrogn in the query
+   */
+
+  protected String queryPlan(String columnName) throws Exception {
+    Preconditions.checkArgument(queryType == QueryType.SQL, "Can only explan an SQL query.");
+    final List<QueryDataBatch> results = results();
+    final RecordBatchLoader loader = new RecordBatchLoader(client.allocator());
+    final StringBuilder builder = new StringBuilder();
+
+    for (final QueryDataBatch b : results) {
+      if (!b.hasData()) {
+        continue;
+      }
+
+      loader.load(b.getHeader().getDef(), b.getData());
+
+      final VectorWrapper<?> vw;
+      try {
+          vw = loader.getValueAccessorById(
+              NullableVarCharVector.class,
+              loader.getValueVectorId(SchemaPath.getSimplePath(columnName)).getFieldIds());
+      } catch (Throwable t) {
+        throw new IllegalStateException("Looks like you did not provide an explain plan query,
please add EXPLAIN PLAN FOR to the beginning of your query.");
+      }
+
+      @SuppressWarnings("resource")
+      final ValueVector vv = vw.getValueVector();
+      for (int i = 0; i < vv.getAccessor().getValueCount(); i++) {
+        final Object o = vv.getAccessor().getObject(i);
+        builder.append(o);
+      }
+      loader.clear();
+      b.release();
+    }
+
+    return builder.toString();
+  }
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/5c3924c9/exec/java-exec/src/test/java/org/apache/drill/test/package-info.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/package-info.java b/exec/java-exec/src/test/java/org/apache/drill/test/package-info.java
new file mode 100644
index 0000000..9f62478
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/package-info.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Provides a variety of test framework tools to simplify Drill unit
+ * tests and ad-hoc tests created while developing features. Key components
+ * include:
+ * <ul>
+ * <li>{@link FixtureBuilder}: Builder pattern to create an embedded Drillbit,
+ * or cluster of Drillbits, using a specified set of configuration, session
+ * and system options.</li>
+ * <li>{@link ClusterFixture}: The cluster created by the builder.</li>
+ * <li>{@link ClientFixture}: A facade to the Drill client that provides
+ * convenience methods for setting session options, running queries and
+ * so on. A client is associated with a cluster. If tests desire, multiple
+ * clients can be created for a single cluster, though most need just one
+ * client. A builder exists for clients, but most tests get the client
+ * directly from the cluster.</li>
+ * <li>{@link QueryBuilder}: a builder pattern for constructing and
+ * running any form of query (SQL, logical or physical) and running the
+ * query in a wide variety of ways (just count the rows, return the
+ * results as a list, run using a listener, etc.)</li>
+ * <li>{@link QueryBuilder.QuerySummary QuerySummary}: a summary of a
+ * query returned from running the query. Contains the query ID, the
+ * row count, the batch count and elapsed run time.</li>
+ * <li>{@link ProfileParser}: A simple tool to load a query profile and
+ * provide access to the profile structure. Also prints the key parts of
+ * the profile for diagnostic purposes.</li>
+ * <li>{@link LogFixture}: Allows per-test changes to log settings to,
+ * say, send a particular logger to the console for easier debugging, or
+ * to suppress logging of a deliberately created failure.</li>
+ * </ul>
+ * <h3>Usage</h3>
+ * A typical test using this framework looks like this:
+ * <pre><code>
+  {@literal @}Test
+  public void exampleTest() throws Exception {
+
+    // Configure the cluster. One Drillbit by default.
+    FixtureBuilder builder = ClusterFixture.builder()
+        // Set up per-test specialized config and session options.
+        .configProperty(ExecConstants.SYS_STORE_PROVIDER_LOCAL_ENABLE_WRITE, true)
+        .configProperty(ExecConstants.REMOVER_ENABLE_GENERIC_COPIER, true)
+        .sessionOption(ExecConstants.MAX_QUERY_MEMORY_PER_NODE_KEY, 3L * 1024 * 1024 * 1024)
+        .maxParallelization(1)
+        ;
+
+    // Launch the cluster and client.
+    try (ClusterFixture cluster = builder.build();
+         ClientFixture client = cluster.clientFixture()) {
+
+      // Run a query (using the mock data source) and print a summary.
+      String sql = "SELECT id_i FROM `mock`.employee_1M ORDER BY id_i";
+      QuerySummary summary = client.queryBuilder().sql(sql).run();
+      assertEquals(1_000_000, summary.recordCount());
+      System.out.println(String.format("Sorted %,d records in %d batches.", summary.recordCount(), summary.batchCount()));
+      System.out.println(String.format("Query Id: %s, elapsed: %d ms", summary.queryIdString(), summary.runTimeMs()));
+      client.parseProfile(summary.queryIdString()).print();
+    }
+  }
+ * </code></pre>
+ * <p>
+ * Typical usage for the logging fixture: <pre><code>
+ * {@literal @}Test
+ * public void myTest() {
+ *   LogFixtureBuilder logBuilder = LogFixture.builder()
+ *          .toConsole()
+ *          .disable() // Silence all other loggers
+ *          .logger(ExternalSortBatch.class, Level.DEBUG);
+ *   try (LogFixture logs = logBuilder.build()) {
+ *     // Test code here
+ *   }
+ * }</code></pre>
+ *
+ */
+package org.apache.drill.test;


Mime
View raw message