camel-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From davscl...@apache.org
Subject [07/10] [CAMEL-7249] Working version of camel-hdfs2 component
Date Wed, 12 Mar 2014 13:18:40 GMT
http://git-wip-us.apache.org/repos/asf/camel/blob/af7661ab/components/camel-hdfs2/src/main/java/org/apache/camel/component/hdfs2/HdfsProducer.java
----------------------------------------------------------------------
diff --git a/components/camel-hdfs2/src/main/java/org/apache/camel/component/hdfs2/HdfsProducer.java b/components/camel-hdfs2/src/main/java/org/apache/camel/component/hdfs2/HdfsProducer.java
new file mode 100644
index 0000000..fa457ba
--- /dev/null
+++ b/components/camel-hdfs2/src/main/java/org/apache/camel/component/hdfs2/HdfsProducer.java
@@ -0,0 +1,298 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.component.hdfs2;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import javax.security.auth.login.Configuration;
+
+import org.apache.camel.Exchange;
+import org.apache.camel.Expression;
+import org.apache.camel.impl.DefaultProducer;
+import org.apache.camel.util.IOHelper;
+import org.apache.camel.util.StringHelper;
+
+/**
+ * Camel producer that writes exchange bodies (and optional keys) to HDFS files.
+ * Output may be rolled over into multiple files according to the configured
+ * {@link SplitStrategy} rules (bytes written, messages written, or idle time).
+ */
+public class HdfsProducer extends DefaultProducer {
+
+    private final HdfsConfiguration config;
+    // base path on the hdfs file system; generated or header-provided file names are appended to it
+    private final StringBuilder hdfsPath;
+    // flipped to true by the IdleCheck task when the stream has been idle long enough to close
+    private final AtomicBoolean idle = new AtomicBoolean(false);
+    // only created in doStart when an IDLE split strategy is configured
+    private volatile ScheduledExecutorService scheduler;
+    // current output stream; lazily created and replaced whenever a split occurs
+    private volatile HdfsOutputStream ostream;
+
+    /**
+     * Pairs a {@link SplitStrategyType} with its threshold value.
+     */
+    public static final class SplitStrategy {
+        private SplitStrategyType type;
+        private long value;
+
+        public SplitStrategy(SplitStrategyType type, long value) {
+            this.type = type;
+            this.value = value;
+        }
+
+        public SplitStrategyType getType() {
+            return type;
+        }
+
+        public long getValue() {
+            return value;
+        }
+    }
+
+    /**
+     * Conditions under which the current output file is closed and a new one started.
+     */
+    public enum SplitStrategyType {
+        BYTES {
+            @Override
+            public boolean split(HdfsOutputStream oldOstream, long value, HdfsProducer producer) {
+                // split once the current file has grown to at least value bytes
+                return oldOstream.getNumOfWrittenBytes() >= value;
+            }
+        },
+
+        MESSAGES {
+            @Override
+            public boolean split(HdfsOutputStream oldOstream, long value, HdfsProducer producer) {
+                // split once value messages have been written to the current file
+                return oldOstream.getNumOfWrittenMessages() >= value;
+            }
+        },
+
+        IDLE {
+            @Override
+            public boolean split(HdfsOutputStream oldOstream, long value, HdfsProducer producer) {
+                // the IdleCheck background task flips this flag; value is not consulted here
+                return producer.idle.get();
+            }
+        };
+
+        public abstract boolean split(HdfsOutputStream oldOstream, long value, HdfsProducer producer);
+    }
+
+    public HdfsProducer(HdfsEndpoint endpoint, HdfsConfiguration config) {
+        super(endpoint);
+        this.config = config;
+        this.hdfsPath = config.getFileSystemType().getHdfsPath(config);
+    }
+
+    @Override
+    public HdfsEndpoint getEndpoint() {
+        return (HdfsEndpoint) super.getEndpoint();
+    }
+
+    @Override
+    protected void doStart() throws Exception {
+        // need to remember auth as Hadoop will override that, which otherwise means the Auth is broken afterwards
+        Configuration auth = HdfsComponent.getJAASConfiguration();
+        try {
+            super.doStart();
+
+            // setup hdfs if configured to do on startup
+            if (getEndpoint().getConfig().isConnectOnStartup()) {
+                ostream = setupHdfs(true);
+            }
+
+            // start the background idle-check task only when an IDLE split strategy is configured
+            SplitStrategy idleStrategy = null;
+            for (SplitStrategy strategy : config.getSplitStrategies()) {
+                if (strategy.type == SplitStrategyType.IDLE) {
+                    idleStrategy = strategy;
+                    break;
+                }
+            }
+            if (idleStrategy != null) {
+                scheduler = getEndpoint().getCamelContext().getExecutorServiceManager().newSingleThreadScheduledExecutor(this, "HdfsIdleCheck");
+                log.debug("Creating IdleCheck task scheduled to run every {} millis", config.getCheckIdleInterval());
+                scheduler.scheduleAtFixedRate(new IdleCheck(idleStrategy), config.getCheckIdleInterval(), config.getCheckIdleInterval(), TimeUnit.MILLISECONDS);
+            }
+        } finally {
+            // restore the JAAS configuration that Hadoop may have replaced during startup
+            HdfsComponent.setJAASConfiguration(auth);
+        }
+    }
+
+    /**
+     * Lazily connects to hdfs and opens the output stream, returning the existing
+     * stream if one is already open. Synchronized so that concurrent exchanges
+     * create at most one stream.
+     *
+     * @param onStartup true when invoked from doStart (logs at INFO instead of DEBUG)
+     * @return the open output stream
+     */
+    private synchronized HdfsOutputStream setupHdfs(boolean onStartup) throws Exception {
+        if (ostream != null) {
+            return ostream;
+        }
+
+        StringBuilder actualPath = new StringBuilder(hdfsPath);
+        if (config.getSplitStrategies().size() > 0) {
+            // when splitting, every file gets a generated (uuid based) name
+            actualPath = newFileName();
+        }
+
+        // if we are starting up then log at info level, and if runtime then log at debug level to not flood the log
+        if (onStartup) {
+            log.info("Connecting to hdfs file-system {}:{}/{} (may take a while if connection is not available)", new Object[]{config.getHostName(), config.getPort(), actualPath.toString()});
+        } else {
+            if (log.isDebugEnabled()) {
+                log.debug("Connecting to hdfs file-system {}:{}/{} (may take a while if connection is not available)", new Object[]{config.getHostName(), config.getPort(), actualPath.toString()});
+            }
+        }
+
+        HdfsOutputStream answer = HdfsOutputStream.createOutputStream(actualPath.toString(), config);
+
+        if (onStartup) {
+            log.info("Connected to hdfs file-system {}:{}/{}", new Object[]{config.getHostName(), config.getPort(), actualPath.toString()});
+        } else {
+            if (log.isDebugEnabled()) {
+                log.debug("Connected to hdfs file-system {}:{}/{}", new Object[]{config.getHostName(), config.getPort(), actualPath.toString()});
+            }
+        }
+
+        return answer;
+    }
+
+    @Override
+    protected void doStop() throws Exception {
+        super.doStop();
+        // stop the idle checker before closing the stream it monitors
+        if (scheduler != null) {
+            getEndpoint().getCamelContext().getExecutorServiceManager().shutdown(scheduler);
+            scheduler = null;
+        }
+        if (ostream != null) {
+            IOHelper.close(ostream, "output stream", log);
+            ostream = null;
+        }
+    }
+
+    @Override
+    public void process(Exchange exchange) throws Exception {
+        // need to remember auth as Hadoop will override that, which otherwise means the Auth is broken afterwards
+        Configuration auth = HdfsComponent.getJAASConfiguration();
+        try {
+            doProcess(exchange);
+        } finally {
+            HdfsComponent.setJAASConfiguration(auth);
+        }
+    }
+
+    void doProcess(Exchange exchange) throws Exception {
+        Object body = exchange.getIn().getBody();
+        Object key = exchange.getIn().getHeader(HdfsHeader.KEY.name());
+
+        // if an explicit filename is specified, close any existing stream and append the filename to the hdfsPath
+        if (exchange.getIn().getHeader(Exchange.FILE_NAME) != null) {
+            if (ostream != null) {
+                IOHelper.close(ostream, "output stream", log);
+            }
+            StringBuilder actualPath = getHdfsPathUsingFileNameHeader(exchange);
+            ostream = HdfsOutputStream.createOutputStream(actualPath.toString(), config);
+        } else if (ostream == null) {
+            // must have ostream
+            ostream = setupHdfs(false);
+        }
+
+        // roll over to a new file when any configured split strategy triggers
+        boolean split = false;
+        List<SplitStrategy> strategies = config.getSplitStrategies();
+        for (SplitStrategy splitStrategy : strategies) {
+            split |= splitStrategy.getType().split(ostream, splitStrategy.value, this);
+        }
+
+        if (split) {
+            if (ostream != null) {
+                IOHelper.close(ostream, "output stream", log);
+            }
+            StringBuilder actualPath = newFileName();
+            ostream = HdfsOutputStream.createOutputStream(actualPath.toString(), config);
+        }
+
+        String path = ostream.getActualPath();
+        log.trace("Writing body to hdfs-file {}", path);
+        ostream.append(key, body, exchange.getContext().getTypeConverter());
+
+        // a write just happened, so the stream is no longer idle
+        idle.set(false);
+
+        // close if we do not have idle checker task to do this for us
+        boolean close = scheduler == null;
+        // but user may have a header to explicitly control the close
+        Boolean closeHeader = exchange.getIn().getHeader(HdfsConstants.HDFS_CLOSE, Boolean.class);
+        if (closeHeader != null) {
+            close = closeHeader;
+        }
+
+        // if no idle checker then we need to explicitly close the stream after usage
+        if (close) {
+            try {
+                HdfsProducer.this.log.trace("Closing stream");
+                ostream.close();
+                ostream = null;
+            } catch (IOException e) {
+                // ignore
+            }
+        }
+
+        log.debug("Wrote body to hdfs-file {}", path);
+    }
+
+    /**
+     * helper method to construct the hdfsPath from the CamelFileName String or Expression
+     * @param exchange the current exchange carrying the {@link Exchange#FILE_NAME} header
+     * @return the base hdfs path with the resolved file name appended
+     */
+    private StringBuilder getHdfsPathUsingFileNameHeader(Exchange exchange) {
+        StringBuilder actualPath = new StringBuilder(hdfsPath);
+        String fileName = "";
+        Object value = exchange.getIn().getHeader(Exchange.FILE_NAME);
+        if (value instanceof String) {
+            fileName = exchange.getContext().getTypeConverter().convertTo(String.class, exchange, value);
+        } else if (value instanceof Expression) {
+            fileName =  ((Expression) value).evaluate(exchange, String.class);
+        }
+        return actualPath.append(fileName);
+    }
+
+    // builds a new file name by appending a sanitized uuid to the base hdfs path
+    private StringBuilder newFileName() {
+        StringBuilder actualPath = new StringBuilder(hdfsPath);
+        actualPath.append(StringHelper.sanitize(getEndpoint().getCamelContext().getUuidGenerator().generateUuid()));
+        return actualPath;
+    }
+
+    /**
+     * Idle check background task
+     */
+    private final class IdleCheck implements Runnable {
+
+        private final SplitStrategy strategy;
+
+        private IdleCheck(SplitStrategy strategy) {
+            this.strategy = strategy;
+        }
+
+        @Override
+        public void run() {
+            // only run if ostream has been created
+            if (ostream == null) {
+                return;
+            }
+
+            HdfsProducer.this.log.trace("IdleCheck running");
+
+            // close the stream once it has been idle longer than the configured threshold
+            // NOTE(review): the stream is closed here but the ostream field is not set to null;
+            // doProcess relies on the IDLE split strategy seeing the idle flag to open a new
+            // stream before the next write — confirm there is no window where a write can hit
+            // the already-closed stream
+            if (System.currentTimeMillis() - ostream.getLastAccess() > strategy.value && !idle.get() && !ostream.isBusy().get()) {
+                idle.set(true);
+                try {
+                    HdfsProducer.this.log.trace("Closing stream as idle");
+                    ostream.close();
+                } catch (IOException e) {
+                    // ignore
+                }
+            }
+        }
+
+        @Override
+        public String toString() {
+            return "IdleCheck";
+        }
+    }
+}
+

http://git-wip-us.apache.org/repos/asf/camel/blob/af7661ab/components/camel-hdfs2/src/main/java/org/apache/camel/component/hdfs2/HdfsWritableFactories.java
----------------------------------------------------------------------
diff --git a/components/camel-hdfs2/src/main/java/org/apache/camel/component/hdfs2/HdfsWritableFactories.java b/components/camel-hdfs2/src/main/java/org/apache/camel/component/hdfs2/HdfsWritableFactories.java
new file mode 100644
index 0000000..be7ecf0
--- /dev/null
+++ b/components/camel-hdfs2/src/main/java/org/apache/camel/component/hdfs2/HdfsWritableFactories.java
@@ -0,0 +1,310 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.component.hdfs2;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+
+import org.apache.camel.RuntimeCamelException;
+import org.apache.camel.TypeConverter;
+import org.apache.camel.util.IOHelper;
+import org.apache.hadoop.io.BooleanWritable;
+import org.apache.hadoop.io.ByteWritable;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.DoubleWritable;
+import org.apache.hadoop.io.FloatWritable;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
+
+/**
+ * Factories that convert between Camel message payloads and Hadoop {@link Writable}
+ * instances. Each factory also reports the value's size in bytes through the
+ * supplied {@link Holder} so that callers can track how many bytes are written.
+ */
+public class HdfsWritableFactories {
+
+    /**
+     * Logical value types mapped to the hadoop {@link WritableComparable} class used to store them.
+     */
+    public enum WritableType {
+        NULL {
+            @Override
+            public Class<NullWritable> getWritableClass() {
+                return NullWritable.class;
+            }
+        },
+
+        BOOLEAN {
+            @Override
+            public Class<BooleanWritable> getWritableClass() {
+                return BooleanWritable.class;
+            }
+        },
+
+        BYTE {
+            @Override
+            public Class<ByteWritable> getWritableClass() {
+                return ByteWritable.class;
+            }
+        },
+
+        INT {
+            @Override
+            public Class<IntWritable> getWritableClass() {
+                return IntWritable.class;
+            }
+        },
+
+        FLOAT {
+            @Override
+            public Class<FloatWritable> getWritableClass() {
+                return FloatWritable.class;
+            }
+        },
+
+        LONG {
+            @Override
+            public Class<LongWritable> getWritableClass() {
+                return LongWritable.class;
+            }
+        },
+
+        DOUBLE {
+            @Override
+            public Class<DoubleWritable> getWritableClass() {
+                return DoubleWritable.class;
+            }
+        },
+
+        TEXT {
+            @Override
+            public Class<Text> getWritableClass() {
+                return Text.class;
+            }
+        },
+
+        BYTES {
+            @Override
+            public Class<BytesWritable> getWritableClass() {
+                return BytesWritable.class;
+            }
+        };
+
+        @SuppressWarnings("rawtypes")
+        public abstract Class<? extends WritableComparable> getWritableClass();
+    }
+
+    interface HdfsWritableFactory {
+
+        // builds a Writable from the value and records the value's byte size in the holder
+        Writable create(Object value, TypeConverter typeConverter, Holder<Integer> size);
+
+        // extracts the java value from the Writable and records its byte size in the holder
+        Object read(Writable writable, Holder<Integer> size);
+    }
+
+    /**
+     * Factory for {@link NullWritable}: the value is always absent and the size is 0.
+     */
+    public static final class HdfsNullWritableFactory implements HdfsWritableFactory {
+
+        @Override
+        public Writable create(Object value, TypeConverter typeConverter, Holder<Integer> size) {
+            size.value = 0;
+            return NullWritable.get();
+        }
+
+        @Override
+        public Object read(Writable writable, Holder<Integer> size) {
+            size.value = 0;
+            return null;
+        }
+    }
+
+    /**
+     * Factory for single byte values ({@link ByteWritable}); counted as 1 byte.
+     */
+    public static final class HdfsByteWritableFactory implements HdfsWritableFactory {
+
+        private static final int SIZE = 1;
+
+        @Override
+        public Writable create(Object value, TypeConverter typeConverter, Holder<Integer> size) {
+            size.value = SIZE;
+            ByteWritable writable = new ByteWritable();
+            writable.set(typeConverter.convertTo(Byte.class, value));
+            return writable;
+        }
+
+        @Override
+        public Object read(Writable writable, Holder<Integer> size) {
+            size.value = SIZE;
+            return ((ByteWritable) writable).get();
+        }
+    }
+
+    /**
+     * Factory for boolean values ({@link BooleanWritable}); counted as 1 byte.
+     */
+    public static final class HdfsBooleanWritableFactory implements HdfsWritableFactory {
+
+        private static final int SIZE = 1;
+
+        @Override
+        public Writable create(Object value, TypeConverter typeConverter, Holder<Integer> size) {
+            size.value = SIZE;
+            BooleanWritable writable = new BooleanWritable();
+            writable.set(typeConverter.convertTo(Boolean.class, value));
+            return writable;
+        }
+
+        @Override
+        public Object read(Writable writable, Holder<Integer> size) {
+            size.value = SIZE;
+            return ((BooleanWritable) writable).get();
+        }
+    }
+
+    /**
+     * Factory for raw byte payloads carried as a {@link ByteBuffer} ({@link BytesWritable}).
+     */
+    public static final class HdfsBytesWritableFactory implements HdfsWritableFactory {
+
+        @Override
+        public Writable create(Object value, TypeConverter typeConverter, Holder<Integer> size) {
+            BytesWritable writable = new BytesWritable();
+            // NOTE(review): the value is cast directly rather than going through the type
+            // converter, and ByteBuffer.array() throws for direct/read-only buffers while
+            // array().length ignores position/limit — confirm callers always pass heap buffers
+            ByteBuffer bb = (ByteBuffer) value;
+            writable.set(bb.array(), 0, bb.array().length);
+            size.value = bb.array().length;
+            return writable;
+        }
+
+        @Override
+        public Object read(Writable writable, Holder<Integer> size) {
+            // copy only the valid region (getBytes() may return a larger backing array)
+            size.value = ((BytesWritable) writable).getLength();
+            ByteBuffer bb = ByteBuffer.allocate(size.value);
+            bb.put(((BytesWritable) writable).getBytes(), 0, size.value);
+            return bb;
+        }
+    }
+
+    /**
+     * Factory for double values ({@link DoubleWritable}); counted as 8 bytes.
+     */
+    public static final class HdfsDoubleWritableFactory implements HdfsWritableFactory {
+
+        private static final int SIZE = 8;
+
+        @Override
+        public Writable create(Object value, TypeConverter typeConverter, Holder<Integer> size) {
+            size.value = SIZE;
+            DoubleWritable writable = new DoubleWritable();
+            writable.set(typeConverter.convertTo(Double.class, value));
+            return writable;
+        }
+
+        @Override
+        public Object read(Writable writable, Holder<Integer> size) {
+            size.value = SIZE;
+            return ((DoubleWritable) writable).get();
+        }
+    }
+
+    /**
+     * Factory for float values ({@link FloatWritable}); counted as 4 bytes.
+     */
+    public static final class HdfsFloatWritableFactory implements HdfsWritableFactory {
+
+        private static final int SIZE = 4;
+
+        @Override
+        public Writable create(Object value, TypeConverter typeConverter, Holder<Integer> size) {
+            size.value = SIZE;
+            FloatWritable writable = new FloatWritable();
+            writable.set(typeConverter.convertTo(Float.class, value));
+            return writable;
+        }
+
+        @Override
+        public Object read(Writable writable, Holder<Integer> size) {
+            size.value = SIZE;
+            return ((FloatWritable) writable).get();
+        }
+    }
+
+    /**
+     * Factory for int values ({@link IntWritable}); counted as 4 bytes.
+     */
+    public static final class HdfsIntWritableFactory implements HdfsWritableFactory {
+
+        private static final int SIZE = 4;
+
+        @Override
+        public Writable create(Object value, TypeConverter typeConverter, Holder<Integer> size) {
+            size.value = SIZE;
+            IntWritable writable = new IntWritable();
+            writable.set(typeConverter.convertTo(Integer.class, value));
+            return writable;
+        }
+
+        @Override
+        public Object read(Writable writable, Holder<Integer> size) {
+            size.value = SIZE;
+            return ((IntWritable) writable).get();
+        }
+    }
+
+    /**
+     * Factory for long values ({@link LongWritable}); counted as 8 bytes.
+     */
+    public static final class HdfsLongWritableFactory implements HdfsWritableFactory {
+
+        private static final int SIZE = 8;
+
+        @Override
+        public Writable create(Object value, TypeConverter typeConverter, Holder<Integer> size) {
+            size.value = SIZE;
+            LongWritable writable = new LongWritable();
+            writable.set(typeConverter.convertTo(Long.class, value));
+            return writable;
+        }
+
+        @Override
+        public Object read(Writable writable, Holder<Integer> size) {
+            size.value = SIZE;
+            return ((LongWritable) writable).get();
+        }
+    }
+
+    /**
+     * Factory for string values ({@link Text}); size is the encoded byte length.
+     */
+    public static final class HdfsTextWritableFactory implements HdfsWritableFactory {
+
+        @Override
+        public Writable create(Object value, TypeConverter typeConverter, Holder<Integer> size) {
+            Text writable = new Text();
+            writable.set(typeConverter.convertTo(String.class, value));
+            size.value = writable.getBytes().length;
+            return writable;
+        }
+
+        @Override
+        public Object read(Writable writable, Holder<Integer> size) {
+            size.value = ((Text) writable).getLength();
+            return writable.toString();
+        }
+    }
+
+    /**
+     * Fallback factory: converts any value to an InputStream and stores its bytes
+     * as a {@link BytesWritable}. Reading back is not supported (returns null).
+     */
+    public static final class HdfsObjectWritableFactory implements HdfsWritableFactory {
+
+        @Override
+        public Writable create(Object value, TypeConverter typeConverter, Holder<Integer> size) {
+            InputStream is = null;
+            try {
+                is = typeConverter.convertTo(InputStream.class, value);
+                // the whole stream is buffered in memory before it is written out
+                ByteArrayOutputStream bos = new ByteArrayOutputStream();
+                IOUtils.copyBytes(is, bos, HdfsConstants.DEFAULT_BUFFERSIZE, false);
+                BytesWritable writable = new BytesWritable();
+                writable.set(bos.toByteArray(), 0, bos.toByteArray().length);
+                size.value = bos.toByteArray().length;
+                return writable;
+            } catch (IOException ex) {
+                throw new RuntimeCamelException(ex);
+            } finally {
+                IOHelper.close(is);
+            }
+        }
+
+        @Override
+        public Object read(Writable writable, Holder<Integer> size) {
+            size.value = 0;
+            return null;
+        }
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/camel/blob/af7661ab/components/camel-hdfs2/src/main/java/org/apache/camel/component/hdfs2/Holder.java
----------------------------------------------------------------------
diff --git a/components/camel-hdfs2/src/main/java/org/apache/camel/component/hdfs2/Holder.java b/components/camel-hdfs2/src/main/java/org/apache/camel/component/hdfs2/Holder.java
new file mode 100644
index 0000000..58fbdf5
--- /dev/null
+++ b/components/camel-hdfs2/src/main/java/org/apache/camel/component/hdfs2/Holder.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.component.hdfs2;
+
+/**
+ * Simple mutable holder for a single value, used to return a secondary result
+ * (such as a byte size) from a method call via an out-parameter.
+ *
+ * @param <T> the type of the held value
+ */
+public final class Holder<T> {
+
+    /**
+     * The value contained in the holder.
+     **/
+    public T value;
+      
+    /**
+     * Creates a new holder with a <code>null</code> value.
+     **/
+    public Holder() {
+    }
+
+    /**
+     * Create a new holder with the specified value.
+     *
+     * @param value The value to be stored in the holder.
+     **/
+    public Holder(T value) {
+        this.value = value;
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/camel/blob/af7661ab/components/camel-hdfs2/src/main/resources/META-INF/LICENSE.txt
----------------------------------------------------------------------
diff --git a/components/camel-hdfs2/src/main/resources/META-INF/LICENSE.txt b/components/camel-hdfs2/src/main/resources/META-INF/LICENSE.txt
new file mode 100755
index 0000000..6b0b127
--- /dev/null
+++ b/components/camel-hdfs2/src/main/resources/META-INF/LICENSE.txt
@@ -0,0 +1,203 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+

http://git-wip-us.apache.org/repos/asf/camel/blob/af7661ab/components/camel-hdfs2/src/main/resources/META-INF/NOTICE.txt
----------------------------------------------------------------------
diff --git a/components/camel-hdfs2/src/main/resources/META-INF/NOTICE.txt b/components/camel-hdfs2/src/main/resources/META-INF/NOTICE.txt
new file mode 100644
index 0000000..2e215bf
--- /dev/null
+++ b/components/camel-hdfs2/src/main/resources/META-INF/NOTICE.txt
@@ -0,0 +1,11 @@
+   =========================================================================
+   ==  NOTICE file corresponding to the section 4 d of                    ==
+   ==  the Apache License, Version 2.0,                                   ==
+   ==  in this case for the Apache Camel distribution.                    ==
+   =========================================================================
+
+   This product includes software developed by
+   The Apache Software Foundation (http://www.apache.org/).
+
+   Please read the different LICENSE files present in the licenses directory of
+   this distribution.

http://git-wip-us.apache.org/repos/asf/camel/blob/af7661ab/components/camel-hdfs2/src/main/resources/META-INF/services/org/apache/camel/component/hdfs2
----------------------------------------------------------------------
diff --git a/components/camel-hdfs2/src/main/resources/META-INF/services/org/apache/camel/component/hdfs2 b/components/camel-hdfs2/src/main/resources/META-INF/services/org/apache/camel/component/hdfs2
new file mode 100644
index 0000000..acde85a
--- /dev/null
+++ b/components/camel-hdfs2/src/main/resources/META-INF/services/org/apache/camel/component/hdfs2
@@ -0,0 +1,18 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+class=org.apache.camel.component.hdfs2.HdfsComponent

http://git-wip-us.apache.org/repos/asf/camel/blob/af7661ab/components/camel-hdfs2/src/test/java/org/apache/camel/component/hdfs2/FromFileToHdfsTest.java
----------------------------------------------------------------------
diff --git a/components/camel-hdfs2/src/test/java/org/apache/camel/component/hdfs2/FromFileToHdfsTest.java b/components/camel-hdfs2/src/test/java/org/apache/camel/component/hdfs2/FromFileToHdfsTest.java
new file mode 100644
index 0000000..ebb0b1e
--- /dev/null
+++ b/components/camel-hdfs2/src/test/java/org/apache/camel/component/hdfs2/FromFileToHdfsTest.java
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.component.hdfs2;
+
+import java.io.File;
+
+import org.apache.camel.Exchange;
+import org.apache.camel.builder.NotifyBuilder;
+import org.apache.camel.builder.RouteBuilder;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ *
+ */
+/**
+ * End-to-end test: consumes files from a local inbox directory with the file
+ * component and writes them to an hdfs2 endpoint backed by the LOCAL file
+ * system, so no real HDFS cluster is required.
+ */
+public class FromFileToHdfsTest extends HdfsTestSupport {
+
+    // absolute path keeps the hdfs2 endpoint URI valid regardless of the working directory
+    private static final Path TEMP_DIR = new Path(new File("target/outbox/").getAbsolutePath());
+
+    @Before
+    public void setUp() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+        // start each test from clean inbox/outbox directories
+        deleteDirectory("target/inbox");
+        deleteDirectory("target/outbox");
+        super.setUp();
+    }
+
+    @Override
+    public void tearDown() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+
+        super.tearDown();
+        // remove the output directory written by the route
+        Configuration conf = new Configuration();
+        Path dir = new Path("target/outbox");
+        FileSystem fs = FileSystem.get(dir.toUri(), conf);
+        fs.delete(dir, true);
+    }
+
+    @Test
+    public void testFileToHdfs() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+
+        NotifyBuilder notify = new NotifyBuilder(context).whenDone(1).create();
+
+        template.sendBodyAndHeader("file:target/inbox", "Hello World", Exchange.FILE_NAME, "hello.txt");
+
+        // FIX: assert the exchange really completed instead of discarding the
+        // result; otherwise the file checks below could run before the route is done
+        assertTrue(notify.matchesMockWaitTime());
+
+        File delete = new File("target/inbox/hello.txt");
+        assertTrue("File should be deleted " + delete, !delete.exists());
+
+        File create = new File(TEMP_DIR + "/output.txt");
+        assertTrue("File should be created " + create, create.exists());
+    }
+
+    @Test
+    public void testTwoFilesToHdfs() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+
+        NotifyBuilder notify = new NotifyBuilder(context).whenDone(2).create();
+
+        template.sendBodyAndHeader("file:target/inbox", "Hello World", Exchange.FILE_NAME, "hello.txt");
+        template.sendBodyAndHeader("file:target/inbox", "Bye World", Exchange.FILE_NAME, "bye.txt");
+
+        // FIX: assert completion of both exchanges before checking the file system
+        assertTrue(notify.matchesMockWaitTime());
+
+        File delete = new File("target/inbox/hello.txt");
+        assertTrue("File should be deleted " + delete, !delete.exists());
+        delete = new File("target/inbox/bye.txt");
+        assertTrue("File should be deleted " + delete, !delete.exists());
+
+        File create = new File(TEMP_DIR + "/output.txt");
+        assertTrue("File should be created " + create, create.exists());
+    }
+
+    @Override
+    protected RouteBuilder createRouteBuilder() throws Exception {
+        return new RouteBuilder() {
+            @Override
+            public void configure() throws Exception {
+                // delete=true removes the consumed file from the inbox
+                from("file:target/inbox?delete=true")
+                    .to("hdfs2:///" + TEMP_DIR.toUri() + "/output.txt?fileSystemType=LOCAL");
+            }
+        };
+    }
+}

http://git-wip-us.apache.org/repos/asf/camel/blob/af7661ab/components/camel-hdfs2/src/test/java/org/apache/camel/component/hdfs2/HdfsConsumerTest.java
----------------------------------------------------------------------
diff --git a/components/camel-hdfs2/src/test/java/org/apache/camel/component/hdfs2/HdfsConsumerTest.java b/components/camel-hdfs2/src/test/java/org/apache/camel/component/hdfs2/HdfsConsumerTest.java
new file mode 100644
index 0000000..8588030
--- /dev/null
+++ b/components/camel-hdfs2/src/test/java/org/apache/camel/component/hdfs2/HdfsConsumerTest.java
@@ -0,0 +1,391 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.component.hdfs2;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.camel.builder.RouteBuilder;
+import org.apache.camel.component.mock.MockEndpoint;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.ArrayFile;
+import org.apache.hadoop.io.BooleanWritable;
+import org.apache.hadoop.io.ByteWritable;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.DoubleWritable;
+import org.apache.hadoop.io.FloatWritable;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.SequenceFile.Writer;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.util.Progressable;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.apache.hadoop.io.SequenceFile.CompressionType;
+
+/**
+ * Tests the hdfs2 consumer against a plain file and a variety of Hadoop
+ * writable types stored in sequence/array files, using the LOCAL file system
+ * so no real HDFS cluster is required.
+ */
+public class HdfsConsumerTest extends HdfsTestSupport {
+
+    @Override
+    public boolean isUseRouteBuilder() {
+        // routes are added per test, after the fixture file has been written
+        return false;
+    }
+
+    @Before
+    public void setUp() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+
+        // must be able to get security configuration
+        try {
+            javax.security.auth.login.Configuration.getConfiguration();
+        } catch (Exception e) {
+            // best-effort: skip the test when no JAAS configuration is available
+            return;
+        }
+
+        deleteDirectory("target/test");
+        super.setUp();
+    }
+    
+    @Test
+    public void testSimpleConsumer() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+
+        final Path file = new Path(new File("target/test/test-camel-normal-file").getAbsolutePath());
+        Configuration conf = new Configuration();
+        FileSystem fs = FileSystem.get(file.toUri(), conf);
+        FSDataOutputStream out = fs.create(file);
+        for (int i = 0; i < 1024; ++i) {
+            out.write(("PIPPO" + i).getBytes("UTF-8"));
+            out.flush();
+        }
+        out.close();
+
+        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
+        // the file is just over 4096 bytes, so it is consumed as two chunks
+        resultEndpoint.expectedMessageCount(2);
+
+        context.addRoutes(new RouteBuilder() {
+            public void configure() {
+                from("hdfs2:///" + file.toUri() + "?fileSystemType=LOCAL&chunkSize=4096&initialDelay=0").to("mock:result");
+            }
+        });
+        context.start();
+
+        resultEndpoint.assertIsSatisfied();
+    }
+
+    @Test
+    public void testReadBoolean() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+
+        final Path file = new Path(new File("target/test/test-camel-boolean").getAbsolutePath());
+        Configuration conf = new Configuration();
+        SequenceFile.Writer writer = createWriter(conf, file, NullWritable.class, BooleanWritable.class);
+        NullWritable keyWritable = NullWritable.get();
+        BooleanWritable valueWritable = new BooleanWritable();
+        valueWritable.set(true);
+        writer.append(keyWritable, valueWritable);
+        writer.sync();
+        writer.close();
+
+        context.addRoutes(new RouteBuilder() {
+            public void configure() {
+                from("hdfs2:///" + file.toUri() + "?fileSystemType=LOCAL&fileType=SEQUENCE_FILE&initialDelay=0").to("mock:result");
+            }
+        });
+        context.start();
+
+        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
+        resultEndpoint.expectedMessageCount(1);
+        resultEndpoint.assertIsSatisfied();
+    }
+
+    @Test
+    public void testReadByte() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+
+        final Path file = new Path(new File("target/test/test-camel-byte").getAbsolutePath());
+        Configuration conf = new Configuration();
+        SequenceFile.Writer writer = createWriter(conf, file, NullWritable.class, ByteWritable.class);
+        NullWritable keyWritable = NullWritable.get();
+        ByteWritable valueWritable = new ByteWritable();
+        byte value = 3;
+        valueWritable.set(value);
+        writer.append(keyWritable, valueWritable);
+        writer.sync();
+        writer.close();
+
+        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
+        resultEndpoint.expectedMessageCount(1);
+        resultEndpoint.message(0).body(byte.class).isEqualTo(3);
+
+        context.addRoutes(new RouteBuilder() {
+            public void configure() {
+                from("hdfs2:///" + file.toUri() + "?fileSystemType=LOCAL&fileType=SEQUENCE_FILE&initialDelay=0").to("mock:result");
+            }
+        });
+        context.start();
+
+        resultEndpoint.assertIsSatisfied();
+    }
+
+    @Test
+    public void testReadFloat() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+
+        final Path file = new Path(new File("target/test/test-camel-float").getAbsolutePath());
+        Configuration conf = new Configuration();
+        SequenceFile.Writer writer = createWriter(conf, file, NullWritable.class, FloatWritable.class);
+        NullWritable keyWritable = NullWritable.get();
+        FloatWritable valueWritable = new FloatWritable();
+        float value = 3.1415926535f;
+        valueWritable.set(value);
+        writer.append(keyWritable, valueWritable);
+        writer.sync();
+        writer.close();
+
+        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
+        resultEndpoint.expectedMessageCount(1);
+
+        context.addRoutes(new RouteBuilder() {
+            public void configure() {
+                // FIX: was "??fileSystemType=..." (doubled '?'), a malformed query string
+                from("hdfs2:///" + file.toUri() + "?fileSystemType=LOCAL&fileType=SEQUENCE_FILE&initialDelay=0").to("mock:result");
+            }
+        });
+        context.start();
+
+        resultEndpoint.assertIsSatisfied();
+    }
+
+    @Test
+    public void testReadDouble() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+
+        final Path file = new Path(new File("target/test/test-camel-double").getAbsolutePath());
+        Configuration conf = new Configuration();
+        SequenceFile.Writer writer = createWriter(conf, file, NullWritable.class, DoubleWritable.class);
+        NullWritable keyWritable = NullWritable.get();
+        DoubleWritable valueWritable = new DoubleWritable();
+        double value = 3.1415926535;
+        valueWritable.set(value);
+        writer.append(keyWritable, valueWritable);
+        writer.sync();
+        writer.close();
+
+        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
+        resultEndpoint.expectedMessageCount(1);
+
+        context.addRoutes(new RouteBuilder() {
+            public void configure() {
+                // FIX: was "??fileSystemType=..." (doubled '?'), a malformed query string
+                from("hdfs2:///" + file.toUri() + "?fileSystemType=LOCAL&fileType=SEQUENCE_FILE&initialDelay=0").to("mock:result");
+            }
+        });
+        context.start();
+
+        resultEndpoint.assertIsSatisfied();
+    }
+
+    @Test
+    public void testReadInt() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+
+        final Path file = new Path(new File("target/test/test-camel-int").getAbsolutePath());
+        Configuration conf = new Configuration();
+        SequenceFile.Writer writer = createWriter(conf, file, NullWritable.class, IntWritable.class);
+        NullWritable keyWritable = NullWritable.get();
+        IntWritable valueWritable = new IntWritable();
+        int value = 314159265;
+        valueWritable.set(value);
+        writer.append(keyWritable, valueWritable);
+        writer.sync();
+        writer.close();
+
+        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
+        resultEndpoint.expectedMessageCount(1);
+
+        context.addRoutes(new RouteBuilder() {
+            public void configure() {
+                from("hdfs2:///" + file.toUri() + "?fileSystemType=LOCAL&fileType=SEQUENCE_FILE&initialDelay=0").to("mock:result");
+            }
+        });
+        context.start();
+
+        resultEndpoint.assertIsSatisfied();
+    }
+
+    @Test
+    public void testReadLong() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+
+        final Path file = new Path(new File("target/test/test-camel-long").getAbsolutePath());
+        Configuration conf = new Configuration();
+        SequenceFile.Writer writer = createWriter(conf, file, NullWritable.class, LongWritable.class);
+        NullWritable keyWritable = NullWritable.get();
+        LongWritable valueWritable = new LongWritable();
+        long value = 31415926535L;
+        valueWritable.set(value);
+        writer.append(keyWritable, valueWritable);
+        writer.sync();
+        writer.close();
+
+        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
+        resultEndpoint.expectedMessageCount(1);
+
+        context.addRoutes(new RouteBuilder() {
+            public void configure() {
+                from("hdfs2:///" + file.toUri() + "?fileSystemType=LOCAL&fileType=SEQUENCE_FILE&initialDelay=0").to("mock:result");
+            }
+        });
+        context.start();
+
+        resultEndpoint.assertIsSatisfied();
+    }
+
+    @Test
+    public void testReadBytes() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+
+        final Path file = new Path(new File("target/test/test-camel-bytes").getAbsolutePath());
+        Configuration conf = new Configuration();
+        SequenceFile.Writer writer = createWriter(conf, file, NullWritable.class, BytesWritable.class);
+        NullWritable keyWritable = NullWritable.get();
+        BytesWritable valueWritable = new BytesWritable();
+        String value = "CIAO!";
+        valueWritable.set(value.getBytes(), 0, value.getBytes().length);
+        writer.append(keyWritable, valueWritable);
+        writer.sync();
+        writer.close();
+
+        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
+        resultEndpoint.expectedMessageCount(1);
+
+        context.addRoutes(new RouteBuilder() {
+            public void configure() {
+                from("hdfs2:///" + file.toUri() + "?fileSystemType=LOCAL&fileType=SEQUENCE_FILE&initialDelay=0").to("mock:result");
+            }
+        });
+        context.start();
+
+        resultEndpoint.assertIsSatisfied();
+    }
+
+    @Test
+    public void testReadString() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+
+        final Path file = new Path(new File("target/test/test-camel-string").getAbsolutePath());
+        Configuration conf = new Configuration();
+        SequenceFile.Writer writer = createWriter(conf, file, NullWritable.class, Text.class);
+        NullWritable keyWritable = NullWritable.get();
+        Text valueWritable = new Text();
+        String value = "CIAO!";
+        valueWritable.set(value);
+        writer.append(keyWritable, valueWritable);
+        writer.sync();
+        writer.close();
+
+        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
+        resultEndpoint.expectedMessageCount(1);
+
+        context.addRoutes(new RouteBuilder() {
+            public void configure() {
+                from("hdfs2:///" + file.toUri() + "?fileSystemType=LOCAL&fileType=SEQUENCE_FILE&initialDelay=0").to("mock:result");
+            }
+        });
+        context.start();
+
+        resultEndpoint.assertIsSatisfied();
+    }
+
+    @Test
+    public void testReadStringArrayFile() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+
+        final Path file = new Path(new File("target/test/test-camel-string").getAbsolutePath());
+        Configuration conf = new Configuration();
+        FileSystem fs1 = FileSystem.get(file.toUri(), conf);
+        ArrayFile.Writer writer = new ArrayFile.Writer(conf, fs1, "target/test/test-camel-string1", Text.class, CompressionType.NONE, new Progressable() {
+            @Override
+            public void progress() {
+            }
+        });
+        Text valueWritable = new Text();
+        String value = "CIAO!";
+        valueWritable.set(value);
+        writer.append(valueWritable);
+        writer.close();
+
+        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
+        resultEndpoint.expectedMessageCount(1);
+
+        context.addRoutes(new RouteBuilder() {
+            public void configure() {
+                // consume from the parent directory, where the array file was written
+                from("hdfs2:///" + file.getParent().toUri() + "?fileSystemType=LOCAL&fileType=ARRAY_FILE&initialDelay=0").to("mock:result");
+            }
+        });
+        context.start();
+
+        resultEndpoint.assertIsSatisfied();
+    }
+
+    @Override
+    public void tearDown() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+
+        super.tearDown();
+        // small grace period so the consumer releases its file handles before deletion
+        Thread.sleep(100);
+        Configuration conf = new Configuration();
+        Path dir = new Path("target/test");
+        FileSystem fs = FileSystem.get(dir.toUri(), conf);
+        fs.delete(dir, true);
+    }
+
+    // creates an uncompressed sequence-file writer for the given key/value classes
+    private Writer createWriter(Configuration conf, Path file, Class<?> keyClass,
+        Class<?> valueClass) throws IOException {
+        return SequenceFile.createWriter(conf, SequenceFile.Writer.file(file),
+                SequenceFile.Writer.keyClass(keyClass), SequenceFile.Writer.valueClass(valueClass));
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/camel/blob/af7661ab/components/camel-hdfs2/src/test/java/org/apache/camel/component/hdfs2/HdfsProducerConsumerTest.java
----------------------------------------------------------------------
diff --git a/components/camel-hdfs2/src/test/java/org/apache/camel/component/hdfs2/HdfsProducerConsumerTest.java b/components/camel-hdfs2/src/test/java/org/apache/camel/component/hdfs2/HdfsProducerConsumerTest.java
new file mode 100644
index 0000000..89d1179
--- /dev/null
+++ b/components/camel-hdfs2/src/test/java/org/apache/camel/component/hdfs2/HdfsProducerConsumerTest.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.component.hdfs2;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.camel.Exchange;
+import org.apache.camel.builder.RouteBuilder;
+import org.apache.camel.component.mock.MockEndpoint;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Round-trip test: a producer route writes split files to an hdfs2 endpoint
+ * (LOCAL file system) while a consumer route reads them back in chunks.
+ */
+public class HdfsProducerConsumerTest extends HdfsTestSupport {
+
+    @Before
+    public void setUp() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+        super.setUp();
+    }
+    
+    @Override
+    public boolean isUseRouteBuilder() {
+        // routes are added inside the test itself
+        return false;
+    }
+
+    @Test
+    public void testSimpleSplitWriteRead() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+
+        final Path file = new Path(new File("target/test/test-camel-simple-write-file").getAbsolutePath());
+
+        context.addRoutes(new RouteBuilder() {
+            @Override
+            public void configure() {
+                // split output every 5 bytes or after 1s idle; consumer reads 5-byte chunks
+                from("direct:start").to("hdfs2:///" + file.toUri() + "?fileSystemType=LOCAL&splitStrategy=BYTES:5,IDLE:1000");
+                from("hdfs2:///" + file.toUri() + "?initialDelay=2000&fileSystemType=LOCAL&chunkSize=5").to("mock:result");
+            }
+        });
+        context.start();
+
+        List<String> expectedResults = new ArrayList<String>();
+        for (int i = 0; i < 10; ++i) {
+            template.sendBody("direct:start", "CIAO" + i);
+            expectedResults.add("CIAO" + i);
+        }
+
+        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
+
+        // FIX: register all expectations before asserting; previously the body
+        // expectation was set after assertIsSatisfied() and was never verified
+        resultEndpoint.expectedMessageCount(10);
+        resultEndpoint.expectedBodiesReceivedInAnyOrder(expectedResults);
+        resultEndpoint.assertIsSatisfied();
+
+        List<Exchange> exchanges = resultEndpoint.getExchanges();
+        assertEquals(10, exchanges.size());
+    }
+
+    @Override
+    public void tearDown() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+
+        super.tearDown();
+        // small grace period so file handles are released before deletion
+        Thread.sleep(100);
+        Configuration conf = new Configuration();
+        Path dir = new Path("target/test");
+        FileSystem fs = FileSystem.get(dir.toUri(), conf);
+        fs.delete(dir, true);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/camel/blob/af7661ab/components/camel-hdfs2/src/test/java/org/apache/camel/component/hdfs2/HdfsProducerSplitTest.java
----------------------------------------------------------------------
diff --git a/components/camel-hdfs2/src/test/java/org/apache/camel/component/hdfs2/HdfsProducerSplitTest.java b/components/camel-hdfs2/src/test/java/org/apache/camel/component/hdfs2/HdfsProducerSplitTest.java
new file mode 100644
index 0000000..22d5ac2
--- /dev/null
+++ b/components/camel-hdfs2/src/test/java/org/apache/camel/component/hdfs2/HdfsProducerSplitTest.java
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.component.hdfs2;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.InputStreamReader;
+
+import org.apache.camel.builder.RouteBuilder;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests the hdfs2 producer split strategies (MESSAGES, BYTES, IDLE and
+ * combinations thereof) by writing through a route and verifying the
+ * number and content of the resulting split files on the local FS.
+ */
+public class HdfsProducerSplitTest extends HdfsTestSupport {
+
+    private static final Path BASE_FILE = new Path(new File("target/test/test-camel-simple-write-BASE_FILE").getAbsolutePath());
+
+    @Override
+    @Before
+    public void setUp() throws Exception {
+        // Skip Camel startup entirely on platforms where Hadoop cannot run.
+        if (!canTest()) {
+            return;
+        }
+        super.setUp();
+    }
+
+    @Test
+    public void testSimpleWriteFileWithMessageSplit() throws Exception {
+        doTest(1);
+    }
+
+    @Test
+    public void testSimpleWriteFileWithBytesSplit() throws Exception {
+        doTest(2);
+    }
+
+    @Test
+    public void testSimpleWriteFileWithIdleSplit() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+
+        for (int i = 0; i < 3; ++i) {
+            template.sendBody("direct:start3", "CIAO" + i);
+            // pause longer than the IDLE threshold (1000 ms) so each message lands in its own file
+            Thread.sleep(2000);
+        }
+
+        // stop Camel to flush and close file stream
+        stopCamelContext();
+
+        assertSplitFiles(3, 3);
+    }
+
+    @Test
+    public void testSimpleWriteFileWithMessageIdleSplit() throws Exception {
+        doTest(4);
+    }
+
+    @Test
+    public void testSimpleWriteFileWithBytesIdleSplit() throws Exception {
+        doTest(5);
+    }
+
+    /**
+     * Sends 10 messages to the route with the given number and expects one
+     * split file per message (each route splits on every message or on 5 bytes,
+     * i.e. per "CIAO<n>" body).
+     */
+    private void doTest(int routeNr) throws Exception {
+        if (!canTest()) {
+            return;
+        }
+
+        for (int i = 0; i < 10; ++i) {
+            template.sendBody("direct:start" + routeNr, "CIAO" + i);
+        }
+        // stop Camel to flush and close file stream
+        stopCamelContext();
+
+        assertSplitFiles(routeNr, 10);
+    }
+
+    /**
+     * Asserts that exactly expectedFiles split files exist for the given route
+     * suffix and that each contains a single line starting with "CIAO".
+     * Readers are closed to avoid leaking file handles across tests.
+     */
+    private void assertSplitFiles(int routeNr, int expectedFiles) throws Exception {
+        FileSystem fs = FileSystem.get(new Configuration());
+        FileStatus[] status = fs.listStatus(new Path("file:///" + BASE_FILE.toUri() + routeNr));
+        assertEquals(expectedFiles, status.length);
+        for (FileStatus fileStatus : status) {
+            BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(fileStatus.getPath())));
+            try {
+                assertTrue(br.readLine().startsWith("CIAO"));
+                assertNull(br.readLine());
+            } finally {
+                br.close();
+            }
+        }
+    }
+
+    @Override
+    public void tearDown() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+
+        super.tearDown();
+        // brief pause so file handles are released before the recursive delete
+        Thread.sleep(100);
+        Configuration conf = new Configuration();
+        Path dir = new Path("target/test");
+        FileSystem fs = FileSystem.get(dir.toUri(), conf);
+        fs.delete(dir, true);
+    }
+
+    @Override
+    protected RouteBuilder createRouteBuilder() throws Exception {
+        return new RouteBuilder() {
+            @Override
+            public void configure() throws Exception {
+                from("direct:start1").to("hdfs2:///" + BASE_FILE.toUri() + "1?fileSystemType=LOCAL&splitStrategy=MESSAGES:1");
+                from("direct:start2").to("hdfs2:///" + BASE_FILE.toUri() + "2?fileSystemType=LOCAL&splitStrategy=BYTES:5");
+                from("direct:start3").to("hdfs2:///" + BASE_FILE.toUri() + "3?fileSystemType=LOCAL&splitStrategy=IDLE:1000");
+                from("direct:start4").to("hdfs2:///" + BASE_FILE.toUri() + "4?fileSystemType=LOCAL&splitStrategy=IDLE:1000,MESSAGES:1");
+                from("direct:start5").to("hdfs2:///" + BASE_FILE.toUri() + "5?fileSystemType=LOCAL&splitStrategy=IDLE:1000,BYTES:5");
+            }
+        };
+    }
+}

http://git-wip-us.apache.org/repos/asf/camel/blob/af7661ab/components/camel-hdfs2/src/test/java/org/apache/camel/component/hdfs2/HdfsProducerTest.java
----------------------------------------------------------------------
diff --git a/components/camel-hdfs2/src/test/java/org/apache/camel/component/hdfs2/HdfsProducerTest.java b/components/camel-hdfs2/src/test/java/org/apache/camel/component/hdfs2/HdfsProducerTest.java
new file mode 100644
index 0000000..18e1ff7
--- /dev/null
+++ b/components/camel-hdfs2/src/test/java/org/apache/camel/component/hdfs2/HdfsProducerTest.java
@@ -0,0 +1,424 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.component.hdfs2;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.InputStream;
+import java.net.URL;
+
+import org.apache.camel.Exchange;
+import org.apache.camel.builder.RouteBuilder;
+import org.apache.camel.component.hdfs2.HdfsConstants;
+import org.apache.camel.util.IOHelper;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.ArrayFile;
+import org.apache.hadoop.io.BloomMapFile;
+import org.apache.hadoop.io.BooleanWritable;
+import org.apache.hadoop.io.ByteWritable;
+import org.apache.hadoop.io.DoubleWritable;
+import org.apache.hadoop.io.FloatWritable;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.MapFile;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.apache.camel.language.simple.SimpleLanguage.simple;
+
+/**
+ * Tests the hdfs2 producer for the various file types (SEQUENCE_FILE, MAP_FILE,
+ * ARRAY_FILE, BLOOMMAP_FILE) and key/value writable types, writing via Camel
+ * routes and reading the files back with the Hadoop reader APIs.
+ */
+public class HdfsProducerTest extends HdfsTestSupport {
+
+    private static final Path TEMP_DIR = new Path(new File("target/test/").getAbsolutePath());
+
+    @Override
+    @Before
+    public void setUp() throws Exception {
+        // Skip Camel startup entirely on platforms where Hadoop cannot run.
+        if (!canTest()) {
+            return;
+        }
+        super.setUp();
+    }
+
+    /**
+     * Reads the first key/value record of the given sequence file under
+     * TEMP_DIR and returns the value. The reader is always closed, even when
+     * an assertion in the caller later fails.
+     */
+    private Writable readSequenceFileValue(String fileName) throws Exception {
+        Configuration conf = new Configuration();
+        Path file = new Path("file:///" + TEMP_DIR.toUri() + "/" + fileName);
+        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file));
+        try {
+            Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
+            Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
+            reader.next(key, value);
+            return value;
+        } finally {
+            IOHelper.close(reader);
+        }
+    }
+
+    /** Reads the whole content of the given file in the dynamic-filename test dir. */
+    private String readDynamicFile(String fileName) throws Exception {
+        InputStream in = null;
+        try {
+            in = new URL("file:///" + TEMP_DIR.toUri() + "/test-camel-dynamic/" + fileName).openStream();
+            ByteArrayOutputStream bos = new ByteArrayOutputStream();
+            IOUtils.copyBytes(in, bos, 4096, false);
+            return new String(bos.toByteArray());
+        } finally {
+            IOHelper.close(in);
+        }
+    }
+
+    @Test
+    public void testProducer() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+        template.sendBody("direct:start1", "PAPPO");
+
+        assertEquals("PAPPO", readSequenceFileValue("test-camel1").toString());
+    }
+
+    @Test
+    public void testProducerClose() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+        for (int i = 0; i < 10; ++i) {
+            // send 10 messages, and mark to close in last message
+            template.sendBodyAndHeader("direct:start1", "PAPPO" + i, HdfsConstants.HDFS_CLOSE, i == 9);
+        }
+
+        Configuration conf = new Configuration();
+        Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel1");
+        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file1));
+        try {
+            Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
+            Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
+
+            int i = 0;
+            while (reader.next(key, value)) {
+                Text txt = (Text) value;
+                assertEquals("PAPPO" + i, txt.toString());
+                ++i;
+            }
+            // ensure all 10 messages were flushed to the file before close
+            assertEquals(10, i);
+        } finally {
+            IOHelper.close(reader);
+        }
+    }
+
+    @Test
+    public void testWriteBoolean() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+        Boolean aBoolean = true;
+        template.sendBody("direct:write_boolean", aBoolean);
+
+        Boolean rBoolean = ((BooleanWritable) readSequenceFileValue("test-camel-boolean")).get();
+        assertEquals(aBoolean, rBoolean);
+    }
+
+    @Test
+    public void testWriteByte() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+        byte aByte = 8;
+        template.sendBody("direct:write_byte", aByte);
+
+        byte rByte = ((ByteWritable) readSequenceFileValue("test-camel-byte")).get();
+        assertEquals(aByte, rByte);
+    }
+
+    @Test
+    public void testWriteInt() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+        int anInt = 1234;
+        template.sendBody("direct:write_int", anInt);
+
+        int rInt = ((IntWritable) readSequenceFileValue("test-camel-int")).get();
+        assertEquals(anInt, rInt);
+    }
+
+    @Test
+    public void testWriteFloat() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+        float aFloat = 12.34f;
+        template.sendBody("direct:write_float", aFloat);
+
+        float rFloat = ((FloatWritable) readSequenceFileValue("test-camel-float")).get();
+        assertEquals(aFloat, rFloat, 0.0F);
+    }
+
+    @Test
+    public void testWriteDouble() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+        Double aDouble = 12.34D;
+        template.sendBody("direct:write_double", aDouble);
+
+        Double rDouble = ((DoubleWritable) readSequenceFileValue("test-camel-double")).get();
+        assertEquals(aDouble, rDouble, 0.0D);
+    }
+
+    @Test
+    public void testWriteLong() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+        long aLong = 1234567890;
+        template.sendBody("direct:write_long", aLong);
+
+        long rLong = ((LongWritable) readSequenceFileValue("test-camel-long")).get();
+        assertEquals(aLong, rLong);
+    }
+
+    @Test
+    public void testWriteText() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+        String txt = "CIAO MONDO !";
+        template.sendBody("direct:write_text1", txt);
+
+        assertEquals(txt, readSequenceFileValue("test-camel-text1").toString());
+    }
+
+    @Test
+    public void testWriteTextWithKey() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+        String txtKey = "THEKEY";
+        String txtValue = "CIAO MONDO !";
+        template.sendBodyAndHeader("direct:write_text2", txtValue, "KEY", txtKey);
+
+        Configuration conf = new Configuration();
+        Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-text2");
+        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file1));
+        try {
+            Text key = (Text) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
+            Text value = (Text) ReflectionUtils.newInstance(reader.getValueClass(), conf);
+            reader.next(key, value);
+            assertEquals(txtKey, key.toString());
+            assertEquals(txtValue, value.toString());
+        } finally {
+            IOHelper.close(reader);
+        }
+    }
+
+    @Test
+    public void testMapWriteTextWithKey() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+        String txtKey = "THEKEY";
+        String txtValue = "CIAO MONDO !";
+        template.sendBodyAndHeader("direct:write_text3", txtValue, "KEY", txtKey);
+
+        Configuration conf = new Configuration();
+        MapFile.Reader reader = new MapFile.Reader(new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-text3"), conf);
+        try {
+            Text key = (Text) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
+            Text value = (Text) ReflectionUtils.newInstance(reader.getValueClass(), conf);
+            reader.next(key, value);
+            assertEquals(txtKey, key.toString());
+            assertEquals(txtValue, value.toString());
+        } finally {
+            IOHelper.close(reader);
+        }
+    }
+
+    @Test
+    public void testArrayWriteText() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+        String txtValue = "CIAO MONDO !";
+        template.sendBody("direct:write_text4", txtValue);
+
+        Configuration conf = new Configuration();
+        Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-text4");
+        FileSystem fs1 = FileSystem.get(file1.toUri(), conf);
+        ArrayFile.Reader reader = new ArrayFile.Reader(fs1, "file:///" + TEMP_DIR.toUri() + "/test-camel-text4", conf);
+        try {
+            Text value = (Text) ReflectionUtils.newInstance(reader.getValueClass(), conf);
+            reader.next(value);
+            assertEquals(txtValue, value.toString());
+        } finally {
+            IOHelper.close(reader);
+        }
+    }
+
+    @Test
+    public void testBloomMapWriteText() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+        String txtKey = "THEKEY";
+        String txtValue = "CIAO MONDO !";
+        template.sendBodyAndHeader("direct:write_text5", txtValue, "KEY", txtKey);
+
+        Configuration conf = new Configuration();
+        BloomMapFile.Reader reader = new BloomMapFile.Reader(new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-text5"), conf);
+        try {
+            Text key = (Text) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
+            Text value = (Text) ReflectionUtils.newInstance(reader.getValueClass(), conf);
+            reader.next(key, value);
+            assertEquals(txtKey, key.toString());
+            assertEquals(txtValue, value.toString());
+        } finally {
+            IOHelper.close(reader);
+        }
+    }
+
+    @Test
+    public void testWriteTextWithDynamicFilename() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+
+        for (int i = 0; i < 5; i++) {
+            template.sendBodyAndHeader("direct:write_dynamic_filename", "CIAO" + i, Exchange.FILE_NAME, "file" + i);
+        }
+
+        for (int i = 0; i < 5; i++) {
+            assertEquals("CIAO" + i, readDynamicFile("file" + i));
+        }
+    }
+
+    @Test
+    public void testWriteTextWithDynamicFilenameExpression() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+
+        for (int i = 0; i < 5; i++) {
+            template.sendBodyAndHeader("direct:write_dynamic_filename", "CIAO" + i, Exchange.FILE_NAME, simple("file-${body}"));
+        }
+
+        for (int i = 0; i < 5; i++) {
+            assertEquals("CIAO" + i, readDynamicFile("file-CIAO" + i));
+        }
+    }
+
+    @Override
+    public void tearDown() throws Exception {
+        if (!canTest()) {
+            return;
+        }
+        super.tearDown();
+
+        // brief pause so file handles are released before the recursive delete
+        Thread.sleep(250);
+        Configuration conf = new Configuration();
+        Path dir = new Path("target/test");
+        FileSystem fs = FileSystem.get(dir.toUri(), conf);
+        fs.delete(dir, true);
+    }
+
+    @Override
+    protected RouteBuilder createRouteBuilder() throws Exception {
+        return new RouteBuilder() {
+
+            @Override
+            public void configure() throws Exception {
+                from("direct:start1").to("hdfs2:///" + TEMP_DIR.toUri() + "/test-camel1?fileSystemType=LOCAL&valueType=TEXT&fileType=SEQUENCE_FILE");
+
+                /* For testing writables */
+                from("direct:write_boolean").to("hdfs2:///" + TEMP_DIR.toUri() + "/test-camel-boolean?fileSystemType=LOCAL&valueType=BOOLEAN&fileType=SEQUENCE_FILE");
+
+                from("direct:write_byte").to("hdfs2:///" + TEMP_DIR.toUri() + "/test-camel-byte?fileSystemType=LOCAL&valueType=BYTE&fileType=SEQUENCE_FILE");
+
+                from("direct:write_int").to("hdfs2:///" + TEMP_DIR.toUri() + "/test-camel-int?fileSystemType=LOCAL&valueType=INT&fileType=SEQUENCE_FILE");
+
+                from("direct:write_float").to("hdfs2:///" + TEMP_DIR.toUri() + "/test-camel-float?fileSystemType=LOCAL&valueType=FLOAT&fileType=SEQUENCE_FILE");
+
+                from("direct:write_long").to("hdfs2:///" + TEMP_DIR.toUri() + "/test-camel-long?fileSystemType=LOCAL&valueType=LONG&fileType=SEQUENCE_FILE");
+
+                from("direct:write_double").to("hdfs2:///" + TEMP_DIR.toUri() + "/test-camel-double?fileSystemType=LOCAL&valueType=DOUBLE&fileType=SEQUENCE_FILE");
+
+                from("direct:write_text1").to("hdfs2:///" + TEMP_DIR.toUri() + "/test-camel-text1?fileSystemType=LOCAL&valueType=TEXT&fileType=SEQUENCE_FILE");
+
+                /* For testing key and value writing */
+                from("direct:write_text2").to("hdfs2:///" + TEMP_DIR.toUri() + "/test-camel-text2?fileSystemType=LOCAL&keyType=TEXT&valueType=TEXT&fileType=SEQUENCE_FILE");
+
+                from("direct:write_text3").to("hdfs2:///" + TEMP_DIR.toUri() + "/test-camel-text3?fileSystemType=LOCAL&keyType=TEXT&valueType=TEXT&fileType=MAP_FILE");
+
+                /* For testing ArrayFile */
+                from("direct:write_text4").to("hdfs2:///" + TEMP_DIR.toUri() + "/test-camel-text4?fileSystemType=LOCAL&valueType=TEXT&fileType=ARRAY_FILE");
+
+                /* For testing BloomMapFile */
+                from("direct:write_text5").to("hdfs2:///" + TEMP_DIR.toUri() + "/test-camel-text5?fileSystemType=LOCAL&keyType=TEXT&valueType=TEXT&fileType=BLOOMMAP_FILE");
+
+                from("direct:write_dynamic_filename").to("hdfs2:///" + TEMP_DIR.toUri() + "/test-camel-dynamic/?fileSystemType=LOCAL&valueType=TEXT");
+            }
+        };
+    }
+}

http://git-wip-us.apache.org/repos/asf/camel/blob/af7661ab/components/camel-hdfs2/src/test/java/org/apache/camel/component/hdfs2/HdfsTestSupport.java
----------------------------------------------------------------------
diff --git a/components/camel-hdfs2/src/test/java/org/apache/camel/component/hdfs2/HdfsTestSupport.java b/components/camel-hdfs2/src/test/java/org/apache/camel/component/hdfs2/HdfsTestSupport.java
new file mode 100644
index 0000000..13b8136
--- /dev/null
+++ b/components/camel-hdfs2/src/test/java/org/apache/camel/component/hdfs2/HdfsTestSupport.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.component.hdfs2;
+
+import org.apache.camel.test.junit4.CamelTestSupport;
+
+/**
+ * Base class for the hdfs2 component tests. Provides {@link #canTest()} which
+ * tests call to skip themselves on platforms where Hadoop cannot run.
+ */
+public abstract class HdfsTestSupport extends CamelTestSupport {
+
+    /**
+     * Returns whether the HDFS tests can run on this JVM.
+     * <p/>
+     * Returns false on the IBM JDK (Hadoop does not run there) or when the
+     * JAAS security configuration cannot be obtained.
+     */
+    public boolean canTest() {
+        // Hadoop doesn't run on IBM JDK; default to "" so a missing property cannot NPE
+        if (System.getProperty("java.vendor", "").contains("IBM")) {
+            return false;
+        }
+
+        // must be able to get security configuration
+        try {
+            javax.security.auth.login.Configuration.getConfiguration();
+        } catch (Exception e) {
+            log.debug("Cannot run test due to a security exception", e);
+            return false;
+        }
+
+        return true;
+    }
+
+}


Mime
View raw message