incubator-chukwa-user mailing list archives

From Preetam Patil <pvpatil.i...@gmail.com>
Subject Re: A demo setup on a single linux server
Date Mon, 06 Jun 2011 10:38:03 GMT
Thanks, Eric, for filing the bug report and the patch.
I tried the patch, and the collector is now able to connect to HBase.
However, from what I understand from the logs, the collector is trying to
write to the wrong table (or the wrong column families to a table).
I tried to look into the patch to find the reason for this mismatch but
could not find one. Could you please help?
Thanks,
-preetam

From the HBase log:
2011-06-06 16:06:15,747 WARN org.apache.hadoop.hbase.regionserver.HRegion:
No such column family in batch put
org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException: Column
family chukwaAgent_chunkQueue does not exist in region
Hadoop,,1307351718977.567db4857e94f3729df7cc1d3ba9686c. in table
{NAME => 'Hadoop', FAMILIES => [
  {NAME => 'ClientTrace', BLOOMFILTER => 'NONE', REPLICATION_SCOPE => '0', VERSIONS => '65535', COMPRESSION => 'NONE', TTL => '2147483647', BLOCKSIZE => '65536', IN_MEMORY => 'false', BLOCKCACHE => 'true'},
  {NAME => 'dfs_FSDirectory', BLOOMFILTER => 'NONE', REPLICATION_SCOPE => '0', VERSIONS => '65535', COMPRESSION => 'NONE', TTL => '2147483647', BLOCKSIZE => '65536', IN_MEMORY => 'false', BLOCKCACHE => 'true'},
  {NAME => 'dfs_FSNamesystem', BLOOMFILTER => 'NONE', REPLICATION_SCOPE => '0', VERSIONS => '65535', COMPRESSION => 'NONE', TTL => '2147483647', BLOCKSIZE => '65536', IN_MEMORY => 'false', BLOCKCACHE => 'true'},
  {NAME => 'dfs_datanode', BLOOMFILTER => 'NONE', REPLICATION_SCOPE => '0', VERSIONS => '65535', COMPRESSION => 'NONE', TTL => '2147483647', BLOCKSIZE => '65536', IN_MEMORY => 'false', BLOCKCACHE => 'true'},
  {NAME => 'dfs_namenode', BLOOMFILTER => 'NONE', REPLICATION_SCOPE => '0', VERSIONS => '65535', COMPRESSION => 'NONE', TTL => '2147483647', BLOCKSIZE => '65536', IN_MEMORY => 'false', BLOCKCACHE => 'true'},
  {NAME => 'jvm_metrics', BLOOMFILTER => 'NONE', REPLICATION_SCOPE => '0', VERSIONS => '65535', COMPRESSION => 'NONE', TTL => '2147483647', BLOCKSIZE => '65536', IN_MEMORY => 'false', BLOCKCACHE => 'true'},
  {NAME => 'mapred_job', BLOOMFILTER => 'NONE', REPLICATION_SCOPE => '0', VERSIONS => '65535', COMPRESSION => 'NONE', TTL => '2147483647', BLOCKSIZE => '65536', IN_MEMORY => 'false', BLOCKCACHE => 'true'},
  {NAME => 'mapred_jobtracker', BLOOMFILTER => 'NONE', REPLICATION_SCOPE => '0', VERSIONS => '65535', COMPRESSION => 'NONE', TTL => '2147483647', BLOCKSIZE => '65536', IN_MEMORY => 'false', BLOCKCACHE => 'true'},
  {NAME => 'mapred_shuffleOutput', BLOOMFILTER => 'NONE', REPLICATION_SCOPE => '0', VERSIONS => '65535', COMPRESSION => 'NONE', TTL => '2147483647', BLOCKSIZE => '65536', IN_MEMORY => 'false', BLOCKCACHE => 'true'},
  {NAME => 'mapred_tasktracker', BLOOMFILTER => 'NONE', REPLICATION_SCOPE => '0', VERSIONS => '65535', COMPRESSION => 'NONE', TTL => '2147483647', BLOCKSIZE => '65536', IN_MEMORY => 'false', BLOCKCACHE => 'true'},
  {NAME => 'rpc_metrics', BLOOMFILTER => 'NONE', REPLICATION_SCOPE => '0', VERSIONS => '65535', COMPRESSION => 'NONE', TTL => '2147483647', BLOCKSIZE => '65536', IN_MEMORY => 'false', BLOCKCACHE => 'true'}]}
    at org.apache.hadoop.hbase.regionserver.HRegion.checkFamily(HRegion.java:3182)
    at org.apache.hadoop.hbase.regionserver.HRegion.checkFamilies(HRegion.java:1735)
    at org.apache.hadoop.hbase.regionserver.HRegion.doMiniBatchPut(HRegion.java:1417)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:1381)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.multi(HRegionServer.java:2597)
    at sun.reflect.GeneratedMethodAccessor15.invoke(Unknown Source)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    at java.lang.reflect.Method.invoke(Method.java:597)
    at org.apache.hadoop.hbase.ipc.HBaseRPC$Server.call(HBaseRPC.java:570)
    at org.apache.hadoop.hbase.ipc.HBaseServer$Handler.run(HBaseServer.java:1039)
2011-06-06 16:06:15,752 WARN org.apache.hadoop.hbase.regionserver.HRegion:
No such column family in batch put
org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException: Column
family chukwaAgent_metrics does not exist in region
Hadoop,,1307351718977.567db4857e94f3729df7cc1d3ba9686c.
    [table description and stack trace identical to the first entry above]
2011-06-06 16:06:15,754 WARN org.apache.hadoop.hbase.regionserver.HRegion:
No such column family in batch put
org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException: Column
family chukwaAgent_httpSender does not exist in region
Hadoop,,1307351718977.567db4857e94f3729df7cc1d3ba9686c.
    [table description and stack trace identical to the first entry above]


From the collector log:
2011-06-06 16:01:15,749 WARN btpool0-1 HBaseWriter -
org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException: Failed
1 action: NoSuchColumnFamilyException: 1 time, servers with issues:
chukwanode:43354,
    at org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation.processBatch(HConnectionManager.java:1223)
    at org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation.processBatchOfPuts(HConnectionManager.java:1237)
    at org.apache.hadoop.hbase.client.HTable.flushCommits(HTable.java:819)
    at org.apache.hadoop.hbase.client.HTable.doPut(HTable.java:675)
    at org.apache.hadoop.hbase.client.HTable.put(HTable.java:665)
    at org.apache.hadoop.chukwa.datacollection.writer.hbase.HBaseWriter.add(HBaseWriter.java:203)
    at org.apache.hadoop.chukwa.datacollection.writer.SocketTeeWriter.add(SocketTeeWriter.java:252)
    at org.apache.hadoop.chukwa.datacollection.writer.PipelineStageWriter.add(PipelineStageWriter.java:41)
    at org.apache.hadoop.chukwa.datacollection.collector.servlet.ServletCollector.accept(ServletCollector.java:154)
    at org.apache.hadoop.chukwa.datacollection.collector.servlet.ServletCollector.doPost(ServletCollector.java:203)
    at javax.servlet.http.HttpServlet.service(HttpServlet.java:727)
    at javax.servlet.http.HttpServlet.service(HttpServlet.java:820)
    at org.mortbay.jetty.servlet.ServletHolder.handle(ServletHolder.java:487)
    at org.mortbay.jetty.servlet.ServletHandler.handle(ServletHandler.java:362)
    at org.mortbay.jetty.servlet.SessionHandler.handle(SessionHandler.java:181)
    at org.mortbay.jetty.handler.ContextHandler.handle(ContextHandler.java:729)
    at org.mortbay.jetty.handler.HandlerWrapper.handle(HandlerWrapper.java:152)
    at org.mortbay.jetty.Server.handle(Server.java:324)
    at org.mortbay.jetty.HttpConnection.handleRequest(HttpConnection.java:505)
    at org.mortbay.jetty.HttpConnection$RequestHandler.content(HttpConnection.java:843)
    at org.mortbay.jetty.HttpParser.parseNext(HttpParser.java:647)
    at org.mortbay.jetty.HttpParser.parseAvailable(HttpParser.java:211)
    at org.mortbay.jetty.HttpConnection.handle(HttpConnection.java:380)
    at org.mortbay.io.nio.SelectChannelEndPoint.run(SelectChannelEndPoint.java:395)
    at org.mortbay.thread.BoundedThreadPool$PoolThread.run(BoundedThreadPool.java:450)

2011-06-06 16:01:15,757 WARN btpool0-1 HBaseWriter -
org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException: Failed
1 action: NoSuchColumnFamilyException: 1 time, servers with issues:
chukwanode:43354,
    [stack trace identical to the first entry above]

2011-06-06 16:01:15,759 WARN btpool0-1 HBaseWriter -
org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException: Failed
1 action: NoSuchColumnFamilyException: 1 time, servers with issues:
chukwanode:43354,
    [stack trace identical to the first entry above]

2011-06-06 16:01:18,532 INFO Timer-2 HBaseWriter -
stat=HBaseWriter|dataRate=490
2011-06-06 16:01:28,532 INFO Timer-2 HBaseWriter -
stat=HBaseWriter|dataRate=0
2011-06-06 16:01:38,532 INFO Timer-2 HBaseWriter -
stat=HBaseWriter|dataRate=0
2011-06-06 16:01:48,449 INFO Timer-1 root -
stats:ServletCollector,numberHTTPConnection:2,numberchunks:4
2011-06-06 16:01:48,532 INFO Timer-2 HBaseWriter -
stat=HBaseWriter|dataRate=0
2011-06-06 16:01:58,532 INFO Timer-2 HBaseWriter -
stat=HBaseWriter|dataRate=0
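
For reference, this is roughly how the table can be inspected from the hbase
shell, and how one of the missing families could be added by hand as a
temporary workaround (the 'Hadoop' table and chukwaAgent_* family names are
taken from the errors above; altering the table manually would only paper
over the mismatch if the collector is in fact writing to the wrong table):

    hbase shell
    describe 'Hadoop'    # shows only the 11 families listed in the HBase log above
    disable 'Hadoop'     # HBase 0.90 requires the table to be offline for schema changes
    alter 'Hadoop', {NAME => 'chukwaAgent_chunkQueue'}   # add one missing column family
    enable 'Hadoop'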



On Sat, Jun 4, 2011 at 6:02 AM, Eric Yang <eyang@yahoo-inc.com> wrote:
> I just uploaded a patch for HBase 0.90.3, feel free to try it out.
>
> https://issues.apache.org/jira/browse/CHUKWA-586
>
> Regards,
> Eric
>
>
> On 6/3/11 2:45 AM, "Preetam Patil" <pvpatil.iitb@gmail.com> wrote:
>
> Thanks for the reply, Eric.
> The problem seems to be caused by an HBase version incompatibility: my
> Chukwa was still referring to HBase 0.20.6 while the HBase on my
> system is 0.90.1. Rebuilding Chukwa against HBase 0.90.1 and running it
> gives an HBase API incompatibility error. I'll try to get it working with
> HBase 0.20.6 now.
>
> Is there any way to get Chukwa trunk working with HBase 0.90.*?
> Thanks,
>
> -preetam
>
> On Wed, Jun 1, 2011 at 11:40 PM, Eric Yang <eyang@yahoo-inc.com> wrote:
>> Hi Preetam,
>>
>> Have you run "hbase shell < hbase.schema"?  The problem description has two
>> possible causes.
>>
>> 1. The HBase schema has not been applied to HBase. Solution:
>>
>> hbase shell < /path/to/chukwa/conf/hbase.schema
>>
>> 2. hbase-site.xml and hadoop-site.xml are not in the Chukwa configuration
>> directory, nor in the Chukwa collector class path.  Solution:
>>
>> cp hadoop-site.xml hbase-site.xml /path/to/chukwa/conf
>>
>> Hope this helps.
>>
>> Regards,
>> Eric
>>
>>
>> On 6/1/11 10:03 AM, "Preetam Patil" <pvpatil.iitb@gmail.com> wrote:
>>
>> Hi Eric and DKN,
>>
>> I am facing a similar (maybe a noob) problem: the chukwa collector is able
>> to write to a sequence file, but when I enable HBaseWriter, the HBase
>> table entries are not added. I am not sure whether it's a problem with
>> my HBase setup (it's in standalone mode) or with the Chukwa demux parsers.
>> I get two errors: "ERROR main HBaseWriter - Hbase schema mismatch with
>> demux parser" and "INFO main HConnectionManager$TableServers -
>> getMaster attempt 6 of 10 failed; retrying after sleep of 4000".
>> Can't figure out where the problem lies.
>>
>> I'm attaching the collector.log to this mail; below is my
>> chukwa-collector-conf.xml:
>>
>> Thanks,
>> -preetam
>>
>> <?xml version="1.0"?>
>> <!--
>>    Licensed to the Apache Software Foundation (ASF) under one or more
>>    contributor license agreements.  See the NOTICE file distributed with
>>    this work for additional information regarding copyright ownership.
>>    The ASF licenses this file to You under the Apache License, Version 2.0
>>    (the "License"); you may not use this file except in compliance with
>>    the License.  You may obtain a copy of the License at
>>
>>        http://www.apache.org/licenses/LICENSE-2.0
>>
>>    Unless required by applicable law or agreed to in writing, software
>>    distributed under the License is distributed on an "AS IS" BASIS,
>>    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
>> implied.
>>    See the License for the specific language governing permissions and
>>    limitations under the License.
>> -->
>> <?xml-stylesheet type="text/xsl"  href="configuration.xsl"?>
>>
>> <!-- Put site-specific property overrides in this file. -->
>>
>> <configuration>
>>
>>   <property>
>>     <name>chukwaCollector.writerClass</name>
>>
>>     <value>org.apache.hadoop.chukwa.datacollection.writer.PipelineStageWriter</value>
>>   </property>
>>
>> <!-- preetam: replaced by the HBaseWriter pipeline below
>>   <property>
>>     <name>chukwaCollector.pipeline</name>
>>
>>     <value>org.apache.hadoop.chukwa.datacollection.writer.SocketTeeWriter,org.apache.hadoop.chukwa.datacollection.writer.SeqFileWriter</value>
>>   </property>
>> -->
>>
>> <!-- LocalWriter parameters
>>   <property>
>>     <name>chukwaCollector.localOutputDir</name>
>>     <value>/tmp/chukwa/dataSink/</value>
>>     <description>Chukwa local data sink directory, see
>> LocalWriter.java</description>
>>   </property>
>>
>>   <property>
>>     <name>chukwaCollector.writerClass</name>
>>
>>     <value>org.apache.hadoop.chukwa.datacollection.writer.localfs.LocalWriter</value>
>>     <description>Local chukwa writer, see LocalWriter.java</description>
>>   </property>
>> -->
>>
>> <!-- When writing to HBase, uncomment the following parameters. If
>> you're running
>> HBase in distributed mode, you'll also need to copy your
>> hbase-site.xml file with
>> your hbase.zookeeper.quorum setting to the conf/ dir. -->
>>
>>   <property>
>>     <name>chukwaCollector.pipeline</name>
>>
>>     <value>org.apache.hadoop.chukwa.datacollection.writer.SocketTeeWriter,org.apache.hadoop.chukwa.datacollection.writer.hbase.HBaseWriter</value>
>>   </property>
>>
>>   <property>
>>     <name>hbase.demux.package</name>
>>     <value>org.apache.hadoop.chukwa.extraction.demux.processor</value>
>>     <description>Demux parser class package, HBaseWriter uses this
>> package name to validate HBase for annotated demux parser
>> classes.</description>
>>   </property>
>>
>>   <property>
>>     <name>hbase.writer.verify.schema</name>
>>     <value>true</value>
>>     <description>Verify the HBase table schema against the demux parser schema;
>>     log a warning if there is a mismatch between the HBase schema and the demux parsers.
>>     </description>
>>   </property>
>>
>>   <property>
>>     <name>hbase.writer.halt.on.schema.mismatch</name>
>>     <value>false</value>
>>     <description>If this option is set to true and the HBase table schema
>>     is mismatched with the demux parsers, the collector will shut itself down.
>>     </description>
>>   </property>
>>
>>   <property>
>>     <name>writer.hdfs.filesystem</name>
>>     <value>hdfs://localhost:8020/</value>
>>     <description>HDFS to dump to</description>
>>   </property>
>>
>>   <property>
>>     <name>chukwaCollector.outputDir</name>
>>     <value>/chukwa/logs/</value>
>>     <description>Chukwa data sink directory</description>
>>   </property>
>>
>>   <property>
>>     <name>chukwaCollector.rotateInterval</name>
>>     <value>300000</value>
>>     <description>Chukwa rotate interval (ms)</description>
>>   </property>
>>
>>   <property>
>>     <name>chukwaCollector.http.port</name>
>>     <value>8080</value>
>>     <description>The HTTP port number the collector will listen
>> on</description>
>>   </property>
>> </configuration>
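>>
>> (For reference, since my HBase is in standalone mode: a minimal
>> hbase-site.xml along these lines is what I would copy to conf/, as the
>> comment above suggests -- localhost is an assumption for a single-server
>> setup:
>>
>>   <configuration>
>>     <property>
>>       <name>hbase.zookeeper.quorum</name>
>>       <value>localhost</value>
>>       <description>Where the ZooKeeper quorum runs; localhost for a
>>       single-server demo setup</description>
>>     </property>
>>   </configuration>
>> )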
>>
>>
>> On Wed, Jun 1, 2011 at 10:11 PM, Eric Yang <eyang@yahoo-inc.com> wrote:
>>> From the current configuration, it is set up to write to HBase only; it
>>> does not write to HDFS.  If you want the data to be written to HDFS as
>>> well, use this block in chukwa-collector-conf.xml:
>>>
>>>   <property>
>>>     <name>chukwaCollector.pipeline</name>
>>>
>>>     <value>org.apache.hadoop.chukwa.datacollection.writer.SocketTeeWriter,
>>> org.apache.hadoop.chukwa.datacollection.writer.hbase.HBaseWriter,
>>> org.apache.hadoop.chukwa.datacollection.writer.SeqFileWriter</value>
>>>   </property>
>>>
>>> Make sure that hadoop-site.xml and hbase-site.xml are in the Chukwa
>>> collector class path.  This can be done by copying hadoop-site.xml and
>>> hbase-site.xml to CHUKWA_CONF_DIR.
>>>
>>> For the HICC-listening-on-localhost issue, this is a bug in Chukwa.  Feel
>>> free to file a JIRA.  Before compiling, modify web/hicc/WEB-INF/jetty.xml
>>> and set the jetty.host IP address to 0.0.0.0.  Without this setting, Jetty
>>> would bind to the first available IP address; it could randomly bind to
>>> localhost or eth0 depending on the OS.
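>>>
>>> (A sketch of what that connector stanza in jetty.xml might look like; this
>>> assumes the stock Jetty 6 SelectChannelConnector definition, and 4080 is
>>> just a placeholder for the HICC port:
>>>
>>>   <Call name="addConnector">
>>>     <Arg>
>>>       <New class="org.mortbay.jetty.nio.SelectChannelConnector">
>>>         <!-- bind to all interfaces instead of the first available one -->
>>>         <Set name="host">0.0.0.0</Set>
>>>         <Set name="port"><SystemProperty name="jetty.port" default="4080"/></Set>
>>>       </New>
>>>     </Arg>
>>>   </Call>
>>> )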
>>>
>>> Regards,
>>> Eric
>>>
>>
>>
>
>
