hadoop-common-user mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From Weiming Lu <weimin...@gmail.com>
Subject Re: Help with fuse-dfs
Date Fri, 18 Dec 2009 02:14:28 GMT
We installed Chinese language packs, and now, the output of "strace ls
/mnt/dfs" is:
execve("/bin/ls", ["ls", "/mnt/dfs"], [/* 26 vars */]) = 0
brk(0)                                  = 0x61a000
mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1,
0) = 0x7f8f5ebc0000
access("/etc/ld.so.nohwcap", F_OK)      = -1 ENOENT (No such file or directory)
mmap(NULL, 8192, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1,
0) = 0x7f8f5ebbe000
access("/etc/ld.so.preload", R_OK)      = -1 ENOENT (No such file or directory)
open("/etc/ld.so.cache", O_RDONLY)      = 3
fstat(3, {st_mode=S_IFREG|0644, st_size=45480, ...}) = 0
mmap(NULL, 45480, PROT_READ, MAP_PRIVATE, 3, 0) = 0x7f8f5ebb2000
close(3)                                = 0
access("/etc/ld.so.nohwcap", F_OK)      = -1 ENOENT (No such file or directory)
open("/lib/librt.so.1", O_RDONLY)       = 3
read(3, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0\240\"\0"..., 832) = 832
fstat(3, {st_mode=S_IFREG|0644, st_size=35752, ...}) = 0
mmap(NULL, 2132976, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3,
0) = 0x7f8f5e79a000
mprotect(0x7f8f5e7a2000, 2093056, PROT_NONE) = 0
mmap(0x7f8f5e9a1000, 8192, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x7000) = 0x7f8f5e9a1000
close(3)                                = 0
access("/etc/ld.so.nohwcap", F_OK)      = -1 ENOENT (No such file or directory)
open("/lib/libselinux.so.1", O_RDONLY)  = 3
read(3, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0\240Q\0\0"..., 832) = 832
fstat(3, {st_mode=S_IFREG|0644, st_size=109368, ...}) = 0
mmap(NULL, 2209176, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3,
0) = 0x7f8f5e57e000
mprotect(0x7f8f5e597000, 2097152, PROT_NONE) = 0
mmap(0x7f8f5e797000, 8192, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x19000) = 0x7f8f5e797000
mmap(0x7f8f5e799000, 1432, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) = 0x7f8f5e799000
close(3)                                = 0
access("/etc/ld.so.nohwcap", F_OK)      = -1 ENOENT (No such file or directory)
open("/lib/libacl.so.1", O_RDONLY)      = 3
read(3, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0\220\33\0"..., 832) = 832
fstat(3, {st_mode=S_IFREG|0644, st_size=27600, ...}) = 0
mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1,
0) = 0x7f8f5ebb1000
mmap(NULL, 2122744, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3,
0) = 0x7f8f5e377000
mprotect(0x7f8f5e37d000, 2097152, PROT_NONE) = 0
mmap(0x7f8f5e57d000, 4096, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x6000) = 0x7f8f5e57d000
close(3)                                = 0
access("/etc/ld.so.nohwcap", F_OK)      = -1 ENOENT (No such file or directory)
open("/lib/libc.so.6", O_RDONLY)        = 3
read(3, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0\220\345"..., 832) = 832
fstat(3, {st_mode=S_IFREG|0755, st_size=1502520, ...}) = 0
mmap(NULL, 3609304, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3,
0) = 0x7f8f5e005000
mprotect(0x7f8f5e16e000, 2093056, PROT_NONE) = 0
mmap(0x7f8f5e36d000, 20480, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x168000) = 0x7f8f5e36d000
mmap(0x7f8f5e372000, 17112, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) = 0x7f8f5e372000
close(3)                                = 0
access("/etc/ld.so.nohwcap", F_OK)      = -1 ENOENT (No such file or directory)
open("/lib/libpthread.so.0", O_RDONLY)  = 3
read(3, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0\320W\0\0"..., 832) = 832
fstat(3, {st_mode=S_IFREG|0755, st_size=130214, ...}) = 0
mmap(NULL, 2208624, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3,
0) = 0x7f8f5dde9000
mprotect(0x7f8f5de00000, 2093056, PROT_NONE) = 0
mmap(0x7f8f5dfff000, 8192, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x16000) = 0x7f8f5dfff000
mmap(0x7f8f5e001000, 13168, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) = 0x7f8f5e001000
close(3)                                = 0
access("/etc/ld.so.nohwcap", F_OK)      = -1 ENOENT (No such file or directory)
open("/lib/libdl.so.2", O_RDONLY)       = 3
read(3, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0\260\r\0"..., 832) = 832
fstat(3, {st_mode=S_IFREG|0644, st_size=14608, ...}) = 0
mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1,
0) = 0x7f8f5ebb0000
mmap(NULL, 2109696, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3,
0) = 0x7f8f5dbe5000
mprotect(0x7f8f5dbe7000, 2097152, PROT_NONE) = 0
mmap(0x7f8f5dde7000, 8192, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x2000) = 0x7f8f5dde7000
close(3)                                = 0
access("/etc/ld.so.nohwcap", F_OK)      = -1 ENOENT (No such file or directory)
open("/lib/libattr.so.1", O_RDONLY)     = 3
read(3, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0000\21\0"..., 832) = 832
fstat(3, {st_mode=S_IFREG|0644, st_size=16128, ...}) = 0
mmap(NULL, 2111240, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3,
0) = 0x7f8f5d9e1000
mprotect(0x7f8f5d9e5000, 2093056, PROT_NONE) = 0
mmap(0x7f8f5dbe4000, 4096, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x3000) = 0x7f8f5dbe4000
close(3)                                = 0
mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1,
0) = 0x7f8f5ebaf000
mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1,
0) = 0x7f8f5ebae000
arch_prctl(ARCH_SET_FS, 0x7f8f5ebae780) = 0
mprotect(0x7f8f5dde7000, 4096, PROT_READ) = 0
mprotect(0x7f8f5dfff000, 4096, PROT_READ) = 0
mprotect(0x7f8f5e36d000, 16384, PROT_READ) = 0
mprotect(0x7f8f5e9a1000, 4096, PROT_READ) = 0
mprotect(0x7f8f5ebc1000, 4096, PROT_READ) = 0
munmap(0x7f8f5ebb2000, 45480)           = 0
set_tid_address(0x7f8f5ebae810)         = 10397
set_robust_list(0x7f8f5ebae820, 0x18)   = 0
futex(0x7fff66bc018c, 0x81 /* FUTEX_??? */, 1) = 0
rt_sigaction(SIGRTMIN, {0x7f8f5ddee660, [], SA_RESTORER|SA_SIGINFO,
0x7f8f5ddf80f0}, NULL, 8) = 0
rt_sigaction(SIGRT_1, {0x7f8f5ddee6f0, [],
SA_RESTORER|SA_RESTART|SA_SIGINFO, 0x7f8f5ddf80f0}, NULL, 8) = 0
rt_sigprocmask(SIG_UNBLOCK, [RTMIN RT_1], NULL, 8) = 0
getrlimit(RLIMIT_STACK, {rlim_cur=8192*1024, rlim_max=RLIM_INFINITY}) = 0
brk(0)                                  = 0x61a000
brk(0x63b000)                           = 0x63b000
open("/etc/selinux/config", O_RDONLY)   = -1 ENOENT (No such file or directory)
statfs("/selinux", 0x7fff66bbf0b0)      = -1 ENOENT (No such file or directory)
open("/proc/mounts", O_RDONLY)          = 3
fstat(3, {st_mode=S_IFREG|0444, st_size=0, ...}) = 0
mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1,
0) = 0x7f8f5ebbd000
read(3, "rootfs / rootfs rw 0 0\nnone /sys"..., 1024) = 948
read(3, "", 1024)                       = 0
close(3)                                = 0
munmap(0x7f8f5ebbd000, 4096)            = 0
open("/usr/lib/locale/locale-archive", O_RDONLY) = -1 ENOENT (No such
file or directory)
open("/usr/share/locale/locale.alias", O_RDONLY) = 3
fstat(3, {st_mode=S_IFREG|0644, st_size=2586, ...}) = 0
mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1,
0) = 0x7f8f5ebbd000
read(3, "# Locale name alias data base.\n#"..., 4096) = 2586
read(3, "", 4096)                       = 0
close(3)                                = 0
munmap(0x7f8f5ebbd000, 4096)            = 0
open("/usr/lib/locale/zh_CN/LC_IDENTIFICATION", O_RDONLY) = -1 ENOENT
(No such file or directory)
open("/usr/lib/locale/zh/LC_IDENTIFICATION", O_RDONLY) = -1 ENOENT (No
such file or directory)
open("/usr/share/locale-langpack/zh_CN/LC_IDENTIFICATION", O_RDONLY) =
-1 ENOENT (No such file or directory)
open("/usr/share/locale-langpack/zh/LC_IDENTIFICATION", O_RDONLY) = -1
ENOENT (No such file or directory)
ioctl(1, SNDCTL_TMR_TIMEBASE or TCGETS, {B38400 opost isig icanon echo ...}) = 0
ioctl(1, TIOCGWINSZ, {ws_row=33, ws_col=122, ws_xpixel=0, ws_ypixel=0}) = 0
stat("/mnt/dfs", {st_mode=S_IFDIR|0777, st_size=4096, ...}) = 0
open("/mnt/dfs", O_RDONLY|O_NONBLOCK|O_DIRECTORY|0x80000) = 3
fstat(3, {st_mode=S_IFDIR|0777, st_size=4096, ...}) = 0
fcntl(3, F_GETFD)                       = 0x1 (flags FD_CLOEXEC)
getdents(3, 0x61fec8, 512)              = -1 EIO (Input/output error)
write(2, "ls: ", 4ls: )                     = 4
write(2, "reading directory /mnt/dfs", 26reading directory /mnt/dfs) = 26
write(2, ": Input/output error", 20: Input/output error)    = 20
write(2, "\n", 1
)                       = 1
close(3)                                = 0
close(1)                                = 0
close(2)                                = 0
exit_group(2)                           = ?
Process 10397 detached


On Fri, Dec 18, 2009 at 8:11 AM, Eli Collins <eli@cloudera.com> wrote:
> Hey Weiming,
>
> What's the output of "mount" and "strace ls /mnt/dfs"?
>
> Thanks,
> Eli
>
> On Wed, Dec 16, 2009 at 10:33 PM, Weiming Lu <weiminglu@gmail.com> wrote:
>> So, can we use one machine which is not the namenode or datanode to
>> mount FUSE-DFS, started up at a console instead of X Window?
>>
>> Yesterday, I installed the hadoop-0.18.2 on another machine, and
>> built FUSE-DFS successfully.
>>
>> Should the conf files such as hadoop-site.xml, masters and slaves in
>> that machine must be the same as the hadoop cluster?
>> I copied the hadoop-site.xml, masters and slaves from the hadoop
>> cluster to the new machine.
>> In that machine, we can do 'hadoop fs -ls /' to list the files in
>> hadoop cluster.
>>
>> In fuse_dfs_wrapper.sh, we set JAVA_HOME, OS_ARCH, OS_NAME,
>> HADOOP_HOME,CLASSPATH, and modified the command as './fuse_dfs $@'
>> Then, we do './fuse_dfs_wrapper.sh dfs://10.15.62.4:54310 /mnt/dfs -d'
>> It output:
>> port=54310,server=10.15.62.4
>> fuse-dfs didn't recognize /mnt/dfs,-2
>> fuse-dfs ignoring option -d
>> FUSE library version: 2.8.1
>> nullpath_ok: 0
>> unique: 1, opcode: INIT (26), nodeid: 0, insize: 56
>> INIT: 7.9
>> flags=0x0000000b
>> max_readahead=0x00020000
>>   INIT: 7.12
>>   flags=0x00000011
>>   max_readahead=0x00020000
>>   max_write=0x00020000
>>   unique: 1, success, outsize: 40
>> unique: 2, opcode: GETATTR (3), nodeid: 1, insize: 56
>> getattr /
>>   unique: 2, success, outsize: 120
>>
>> In other console, we found the /mnt/dfs is mounted, and we can 'cd dfs'
>> However, when we 'ls /mnt/dfs', we got the error.
>> ls: reading directory .: Input/output error
>>
>> The output of FUSE_DFS is:
>> unique: 1, success, outsize: 40
>> unique: 2, opcode: GETATTR (3), nodeid: 1, insize: 56
>> getattr /
>>   unique: 2, success, outsize: 120
>> unique: 3, opcode: GETATTR (3), nodeid: 1, insize: 56
>> getattr /
>>   unique: 3, success, outsize: 120
>> unique: 4, opcode: OPENDIR (27), nodeid: 1, insize: 48
>>   unique: 4, success, outsize: 32
>> unique: 5, opcode: READDIR (28), nodeid: 1, insize: 80
>> readdir[0] from 0
>>   unique: 5, success, outsize: 200
>> unique: 6, opcode: RELEASEDIR (29), nodeid: 1, insize: 64
>>   unique: 6, success, outsize: 16
>> unique: 7, opcode: GETATTR (3), nodeid: 1, insize: 56
>> getattr /
>>   unique: 7, success, outsize: 120
>> unique: 8, opcode: OPENDIR (27), nodeid: 1, insize: 48
>>   unique: 8, success, outsize: 32
>> unique: 9, opcode: READDIR (28), nodeid: 1, insize: 80
>> readdir[0] from 0
>>   unique: 9, success, outsize: 200
>> unique: 10, opcode: RELEASEDIR (29), nodeid: 1, insize: 64
>>   unique: 10, success, outsize: 16
>>
>> Can somebody help me?
>> Thanks.
>>
>>
>>
>> On Wed, Dec 16, 2009 at 10:06 PM, Brian Bockelman <bbockelm@cse.unl.edu> wrote:
>>> Sigh, I suppose this means I need to come out from under the rock I've been hiding
under and file a JIRA.
>>>
>>> As I mentioned, the work-around is to mount FUSE-DFS when GNOME is not running.
>>>
>>> Brian
>>>
>>> On Dec 16, 2009, at 8:00 AM, Weiming Lu wrote:
>>>
>>>> Hi, Brian,  that is the point, you are right. thanks very much.
>>>> We used the Ubuntu Desktop for easy programming.
>>>> It appears that I have to upgrade the hadoop to the latest version.
>>>>
>>>>
>>>> On Wed, Dec 16, 2009 at 8:39 PM, Brian Bockelman <bbockelm@cse.unl.edu>
wrote:
>>>>> Hey Weiming,
>>>>>
>>>>> We've recently found a race condition in FUSE-DFS that can be triggered
when you run it from a host where GNOME is also running.  It causes a segfault like you report
below.
>>>>>
>>>>> The patch attached works for 19.1.  Another option is to shut down all
the instances of GNOME on your computer, then mount.
>>>>>
>>>>> Brian
>>>>>
>>>>>
>>>>>
>>>>>
>>>>> On Dec 16, 2009, at 4:20 AM, Weiming Lu wrote:
>>>>>
>>>>>> Thanks very much.
>>>>>> We use Ubuntu 8.0.4, and the kernel is 2.6.24-16-generic.
>>>>>> When we run jps, it shows:
>>>>>> 12405 startup.jar
>>>>>> 5950 startup.jar
>>>>>> 19216 SecondaryNameNode
>>>>>> 25381 Jps
>>>>>> 19053 NameNode
>>>>>> 19289 JobTracker
>>>>>>
>>>>>> When I pass "-o private" for fuse-dfs just as: fuse_dfs
>>>>>> dfs://10.15.62.4:54310 /mnt/dfs -oprivate -d
>>>>>> We see the debug message:
>>>>>> port=54310,server=10.15.62.4
>>>>>> fuse-dfs didn't recognize /mnt/dfs,-2
>>>>>> fuse-dfs ignoring option -d
>>>>>> FUSE library version: 2.8.1
>>>>>> nullpath_ok: 0
>>>>>> unique: 1, opcode: INIT (26), nodeid: 0, insize: 56
>>>>>> INIT: 7.9
>>>>>> flags=0x0000000b
>>>>>> max_readahead=0x00020000
>>>>>>   INIT: 7.12
>>>>>>   flags=0x00000011
>>>>>>   max_readahead=0x00020000
>>>>>>   max_write=0x00020000
>>>>>>   unique: 1, success, outsize: 40
>>>>>> unique: 2, opcode: GETATTR (3), nodeid: 1, insize: 56
>>>>>> getattr /
>>>>>> unique: 3, opcode: GETATTR (3), nodeid: 1, insize: 56
>>>>>> getattr /
>>>>>> #
>>>>>> # An unexpected error has been detected by Java Runtime Environment:
>>>>>> #
>>>>>> #  Internal Error (sharedRuntime.cpp:552), pid=25313, tid=1107450192
>>>>>> #  Error: guarantee(cb != __null,"exception happened outside
>>>>>> interpreter, nmethods and vtable stubs (1)")
>>>>>> #
>>>>>> # Java VM: Java HotSpot(TM) 64-Bit Server VM (10.0-b23 mixed mode
linux-amd64)
>>>>>> # An error report file with more information is saved as:
>>>>>> # /tmp/hs_err_pid25313.log
>>>>>> #
>>>>>> # If you would like to submit a bug report, please visit:
>>>>>> #   http://java.sun.com/webapps/bugreport/crash.jsp
>>>>>> #
>>>>>> Aborted
>>>>>>
>>>>>> When we list the /mnt/, the message is :
>>>>>> ls: cannot access dfs: Transport endpoint is not connected
>>>>>>
>>>>>> Thanks.
>>>>>>
>>>>>>
>>>>>> On Wed, Dec 16, 2009 at 3:40 PM, Eli Collins <eli@cloudera.com>
wrote:
>>>>>>> The "fuse-dfs didn't recognize <mntpoint>" and "fuse-dfs
ignoring
>>>>>>> option -d" are expected, they get passed along from fuse-dfs
to fuse
>>>>>>> (via fuse_main).
>>>>>>>
>>>>>>> Does it work if you pass -o private?  Still nothing reported
by dmesg?
>>>>>>>  What does jps indicate is running? What linux distribution and
kernel
>>>>>>> are you using?
>>>>>>>
>>>>>>> Thanks,
>>>>>>> Eli
>>>>>>>
>>>>>>>
>>>>>>> On Tue, Dec 15, 2009 at 5:54 PM, Weiming Lu <weiminglu@gmail.com>
wrote:
>>>>>>>> Thanks for your reply. I have added the "-d" option when
calling the
>>>>>>>> fuse_dfs, but I can't see any stack traces. The ONLY thing
I see on
>>>>>>>> the console is:
>>>>>>>> port=54310,server=10.15.62.4
>>>>>>>> fuse-dfs didn't recognize /mnt/dfs,-2
>>>>>>>> fuse-dfs ignoring option -d
>>>>>>>> fuse-dfs ignoring option -o
>>>>>>>> fuse-dfs didn't recognize allow_other,-2
>>>>>>>> fuse: invalid argument `allow_other'
>>>>>>>>
>>>>>>>> The option "-d" is ignored. Maybe there is something wrong
in my building step.
>>>>>>>> Can anyone help me? Thanks.
>>>>>>>>
>>>>>>>>
>>>>>>>> On Wed, Dec 16, 2009 at 12:49 AM, Brian Bockelman <bbockelm@cse.unl.edu>
wrote:
>>>>>>>>> Hey,
>>>>>>>>>
>>>>>>>>> One thing you can do is call the fuse_dfs executable
with the "-d" option.  This will keep the FUSE-DFS process in the foreground of the terminal,
and you will see any stack traces that occur within the Hadoop portion.  Very useful for
debugging.
>>>>>>>>>
>>>>>>>>> Brian
>>>>>>>>>
>>>>>>>>> On Dec 15, 2009, at 12:21 AM, Weiming Lu wrote:
>>>>>>>>>
>>>>>>>>>> Thanks very much, I have built fuse-dfs successfully
>>>>>>>>>> with many
>>>>>>>>>> tries. But there are still some problems. We can
list files in hadoop
>>>>>>>>>> by "./hadoop fs -ls ". But after mount, no file shown
in /mnt/dfs, and
>>>>>>>>>> there is also no message in /var/log/messages.
>>>>>>>>>> does anybody encounter this ?
>>>>>>>>>>
>>>>>>>>>> The hadoop we used is hadoop 0.18.2, and our platform
is ubuntu 8.0.4, amd64.
>>>>>>>>>>
>>>>>>>>>> The steps we followed to build it were:
>>>>>>>>>> 1. modified src/c++/libhdfs/Makefile by
>>>>>>>>>> adding OS_ARCH=amd64, and JAVA_HOME
>>>>>>>>>> removing -32m in CPPFLAGS and LDFLAGS
>>>>>>>>>> 2.modified src/c++/utils/configure and src/c++/pipes/configure
by
>>>>>>>>>> adding OS_ARCH=amd64
>>>>>>>>>> making it executable by "chmod 755"
>>>>>>>>>> 3.ln -s /usr/lib/jvm/java-6-sun/jre/lib/amd64/server/libjvm.so
/usr/local/lib
>>>>>>>>>> 4.install fuse with version 2.8.1
>>>>>>>>>> 5.ant compile-libhdfs -Dlibhdfs=1
>>>>>>>>>> 6.ant compile-contrib -Dcompile.c++=1 -Dfusedfs=1
-Dlibhdfs=1
>>>>>>>>>> -Dlibhdfs.noperms=1
>>>>>>>>>> NOW we have fuse_dfs and fuse_dfs_wrapper.sh in
>>>>>>>>>> $HADOOP_HOME/build/contrib/fuse-dfs
>>>>>>>>>>
>>>>>>>>>> 7. add the following content in fuse_dfs_wrapper.sh
>>>>>>>>>> #!/bin/bash
>>>>>>>>>> export JAVA_HOME=/usr/lib/jvm/java-6-sun
>>>>>>>>>> export OS_NAME=linux
>>>>>>>>>> export OS_ARCH=amd64
>>>>>>>>>> export HADOOP_HOME=/home/lwm/work/hadoop
>>>>>>>>>>
>>>>>>>>>> 8. ln -s /home/lwm/work/hadoop/build/libhdfs/libhdfs.so
>>>>>>>>>> /usr/local/lib/libhdfs.so
>>>>>>>>>>
>>>>>>>>>> 9. modified /etc/ld.so.conf by adding /usr/local/lib
>>>>>>>>>> and then ldconfig
>>>>>>>>>> 10. sudo mkdir /mnt/dfs
>>>>>>>>>> 11. ./fuse_dfs_wrapper.sh dfs://10.15.62.4:54310
/mnt/dfs
>>>>>>>>>> we got:
>>>>>>>>>> port=54310,server=10.15.62.4
>>>>>>>>>> fuse-dfs didn't recognize /mnt/dfs,-2
>>>>>>>>>> fuse-dfs ignoring option -d
>>>>>>>>>> fuse-dfs ignoring option -o
>>>>>>>>>> fuse-dfs didn't recognize allow_other,-2
>>>>>>>>>> fuse: invalid argument `allow_other'
>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>> 2009-12-15, "Hazem Mahmoud" <hmahmoud@gmail.com>
wrote:
>>>>>>>>>>> Here are some notes I took when installing fuse
here on our system. This was
>>>>>>>>>>> for a hadoop 19.1 installation.
>>>>>>>>>>>
>>>>>>>>>>>
>>>>>>>>>>>   Installing Fuse/Hadoop:
>>>>>>>>>>>
>>>>>>>>>>> 2.     mkdir /hdfs/client01
>>>>>>>>>>>
>>>>>>>>>>> 4.     Install Sun JDK 1.6 (must be Sun)
>>>>>>>>>>>
>>>>>>>>>>> a.     RPM: jdk-6u14-ea-linux-amd64.rpm
>>>>>>>>>>>
>>>>>>>>>>> b.     This DID NOT WORK:
>>>>>>>>>>> java version "1.6.0"
>>>>>>>>>>> OpenJDK  Runtime Environment (build 1.6.0-b09)
>>>>>>>>>>> OpenJDK 64-Bit Server VM (build 1.6.0-b09, mixed
mode)
>>>>>>>>>>>
>>>>>>>>>>> c.      Must be this:
>>>>>>>>>>> java version "1.6.0_14-ea"
>>>>>>>>>>> Java(TM) SE Runtime Environment (build 1.6.0_14-ea-b04)
>>>>>>>>>>> Java HotSpot(TM) 64-Bit Server VM (build 14.0-b13,
mixed mode)
>>>>>>>>>>>
>>>>>>>>>>> 5.     wget
>>>>>>>>>>> http://newman.ultralight.org/repos/hadoop/4/x86_64/hadoop-0.19.1-7.el4.x86_64.rpm
>>>>>>>>>>>
>>>>>>>>>>> 6.     wget
>>>>>>>>>>> http://newman.ultralight.org/repos/hadoop/4/x86_64/fuse-libs-2.7.4-8_10.el4.x86_64.rpm
>>>>>>>>>>>
>>>>>>>>>>> 7.     wget
>>>>>>>>>>> http://dag.wieers.com/rpm/packages/fuse/fuse-2.7.3-1.el5.rf.x86_64.rpm
>>>>>>>>>>> OR
>>>>>>>>>>> wget
>>>>>>>>>>> http://newman.ultralight.org/repos/hadoop/4/x86_64/fuse-2.7.4-8_10.el4.x86_64.rpm
>>>>>>>>>>>
>>>>>>>>>>> 8.     wget
>>>>>>>>>>> http://newman.ultralight.org/repos/hadoop/4/x86_64/hadoop-fuse-0.19.1-7.el4.x86_64.rpm
>>>>>>>>>>>
>>>>>>>>>>> 9.     rpm –ivh hadoop-0.19.1-7.el4.x86_64.rpm
>>>>>>>>>>>
>>>>>>>>>>> 10. rpm –ivh fuse-2.7.3-1.el5.rf.x86_64.rpm
>>>>>>>>>>>
>>>>>>>>>>> 11. rpm –ivh hadoop-fuse-0.19.1-7.el4.x86_64.rpm
>>>>>>>>>>>
>>>>>>>>>>> 12. rpm –ivh fuse-libs-2.7.4-8_10.el4.x86_64.rpm
>>>>>>>>>>>
>>>>>>>>>>> 13. yum install hadoop
>>>>>>>>>>>
>>>>>>>>>>> 14. yum install hadoop-fuse fuse-libs
>>>>>>>>>>>
>>>>>>>>>>> 15. In /etc/fstab:
>>>>>>>>>>>
>>>>>>>>>>> a.     hdfs# /hdfs/client01 fuse
>>>>>>>>>>> server=<server_hostname>,port=9000,rdbuffer=1048576,allow_other,big_writes
0
>>>>>>>>>>> 0
>>>>>>>>>>>
>>>>>>>>>>>
>>>>>>>>>>> 16. Then execute command:
>>>>>>>>>>>
>>>>>>>>>>> a.     mount /hdfs/client01
>>>>>>>>>>>
>>>>>>>>>>> b.      ls /hdfs/client01
>>>>>>>>>>>
>>>>>>>>>>> 17. References:
>>>>>>>>>>>
>>>>>>>>>>> a.     https://twiki.grid.iu.edu/bin/view/Storage/HadoopInstallation
>>>>>>>>>>>
>>>>>>>>>>> b.     http://wiki.apache.org/hadoop/MountableHDFS
>>>>>>>>>>>
>>>>>>>>>>> 18. Error:
>>>>>>>>>>>
>>>>>>>>>>> a.     After performing a “yum upgrade”
on the entire system it broke the
>>>>>>>>>>> fuse installation:
>>>>>>>>>>>
>>>>>>>>>>>                              
               i.     [root@host1 ~]#
>>>>>>>>>>> modprobe fuse
>>>>>>>>>>> FATAL: Module fuse not found.
>>>>>>>>>>>
>>>>>>>>>>>                              
              ii.     [root@host1 ~]# mount
>>>>>>>>>>> /hdfs/client01
>>>>>>>>>>> port=11091,server=#A#####
>>>>>>>>>>> fuse-dfs didn't recognize /hdfs/client01,-2
>>>>>>>>>>> fuse-dfs ignoring option allow_other
>>>>>>>>>>> fuse: device not found, try 'modprobe fuse' first
>>>>>>>>>>>
>>>>>>>>>>> b.     Solution: Reinstall newer RPM’s:
>>>>>>>>>>>
>>>>>>>>>>>                              
               i.
>>>>>>>>>>> fuse-2.7.4-8_10.el5.x86_64.rpm
>>>>>>>>>>>
>>>>>>>>>>>                              
              ii.
>>>>>>>>>>> fuse-kmdl-2.6.18-128.1.10.el5-2.7.4-8_10.el5.x86_64.rpm
>>>>>>>>>>>
>>>>>>>>>>>                              
            iii.
>>>>>>>>>>> fuse-libs-2.7.4-8_10.el5.x86_64.rpm
>>>>>>>>>>>
>>>>>>>>>>>                              
            iv.
>>>>>>>>>>> hadoop-0.19.1-7.el5.x86_64.rpm
>>>>>>>>>>>
>>>>>>>>>>>                              
              v.
>>>>>>>>>>> hadoop-fuse-0.19.1-8.el5.x86_64.rpm
>>>>>>>>>>>
>>>>>>>>>>>
>>>>>>>>>>>
>>>>>>>>>>>
>>>>>>>>>>>
>>>>>>>>>>> 2009/12/13 lwm <lwm_zju@126.com>
>>>>>>>>>>>
>>>>>>>>>>>> Hi all
>>>>>>>>>>>> I have installed hadoop with version 0.18.2,
and I want to use fuse in
>>>>>>>>>>>> hadoop. Following the src/contrib/fuse-dfs/README,
I executed "ant
>>>>>>>>>>>> compile-contrib -Dlibhdfs=1 -Dfusedfs=1 ",
an error occured. I can't fix it.
>>>>>>>>>>>> Can anyone help me? Or is there a good install
guide for me?
>>>>>>>>>>>> thanks.
>>>>>>>>>>>> When I executed "ant compile-contrib -Dcompile.c++=1
-Dfusedfs=1
>>>>>>>>>>>> -Dlibhdfs.noperms=1", it was built successfully.
>>>>>>>>>>>>
>>>>>>>>>>>> compile:
>>>>>>>>>>>>    [echo] contrib: fuse-dfs
>>>>>>>>>>>>    [exec] automake: Makefile.am: required
file `./NEWS' not found
>>>>>>>>>>>>    [exec] automake: Makefile.am: required
file `./AUTHORS' not found
>>>>>>>>>>>>    [exec] automake: Makefile.am: required
file `./ChangeLog' not found
>>>>>>>>>>>>    [exec] src/Makefile.am:19: invalid unused
variable name: `AM_LDFLAGS'
>>>>>>>>>>>>    [exec] configure: error: cannot run
/bin/bash ./config.sub
>>>>>>>>>>>> BUILD FAILED
>>>>>>>>>>>> /home/wm/work/hadoop/build.xml:410: The following
error occurred while
>>>>>>>>>>>> executing this line:
>>>>>>>>>>>> /home/wm/work/hadoop/src/contrib/build.xml:30:
The following error occurred
>>>>>>>>>>>> while executing this line:
>>>>>>>>>>>> /home/wm/work/hadoop/src/contrib/fuse-dfs/build.xml:54:
exec returned: 1
>>>>>>>>>>>>
>>>>>>>>>>>>
>>>>>>>>>>>>
>>>>>>>>>>>
>>>>>>>>>>>
>>>>>>>>>>> --
>>>>>>>>>
>>>>>>>>>
>>>>>>>>
>>>>>>>
>>>>>
>>>>>
>>>>>
>>>
>>>
>>
>

Mime
View raw message