
Configuration Notes for Accessing the Hadoop Distributed File System (HDFS) from Java


Configuration files

In the configuration below, replace m103 with the address of your HDFS service.
To read and write files on HDFS from a Java client, the key configuration file is hadoop-0.20.2/conf/core-site.xml. This is where I originally went wrong, which is why I could not connect to HDFS at all and files could not be created or read.

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>
 <!-- global properties -->
 <property>
  <name>hadoop.tmp.dir</name>
  <value>/home/zhangzk/hadoop</value>
  <description>A base for other temporary directories.</description>
 </property>
 <!-- file system properties -->
 <property>
  <name>fs.default.name</name>
  <value>hdfs://linux-zzk-113:9000</value>
 </property>
</configuration>

Configuration item: hadoop.tmp.dir is the directory where the NameNode stores its metadata; on a DataNode it is the directory where that node stores file data.

Configuration item: fs.default.name is the NameNode's address and port; the default value is file:///. A Java API client must use the URI configured here to connect to HDFS, and DataNodes also use this URI to reach the NameNode.
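To make the role of fs.default.name concrete, here is a minimal connection check (not from the original article): the class name HdfsConnectionCheck and the local path to core-site.xml are illustrative, and the addResource call is only needed when the file is not already on the client classpath.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsConnectionCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Load the client-side core-site.xml explicitly if it is not on the classpath.
        conf.addResource(new Path("/path/to/conf/core-site.xml"));
        // The URI must match fs.default.name, e.g. hdfs://linux-zzk-113:9000.
        FileSystem fs = FileSystem.get(URI.create("hdfs://linux-zzk-113:9000"), conf);
        System.out.println("Connected, home directory: " + fs.getHomeDirectory());
        fs.close();
    }
}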

hdfs-site.xml

<?xml version="1.0" encoding="UTF-8"?>

<!-- Autogenerated by Cloudera Manager -->
<configuration>
 <property>
  <name>dfs.namenode.name.dir</name>
  <value>file:///mnt/sdc1/dfs/nn</value>
 </property>
 <property>
  <name>dfs.namenode.servicerpc-address</name>
  <value>m103:8022</value>
 </property>
 <property>
  <name>dfs.https.address</name>
  <value>m103:50470</value>
 </property>
 <property>
  <name>dfs.https.port</name>
  <value>50470</value>
 </property>
 <property>
  <name>dfs.namenode.http-address</name>
  <value>m103:50070</value>
 </property>
 <property>
  <name>dfs.replication</name>
  <value>3</value>
 </property>
 <property>
  <name>dfs.blocksize</name>
  <value>134217728</value>
 </property>
 <property>
  <name>dfs.client.use.datanode.hostname</name>
  <value>false</value>
 </property>
 <property>
  <name>fs.permissions.umask-mode</name>
  <value>022</value>
 </property>
 <property>
  <name>dfs.namenode.acls.enabled</name>
  <value>false</value>
 </property>
 <property>
  <name>dfs.block.local-path-access.user</name>
  <value>cloudera-scm</value>
 </property>
 <property>
  <name>dfs.client.read.shortcircuit</name>
  <value>false</value>
 </property>
 <property>
  <name>dfs.domain.socket.path</name>
  <value>/var/run/hdfs-sockets/dn</value>
 </property>
 <property>
  <name>dfs.client.read.shortcircuit.skip.checksum</name>
  <value>false</value>
 </property>
 <property>
  <name>dfs.client.domain.socket.data.traffic</name>
  <value>false</value>
 </property>
 <property>
  <name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
  <value>true</value>
 </property>
 <property>
  <name>fs.http.impl</name>
  <value>com.scistor.datavision.fs.HttpFileSystem</value>
 </property>
</configuration>
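A client program can load this hdfs-site.xml into its Configuration the same way as core-site.xml. The sketch below (class name and the /etc/hadoop/conf file locations are assumptions, not from the article) simply prints the effective replication factor and block size so you can verify the resources were picked up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class PrintHdfsSettings {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Assumed local copies of the cluster configuration files.
        conf.addResource(new Path("/etc/hadoop/conf/core-site.xml"));
        conf.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"));
        // If the resources are missing, the second argument is used as the fallback.
        System.out.println("dfs.replication = " + conf.getInt("dfs.replication", 3));
        System.out.println("dfs.blocksize   = " + conf.getLong("dfs.blocksize", 134217728L));
    }
}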

mapred-site.xml

<?xml version="1.0" encoding="UTF-8"?>

<!-- Autogenerated by Cloudera Manager -->
<configuration>
 <property>
  <name>mapreduce.job.split.metainfo.maxsize</name>
  <value>10000000</value>
 </property>
 <property>
  <name>mapreduce.job.counters.max</name>
  <value>120</value>
 </property>
 <property>
  <name>mapreduce.output.fileoutputformat.compress</name>
  <value>true</value>
 </property>
 <property>
  <name>mapreduce.output.fileoutputformat.compress.type</name>
  <value>BLOCK</value>
 </property>
 <property>
  <name>mapreduce.output.fileoutputformat.compress.codec</name>
  <value>org.apache.hadoop.io.compress.SnappyCodec</value>
 </property>
 <property>
  <name>mapreduce.map.output.compress.codec</name>
  <value>org.apache.hadoop.io.compress.SnappyCodec</value>
 </property>
 <property>
  <name>mapreduce.map.output.compress</name>
  <value>true</value>
 </property>
 <property>
  <name>zlib.compress.level</name>
  <value>DEFAULT_COMPRESSION</value>
 </property>
 <property>
  <name>mapreduce.task.io.sort.factor</name>
  <value>64</value>
 </property>
 <property>
  <name>mapreduce.map.sort.spill.percent</name>
  <value>0.8</value>
 </property>
 <property>
  <name>mapreduce.reduce.shuffle.parallelcopies</name>
  <value>10</value>
 </property>
 <property>
  <name>mapreduce.task.timeout</name>
  <value>600000</value>
 </property>
 <property>
  <name>mapreduce.client.submit.file.replication</name>
  <value>1</value>
 </property>
 <property>
  <name>mapreduce.job.reduces</name>
  <value>24</value>
 </property>
 <property>
  <name>mapreduce.task.io.sort.mb</name>
  <value>256</value>
 </property>
 <property>
  <name>mapreduce.map.speculative</name>
  <value>false</value>
 </property>
 <property>
  <name>mapreduce.reduce.speculative</name>
  <value>false</value>
 </property>
 <property>
  <name>mapreduce.job.reduce.slowstart.completedmaps</name>
  <value>0.8</value>
 </property>
 <property>
  <name>mapreduce.jobhistory.address</name>
  <value>m103:10020</value>
 </property>
 <property>
  <name>mapreduce.jobhistory.webapp.address</name>
  <value>m103:19888</value>
 </property>
 <property>
  <name>mapreduce.jobhistory.webapp.https.address</name>
  <value>m103:19890</value>
 </property>
 <property>
  <name>mapreduce.jobhistory.admin.address</name>
  <value>m103:10033</value>
 </property>
 <property>
  <name>mapreduce.framework.name</name>
  <value>yarn</value>
 </property>
 <property>
  <name>yarn.app.mapreduce.am.staging-dir</name>
  <value>/user</value>
 </property>
 <property>
  <name>mapreduce.am.max-attempts</name>
  <value>2</value>
 </property>
 <property>
  <name>yarn.app.mapreduce.am.resource.mb</name>
  <value>2048</value>
 </property>
 <property>
  <name>yarn.app.mapreduce.am.resource.cpu-vcores</name>
  <value>1</value>
 </property>
 <property>
  <name>mapreduce.job.ubertask.enable</name>
  <value>false</value>
 </property>
 <property>
  <name>yarn.app.mapreduce.am.command-opts</name>
  <value>-Djava.net.preferIPv4Stack=true -Xmx1717986918</value>
 </property>
 <property>
  <name>mapreduce.map.java.opts</name>
  <value>-Djava.net.preferIPv4Stack=true -Xmx1717986918</value>
 </property>
 <property>
  <name>mapreduce.reduce.java.opts</name>
  <value>-Djava.net.preferIPv4Stack=true -Xmx2576980378</value>
 </property>
 <property>
  <name>yarn.app.mapreduce.am.admin.user.env</name>
  <value>LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native:$JAVA_LIBRARY_PATH</value>
 </property>
 <property>
  <name>mapreduce.map.memory.mb</name>
  <value>2048</value>
 </property>
 <property>
  <name>mapreduce.map.cpu.vcores</name>
  <value>1</value>
 </property>
 <property>
  <name>mapreduce.reduce.memory.mb</name>
  <value>3072</value>
 </property>
 <property>
  <name>mapreduce.reduce.cpu.vcores</name>
  <value>1</value>
 </property>
 <property>
  <name>mapreduce.application.classpath</name>
  <value>$HADOOP_MAPRED_HOME/*,$HADOOP_MAPRED_HOME/lib/*,$MR2_CLASSPATH,$CDH_HCAT_HOME/share/hcatalog/*,$CDH_HIVE_HOME/lib/*,/etc/hive/conf,/opt/cloudera/parcels/CDH/lib/udps/*</value>
 </property>
 <property>
  <name>mapreduce.admin.user.env</name>
  <value>LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native:$JAVA_LIBRARY_PATH</value>
 </property>
 <property>
  <name>mapreduce.shuffle.max.connections</name>
  <value>80</value>
 </property>
</configuration>
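With mapreduce.framework.name set to yarn, a client-side Job submits to the ResourceManager and picks these values up automatically. The short sketch below (the class name and the programmatic overrides are illustrative, not part of the original article) shows how code-level settings relate to mapred-site.xml: anything set on the Job overrides the site file.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class JobSettingsSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // With mapreduce.framework.name=yarn the job would go to the ResourceManager.
        Job job = Job.getInstance(conf, "settings-check");
        // Programmatic overrides take precedence over mapred-site.xml.
        job.setNumReduceTasks(24);                       // mapreduce.job.reduces
        job.getConfiguration().setBoolean(
            "mapreduce.map.output.compress", true);      // matches the site file
        System.out.println("Reduces: " + job.getNumReduceTasks());
    }
}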

Accessing HDFS files and directories with the Java API

package com.demo.hdfs;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Progressable;

/**
 * @author zhangzk
 */
public class FileCopyToHDFS {

    public static void main(String[] args) throws Exception {
        try {
            //uploadToHdfs();
            //deleteFromHdfs();
            //getDirectoryFromHdfs();
            appendToHdfs();
            readFromHdfs();
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            System.out.println("success");
        }
    }

    /** Upload a local file to HDFS. */
    private static void uploadToHdfs() throws FileNotFoundException, IOException {
        String localSrc = "d://qq.txt";
        String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq.txt";
        InputStream in = new BufferedInputStream(new FileInputStream(localSrc));
        Configuration conf = new Configuration();

        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        OutputStream out = fs.create(new Path(dst), new Progressable() {
            public void progress() {
                // Print a dot for each progress callback during the upload.
                System.out.print(".");
            }
        });
        IOUtils.copyBytes(in, out, 4096, true);
    }

    /** Read a file from HDFS and copy it to the local file system. */
    private static void readFromHdfs() throws FileNotFoundException, IOException {
        String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq.txt";
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        FSDataInputStream hdfsInStream = fs.open(new Path(dst));

        OutputStream out = new FileOutputStream("d:/qq-hdfs.txt");
        byte[] ioBuffer = new byte[1024];
        int readLen = hdfsInStream.read(ioBuffer);

        while (-1 != readLen) {
            out.write(ioBuffer, 0, readLen);
            readLen = hdfsInStream.read(ioBuffer);
        }
        out.close();
        hdfsInStream.close();
        fs.close();
    }

    /**
     * Append content to the end of a file on HDFS.
     * Note: appending requires the following property in hdfs-site.xml:
     * <property><name>dfs.support.append</name><value>true</value></property>
     */
    private static void appendToHdfs() throws FileNotFoundException, IOException {
        String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq.txt";
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        FSDataOutputStream out = fs.append(new Path(dst));

        // Write the whole byte array once; looping on its length would never terminate.
        byte[] content = "zhangzk add by hdfs java api".getBytes();
        out.write(content, 0, content.length);
        out.close();
        fs.close();
    }

    /** Delete a file from HDFS. */
    private static void deleteFromHdfs() throws FileNotFoundException, IOException {
        String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq-bak.txt";
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        // deleteOnExit removes the path when the FileSystem is closed below.
        fs.deleteOnExit(new Path(dst));
        fs.close();
    }

    /** List the files and directories under an HDFS path. */
    private static void getDirectoryFromHdfs() throws FileNotFoundException, IOException {
        String dst = "hdfs://192.168.0.113:9000/user/zhangzk";
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        FileStatus[] fileList = fs.listStatus(new Path(dst));
        for (FileStatus file : fileList) {
            System.out.println("name: " + file.getPath().getName() + "\t\tsize: " + file.getLen());
        }
        fs.close();
    }
}

Note: the append operation is no longer supported as of Hadoop 0.21; for more on append, see the relevant article on JavaEye.
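On versions that still honour the append flag, the property mentioned in the comment above can also be set on the client side instead of editing hdfs-site.xml. This is a minimal sketch only; the class name is illustrative, and the URI and file path are the same examples used in the code above.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AppendWithFlag {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Client-side equivalent of the hdfs-site.xml property mentioned above;
        // only honoured by Hadoop versions that still support append.
        conf.setBoolean("dfs.support.append", true);
        FileSystem fs = FileSystem.get(URI.create("hdfs://192.168.0.113:9000"), conf);
        FSDataOutputStream out = fs.append(new Path("/user/zhangzk/qq.txt"));
        out.write("appended line\n".getBytes("UTF-8"));
        out.close();
        fs.close();
    }
}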
