
Hadoop 2.6 HDFS Java Client Operations in IDEA


Start the Hadoop daemons on Linux

[root@mihaoyu151 hadoop]# start-all.sh
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
21/10/26 02:53:21 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Starting namenodes on [mihaoyu151]
mihaoyu151: starting namenode, logging to /opt/soft/hadoop260/logs/hadoop-root-namenode-mihaoyu151.out
mihaoyu151: starting datanode, logging to /opt/soft/hadoop260/logs/hadoop-root-datanode-mihaoyu151.out
Starting secondary namenodes [mihaoyu151]
mihaoyu151: starting secondarynamenode, logging to /opt/soft/hadoop260/logs/hadoop-root-secondarynamenode-mihaoyu151.out
21/10/26 02:53:42 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
starting yarn daemons
starting resourcemanager, logging to /opt/soft/hadoop260/logs/yarn-root-resourcemanager-mihaoyu151.out
mihaoyu151: starting nodemanager, logging to /opt/soft/hadoop260/logs/yarn-root-nodemanager-mihaoyu151.out
[root@mihaoyu151 hadoop]# jps
1875 DataNode
1782 NameNode
2200 ResourceManager
2616 Jps
2057 SecondaryNameNode
2333 NodeManager

Start the JobHistory server

[root@mihaoyu151 hadoop]# mr-jobhistory-daemon.sh start historyserver
starting historyserver, logging to /opt/soft/hadoop260/logs/mapred-root-historyserver-mihaoyu151.out
[root@mihaoyu151 hadoop]# jps
1875 DataNode
2660 JobHistoryServer
2693 Jps
1782 NameNode
2200 ResourceManager
2057 SecondaryNameNode
2333 NodeManager

Create a Maven project in IDEA
Configure pom.xml

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <groupId>nj.zb</groupId>
  <artifactId>hadoopstu</artifactId>
  <version>1.0-SNAPSHOT</version>

  <name>hadoopstu</name>
  <!-- FIXME change it to the project's website -->
  <url>http://www.example.com</url>

  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <maven.compiler.source>1.8</maven.compiler.source>
    <maven.compiler.target>1.8</maven.compiler.target>
    <hadoop-version>2.6.0</hadoop-version>
  </properties>

  <dependencies>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-common</artifactId>
      <version>${hadoop-version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-hdfs</artifactId>
      <version>${hadoop-version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-mapreduce-client-core</artifactId>
      <version>${hadoop-version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-mapreduce-client-common</artifactId>
      <version>${hadoop-version}</version>
    </dependency>
    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>4.11</version>
      <scope>test</scope>
    </dependency>
  </dependencies>

  <build>
    <pluginManagement><!-- lock down plugins versions to avoid using Maven defaults (may be moved to parent pom) -->
      <plugins>
        <!-- clean lifecycle, see https://maven.apache.org/ref/current/maven-core/lifecycles.html#clean_Lifecycle -->
        <plugin>
          <artifactId>maven-clean-plugin</artifactId>
          <version>3.1.0</version>
        </plugin>
        <!-- default lifecycle, jar packaging: see https://maven.apache.org/ref/current/maven-core/default-bindings.html#Plugin_bindings_for_jar_packaging -->
        <plugin>
          <artifactId>maven-resources-plugin</artifactId>
          <version>3.0.2</version>
        </plugin>
        <plugin>
          <artifactId>maven-compiler-plugin</artifactId>
          <version>3.8.0</version>
        </plugin>
        <plugin>
          <artifactId>maven-surefire-plugin</artifactId>
          <version>2.22.1</version>
        </plugin>
        <plugin>
          <artifactId>maven-jar-plugin</artifactId>
          <version>3.0.2</version>
        </plugin>
        <plugin>
          <artifactId>maven-install-plugin</artifactId>
          <version>2.5.2</version>
        </plugin>
        <plugin>
          <artifactId>maven-deploy-plugin</artifactId>
          <version>2.8.2</version>
        </plugin>
        <!-- site lifecycle, see https://maven.apache.org/ref/current/maven-core/lifecycles.html#site_Lifecycle -->
        <plugin>
          <artifactId>maven-site-plugin</artifactId>
          <version>3.7.1</version>
        </plugin>
        <plugin>
          <artifactId>maven-project-info-reports-plugin</artifactId>
          <version>3.0.0</version>
        </plugin>
      </plugins>
    </pluginManagement>
  </build>
</project>
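
One practical note: the cluster above runs as root, while the IDEA client usually runs under your local desktop account, so uploads can fail with an AccessControlException under HDFS's default simple authentication. A minimal workaround sketch (an assumption based on this setup, not part of the original article) is to set the user name before the first FileSystem.get() call:

// Assumption: simple authentication, with the HDFS paths used below owned by root.
// Hadoop also honors the HADOOP_USER_NAME environment variable for the same purpose.
System.setProperty("HADOOP_USER_NAME", "root");

Alternatively, the FileSystem.get(URI, Configuration, String) overload accepts the remote user name directly as its third argument.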

Create a new HDFSop.java file

package nj.zb;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;

public class HDFSop {
    public static void main(String[] args) throws Exception {
        hdfsWriteFile("G:\\kgc\\KB15\\code\\hadoopstu\\resource\\hellowworld.txt",
                "hdfs://192.168.133.151:9000/",
                "hdfs://192.168.133.151:9000/input/hw.txt");
        hdfsReadFile("hdfs://192.168.133.151:9000/input/hw.txt",
                "hdfs://192.168.133.151:9000/",
                "G:\\kgc\\KB15\\code\\hadoopstu\\helloworld.txt");
    }

    public static void hdfsReadFile(String hdfsFile, String hdfsUrl, String fileName) throws Exception {
        Configuration cfg = new Configuration();
        cfg.set("fs.defaultFS", hdfsUrl);
        FileSystem fileSystem = FileSystem.get(cfg);
        if (!fileSystem.exists(new Path(hdfsFile))) {
            throw new Exception("The file to download does not exist!");
        }
        try {
            // Read: open an input stream on the given HDFS file
            FSDataInputStream fsdiStream = fileSystem.open(new Path(hdfsFile));
            try {
                // Output stream that writes the data to the local file
                FileOutputStream fileOutputStream = new FileOutputStream(fileName);
                try {
                    byte[] buffer = new byte[2048];
                    int count = fsdiStream.read(buffer, 0, 2048);
                    while (count > 0) {
                        fileOutputStream.write(buffer, 0, count);
                        count = fsdiStream.read(buffer, 0, 2048);
                    }
                } catch (IOException e) {
                    e.printStackTrace();
                } finally {
                    fileOutputStream.close();
                }
            } catch (FileNotFoundException e) {
                e.printStackTrace();
            } finally {
                fsdiStream.close();
            }
        } catch (IOException e) {
            e.printStackTrace();
        } catch (IllegalArgumentException e) {
            e.printStackTrace();
        }
    }

    public static void hdfsWriteFile(String fileName, String hdfsUrl, String hdfsFile) throws Exception {
        Configuration cfg = new Configuration();
        cfg.set("fs.defaultFS", hdfsUrl);
        FileSystem fileSystem = FileSystem.get(cfg);
        if (fileSystem.exists(new Path(hdfsFile))) {
            throw new Exception("The target file already exists!");
        }
        try {
            // Output stream that writes data into the HDFS file system
            FSDataOutputStream fsDataOutputStream = fileSystem.create(new Path(hdfsFile));
            try {
                // Input stream that reads the local file to upload
                FileInputStream fileInputStream = new FileInputStream(fileName);
                try {
                    byte[] buffer = new byte[2048];
                    int count = fileInputStream.read(buffer, 0, 2048);
                    while (count > 0) {
                        fsDataOutputStream.write(buffer, 0, count);  // write to HDFS
                        count = fileInputStream.read(buffer, 0, 2048);
                    }
                } catch (IOException e) {
                    e.printStackTrace();
                } finally {
                    fileInputStream.close();
                }
            } catch (FileNotFoundException e) {
                e.printStackTrace();
            } finally {
                fsDataOutputStream.close();
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
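
The byte-buffer loops above make the copy mechanics explicit. For comparison, the same upload and download can be written more compactly with FileSystem's built-in copy helpers. The sketch below is an illustrative alternative, not part of the original article: the class name HDFSopSimple is hypothetical, and it reuses the addresses and paths from the code above.

package nj.zb;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Compact alternative to HDFSop using FileSystem's copy helpers.
public class HDFSopSimple {
    public static void main(String[] args) throws Exception {
        Configuration cfg = new Configuration();
        cfg.set("fs.defaultFS", "hdfs://192.168.133.151:9000/");
        FileSystem fileSystem = FileSystem.get(cfg);

        // Upload: local file -> HDFS (equivalent to hdfsWriteFile above)
        fileSystem.copyFromLocalFile(
                new Path("G:\\kgc\\KB15\\code\\hadoopstu\\resource\\hellowworld.txt"),
                new Path("/input/hw.txt"));

        // Download: HDFS -> local file (equivalent to hdfsReadFile above)
        fileSystem.copyToLocalFile(
                new Path("/input/hw.txt"),
                new Path("G:\\kgc\\KB15\\code\\hadoopstu\\helloworld.txt"));

        fileSystem.close();
    }
}

One caveat: on Windows, copyToLocalFile goes through Hadoop's local file system layer, which on Hadoop 2.x may require winutils.exe to be installed; the four-argument overload copyToLocalFile(false, src, dst, true) uses the raw local file system and also skips writing the local .crc checksum file.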