
hadoop mapreduce: java.lang.UnsatisfiedLinkError: org.apache.hadoop.util.NativeCodeLoader.buildSupportsSnappy()Z

Date: 2023-09-26
This article describes how to handle hadoop mapreduce: java.lang.UnsatisfiedLinkError: org.apache.hadoop.util.NativeCodeLoader.buildSupportsSnappy()Z. It should be a useful reference if you are running into the same problem.

Problem description

I am trying to write a snappy block-compressed sequence file from a map-reduce job. I am using hadoop 2.0.0-cdh4.5.0 and snappy-java 1.0.4.1.

Here is my code:

                  package jinvestor.jhouse.mr;
                  
                  import java.io.ByteArrayOutputStream;
                  import java.io.IOException;
                  import java.io.OutputStream;
                  import java.util.Arrays;
                  import java.util.List;
                  
                  import jinvestor.jhouse.core.House;
                  import jinvestor.jhouse.core.util.HouseAvroUtil;
                  import jinvestor.jhouse.download.HBaseHouseDAO;
                  
                  import org.apache.commons.io.IOUtils;
                  import org.apache.hadoop.conf.Configuration;
                  import org.apache.hadoop.fs.FileSystem;
                  import org.apache.hadoop.fs.LocatedFileStatus;
                  import org.apache.hadoop.fs.Path;
                  import org.apache.hadoop.fs.RemoteIterator;
                  import org.apache.hadoop.hbase.client.Result;
                  import org.apache.hadoop.hbase.client.Scan;
                  import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
                  import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
                  import org.apache.hadoop.hbase.mapreduce.TableMapper;
                  import org.apache.hadoop.hbase.util.Bytes;
                  import org.apache.hadoop.io.LongWritable;
                  import org.apache.hadoop.io.SequenceFile;
                  import org.apache.hadoop.io.compress.CompressionCodec;
                  import org.apache.hadoop.io.compress.SnappyCodec;
                  import org.apache.hadoop.mapred.FileOutputFormat;
                  import org.apache.hadoop.mapred.JobConf;
                  import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
                  import org.apache.hadoop.mapreduce.Job;
                  import org.apache.mahout.math.DenseVector;
                  import org.apache.mahout.math.NamedVector;
                  import org.apache.mahout.math.VectorWritable;
                  
                  /**
                   * Produces mahout vectors from House entries in HBase.
                   * 
                   * @author Michael Scott Knapp
                   * 
                   */
                  public class HouseVectorizer {
                  
                      private final Configuration configuration;
                      private final House minimumHouse;
                      private final House maximumHouse;
                  
                      public HouseVectorizer(final Configuration configuration,
                              final House minimumHouse, final House maximumHouse) {
                          this.configuration = configuration;
                          this.minimumHouse = minimumHouse;
                          this.maximumHouse = maximumHouse;
                      }
                  
                      public void vectorize() throws IOException, ClassNotFoundException, InterruptedException {
                          JobConf jobConf = new JobConf();
                          jobConf.setMapOutputKeyClass(LongWritable.class);
                          jobConf.setMapOutputValueClass(VectorWritable.class);
                  
                          // we want the vectors written straight to HDFS,
                          // the order does not matter.
                          jobConf.setNumReduceTasks(0);
                  
                          Path outputDir = new Path("/home/cloudera/house_vectors");
                          FileSystem fs = FileSystem.get(configuration);
                          if (fs.exists(outputDir)) {
                              fs.delete(outputDir, true);
                          }
                  
                          FileOutputFormat.setOutputPath(jobConf, outputDir);
                  
                          // I want the mappers to know the max and min value
                          // so they can normalize the data.
                          // I will add them as properties in the configuration,
                          // by serializing them with avro.
                          String minmax = HouseAvroUtil.toBase64String(Arrays.asList(minimumHouse,
                                  maximumHouse));
                          jobConf.set("minmax", minmax);
                  
                          Job job = Job.getInstance(jobConf);
                          Scan scan = new Scan();
                          scan.addFamily(Bytes.toBytes("data"));
                          TableMapReduceUtil.initTableMapperJob("homes", scan,
                                  HouseVectorizingMapper.class, LongWritable.class,
                                  VectorWritable.class, job);
                          job.setOutputFormatClass(SequenceFileOutputFormat.class);
                          job.setOutputKeyClass(LongWritable.class);
                          job.setOutputValueClass(VectorWritable.class);
                          job.setMapOutputKeyClass(LongWritable.class);
                          job.setMapOutputValueClass(VectorWritable.class);
                  
                          SequenceFileOutputFormat.setOutputCompressionType(job, SequenceFile.CompressionType.BLOCK);
                          SequenceFileOutputFormat.setOutputCompressorClass(job, SnappyCodec.class);
                          SequenceFileOutputFormat.setOutputPath(job, outputDir);
                          job.getConfiguration().setClass("mapreduce.map.output.compress.codec", 
                                  SnappyCodec.class, 
                                  CompressionCodec.class);
                  
                          job.waitForCompletion(true);
                        }
                    }

When I run it I get this:

                  java.lang.Exception: java.lang.UnsatisfiedLinkError: org.apache.hadoop.util.NativeCodeLoader.buildSupportsSnappy()Z
                      at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:401)
                  Caused by: java.lang.UnsatisfiedLinkError: org.apache.hadoop.util.NativeCodeLoader.buildSupportsSnappy()Z
                      at org.apache.hadoop.util.NativeCodeLoader.buildSupportsSnappy(Native Method)
                      at org.apache.hadoop.io.compress.SnappyCodec.checkNativeCodeLoaded(SnappyCodec.java:62)
                      at org.apache.hadoop.io.compress.SnappyCodec.getCompressorType(SnappyCodec.java:127)
                      at org.apache.hadoop.io.compress.CodecPool.getCompressor(CodecPool.java:104)
                      at org.apache.hadoop.io.compress.CodecPool.getCompressor(CodecPool.java:118)
                      at org.apache.hadoop.io.SequenceFile$Writer.init(SequenceFile.java:1169)
                      at org.apache.hadoop.io.SequenceFile$Writer.<init>(SequenceFile.java:1080)
                      at org.apache.hadoop.io.SequenceFile$BlockCompressWriter.<init>(SequenceFile.java:1400)
                      at org.apache.hadoop.io.SequenceFile.createWriter(SequenceFile.java:274)
                      at org.apache.hadoop.io.SequenceFile.createWriter(SequenceFile.java:527)
                      at org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat.getSequenceWriter(SequenceFileOutputFormat.java:64)
                      at org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat.getRecordWriter(SequenceFileOutputFormat.java:75)
                      at org.apache.hadoop.mapred.MapTask$NewDirectOutputCollector.<init>(MapTask.java:617)
                      at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:737)
                      at org.apache.hadoop.mapred.MapTask.run(MapTask.java:338)
                      at org.apache.hadoop.mapred.LocalJobRunner$Job$MapTaskRunnable.run(LocalJobRunner.java:233)
                      at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471)
                      at java.util.concurrent.FutureTask.run(FutureTask.java:262)
                      at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
                      at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
                      at java.lang.Thread.run(Thread.java:744)
                  

If I comment out these lines then my test passes:

                  SequenceFileOutputFormat.setOutputCompressionType(job, SequenceFile.CompressionType.BLOCK);
                  SequenceFileOutputFormat.setOutputCompressorClass(job, SnappyCodec.class);
                  job.getConfiguration().setClass("mapreduce.map.output.compress.codec",
                          SnappyCodec.class,
                          CompressionCodec.class);
                  

However, I really want to use snappy compression in my sequence files. Can somebody please explain to me what I am doing wrong?

Recommended answer

From the Cloudera community:

1. Ensure that LD_LIBRARY_PATH and JAVA_LIBRARY_PATH contain the path of the native directory that holds the libsnappy.so* files (a quick check is sketched below).
2. Ensure that LD_LIBRARY_PATH and JAVA_LIBRARY_PATH have been exported in the Spark environment (spark-env.sh).
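
As a hedged aside (not part of the Cloudera answer), one way to confirm point 1 is to look for libsnappy.so and ask Hadoop which native codecs it can actually load. The directories searched below are common defaults rather than confirmed paths, and the checknative subcommand only exists on more recent Hadoop releases.

    # Locate the native library directory that actually contains libsnappy.so
    # (the search paths are assumptions; adjust them to your distribution).
    find /usr/lib/hadoop /usr/hdp /opt/cloudera -name 'libsnappy.so*' 2>/dev/null
    # On newer Hadoop versions, report which native libraries (zlib, snappy, ...) were found.
    hadoop checknative -a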

For example, I use Hortonworks HDP and I have the following configuration in my spark-env.sh:

                  export JAVA_LIBRARY_PATH=$JAVA_LIBRARY_PATH:/usr/hdp/2.2.0.0-2041/hadoop/lib/native
                  export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/hdp/2.2.0.0-2041/hadoop/lib/native
                  export SPARK_YARN_USER_ENV="JAVA_LIBRARY_PATH=$JAVA_LIBRARY_PATH,LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
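
Note that the original question fails inside a MapReduce LocalJobRunner rather than in Spark, but the same two points apply to whichever JVM launches the job. Below is a minimal sketch, assuming a CDH-style native directory at /usr/lib/hadoop/lib/native (an assumption; point it at wherever libsnappy.so and libhadoop.so actually live):

    export JAVA_LIBRARY_PATH=$JAVA_LIBRARY_PATH:/usr/lib/hadoop/lib/native
    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib/hadoop/lib/native
    # For a local run, the client JVM can also be pointed at the native directory directly:
    export HADOOP_OPTS="$HADOOP_OPTS -Djava.library.path=/usr/lib/hadoop/lib/native"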
                  

This concludes the article on hadoop mapreduce: java.lang.UnsatisfiedLinkError: org.apache.hadoop.util.NativeCodeLoader.buildSupportsSnappy()Z. We hope the recommended answer above helps anyone hitting the same error.
