
Example source code for org.apache.hadoop.mapreduce.RecordWriter
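
All of the snippets below either implement org.apache.hadoop.mapreduce.RecordWriter or return one from an OutputFormat's getRecordWriter() method. As a reference point before the project-specific examples, here is a minimal sketch of a custom OutputFormat/RecordWriter pair (the class name, key/value types and the tab-separated line format are illustrative assumptions, not taken from any of the projects below):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class TabSeparatedOutputFormat extends FileOutputFormat<Text, Text> {
  @Override
  public RecordWriter<Text, Text> getRecordWriter(TaskAttemptContext context)
      throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();
    // one output file per task attempt, e.g. part-r-00000.txt
    Path file = getDefaultWorkFile(context, ".txt");
    final FSDataOutputStream out = file.getFileSystem(conf).create(file, false);
    return new RecordWriter<Text, Text>() {
      @Override
      public void write(Text key, Text value) throws IOException {
        // one key/value pair per line, separated by a tab
        out.writeBytes(key + "\t" + value + "\n");
      }
      @Override
      public void close(TaskAttemptContext ctxt) throws IOException {
        out.close();
      }
    };
  }
}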

Project: hadoop    File: ReduceContextImpl.java
public ReduceContextImpl(Configuration conf, TaskAttemptID taskid,
                         RawKeyValueIterator input,
                         Counter inputKeyCounter, Counter inputValueCounter,
                         RecordWriter<KEYOUT,VALUEOUT> output,
                         OutputCommitter committer,
                         StatusReporter reporter,
                         RawComparator<KEYIN> comparator,
                         Class<KEYIN> keyClass, Class<VALUEIN> valueClass
                        ) throws InterruptedException, IOException{
  super(conf, taskid, output, committer, reporter);
  this.input = input;
  this.inputKeyCounter = inputKeyCounter;
  this.inputValueCounter = inputValueCounter;
  this.comparator = comparator;
  this.serializationFactory = new SerializationFactory(conf);
  this.keyDeserializer = serializationFactory.getDeserializer(keyClass);
  this.keyDeserializer.open(buffer);
  this.valueDeserializer = serializationFactory.getDeserializer(valueClass);
  this.valueDeserializer.open(buffer);
  hasMore = input.next();
  this.keyClass = keyClass;
  this.valueClass = valueClass;
  this.conf = conf;
  this.taskid = taskid;
}
Project: big_data    File: TransformerOutputFormat.java
/**
 * Defines the output format of each record; the input is the data written by
 * each call to write() in the reduce task.
 */
@Override
public RecordWriter<BaseDimension, BaseStatsValueWritable> getRecordWriter(TaskAttemptContext context)
        throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();
    Connection conn = null;
    IDimensionConverter converter = new DimensionConverterImpl();
    try {
        conn = JdbcManager.getConnection(conf, GlobalConstants.WAREHOUSE_OF_REPORT);
        conn.setAutoCommit(false);
    } catch (SQLException e) {
        logger.error("Failed to obtain a database connection", e);
        throw new IOException("Failed to obtain a database connection", e);
    }
    return new TransformerRecordWriter(conn, conf, converter);
}
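
The TransformerRecordWriter returned above is not part of this listing. As a rough sketch only (the table name, column layout, key/value types and batch size are assumptions for illustration, not the big_data project's actual code), a JDBC-backed RecordWriter built on a connection with auto-commit disabled typically batches its inserts and commits them in write()/close():

import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

public class JdbcBatchRecordWriter extends RecordWriter<Text, IntWritable> {
  private final Connection conn;
  private final PreparedStatement stmt;
  private int pending = 0;

  public JdbcBatchRecordWriter(Connection conn) throws SQLException {
    this.conn = conn;
    // hypothetical target table and columns
    this.stmt = conn.prepareStatement("INSERT INTO stats (dimension, value) VALUES (?, ?)");
  }

  @Override
  public void write(Text key, IntWritable value) throws IOException {
    try {
      stmt.setString(1, key.toString());
      stmt.setInt(2, value.get());
      stmt.addBatch();
      if (++pending >= 500) {   // flush a batch every 500 records
        stmt.executeBatch();
        conn.commit();          // auto-commit was disabled in getRecordWriter()
        pending = 0;
      }
    } catch (SQLException e) {
      throw new IOException(e);
    }
  }

  @Override
  public void close(TaskAttemptContext context) throws IOException {
    try {
      stmt.executeBatch();      // write the final partial batch
      conn.commit();
      stmt.close();
      conn.close();
    } catch (SQLException e) {
      throw new IOException(e);
    }
  }
}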
Project: aliyun-maxcompute-data-collectors    File: DBOutputFormat.java
@Override
/** {@inheritDoc} */
public RecordWriter<K,V> getRecordWriter(TaskAttemptContext context)
    throws IOException {
  DBConfiguration dbConf = new DBConfiguration(context.getConfiguration());
  String tableName = dbConf.getOutputTableName();
  String[] fieldNames = dbConf.getOutputFieldNames();

  if (fieldNames == null) {
    fieldNames = new String[dbConf.getOutputFieldCount()];
  }

  try {
    Connection connection = dbConf.getConnection();
    PreparedStatement statement = null;

    statement = connection.prepareStatement(
                  constructQuery(tableName, fieldNames));
    return new com.cloudera.sqoop.mapreduce.db.DBOutputFormat.DBRecordWriter(
                   connection, statement);
  } catch (Exception ex) {
    throw new IOException(ex);
  }
}
Project: hadoop    File: TestRecovery.java
private void writeBadOutput(TaskAttempt attempt, Configuration conf)
  throws Exception {
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, TypeConverter.fromYarn(attempt.getID()));

  TextOutputFormat<?, ?> theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter = theOutputFormat
      .getRecordWriter(tContext);

  NullWritable nullWritable = NullWritable.get();
  try {
    theRecordWriter.write(key2, val2);
    theRecordWriter.write(null, nullWritable);
    theRecordWriter.write(null, val2);
    theRecordWriter.write(nullWritable, val1);
    theRecordWriter.write(key1, nullWritable);
    theRecordWriter.write(key2, null);
    theRecordWriter.write(null, null);
    theRecordWriter.write(key1, val1);
  } finally {
    theRecordWriter.close(tContext);
  }

  OutputFormat outputFormat = ReflectionUtils.newInstance(
      tContext.getOutputFormatClass(), conf);
  OutputCommitter committer = outputFormat.getOutputCommitter(tContext);
  committer.commitTask(tContext);
}
Project: hadoop    File: TestRecovery.java
private void writeOutput(TaskAttempt attempt, Configuration conf)
  throws Exception {
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, TypeConverter.fromYarn(attempt.getID()));

  TextOutputFormat<?, ?> theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter = theOutputFormat
      .getRecordWriter(tContext);

  NullWritable nullWritable = NullWritable.get();
  try {
    theRecordWriter.write(key1, val1);
    theRecordWriter.write(null, val1);
    theRecordWriter.write(nullWritable, val2);
    theRecordWriter.write(key2, nullWritable);
    theRecordWriter.write(key1, null);
    theRecordWriter.write(key2, val2);
  } finally {
    theRecordWriter.close(tContext);
  }

  OutputFormat outputFormat = ReflectionUtils.newInstance(
      tContext.getOutputFormatClass(), conf);
  OutputCommitter committer = outputFormat.getOutputCommitter(tContext);
  committer.commitTask(tContext);
}
Project: hadoop    File: TestMRCJCFileOutputCommitter.java
@SuppressWarnings("unchecked")
private void writeOutput(RecordWriter theRecordWriter,
    TaskAttemptContext context) throws IOException, InterruptedException {
  NullWritable nullWritable = NullWritable.get();

  try {
    theRecordWriter.write(key1, val2);
  } finally {
    theRecordWriter.close(context);
  }
}
Project: hadoop    File: DBOutputFormat.java
/** {@inheritDoc} */
public RecordWriter<K,V> getRecordWriter(TaskAttemptContext context) 
    throws IOException {
  DBConfiguration dbConf = new DBConfiguration(context.getConfiguration());
  String tableName = dbConf.getOutputTableName();
  String[] fieldNames = dbConf.getOutputFieldNames();

  if(fieldNames == null) {
    fieldNames = new String[dbConf.getOutputFieldCount()];
  }

  try {
    Connection connection = dbConf.getConnection();
    PreparedStatement statement = null;

    statement = connection.prepareStatement(
                  constructQuery(tableName, fieldNames));
    return new DBRecordWriter(connection, statement);
  } catch (Exception ex) {
    throw new IOException(ex.getMessage());
  }
}
Project: hadoop    File: Chain.java
/**
 * Add mapper (the first mapper) that reads input from the input
 * context and writes to queue
 */
@SuppressWarnings("unchecked")
void addMapper(TaskInputOutputContext inputContext,
    ChainBlockingQueue<KeyValuePair<?, ?>> output, int index)
    throws IOException, InterruptedException {
  Configuration conf = getConf(index);
  Class<?> keyOutClass = conf.getClass(MAPPER_OUTPUT_KEY_CLASS, Object.class);
  Class<?> valueOutClass = conf.getClass(MAPPER_OUTPUT_VALUE_CLASS, Object.class);

  RecordReader rr = new ChainRecordReader(inputContext);
  RecordWriter rw = new ChainRecordWriter(keyOutClass, valueOutClass, output, conf);
  Mapper.Context mapperContext = createMapContext(rr, rw,
      (MapContext) inputContext, getConf(index));
  MapRunner runner = new MapRunner(mappers.get(index), mapperContext, rr, rw);
  threads.add(runner);
}
Project: hadoop    File: Chain.java
/**
 * Add mapper that reads and writes from/to the queue
 */
@SuppressWarnings("unchecked")
void addMapper(ChainBlockingQueue<KeyValuePair<?, ?>> input,
    ChainBlockingQueue<KeyValuePair<?, ?>> output,
    TaskInputOutputContext context, int index) throws IOException,
    InterruptedException {
  Configuration conf = getConf(index);
  Class<?> keyClass = conf.getClass(MAPPER_INPUT_KEY_CLASS, Object.class);
  Class<?> valueClass = conf.getClass(MAPPER_INPUT_VALUE_CLASS, Object.class);
  Class<?> keyOutClass = conf.getClass(MAPPER_OUTPUT_KEY_CLASS, Object.class);
  Class<?> valueOutClass = conf.getClass(MAPPER_OUTPUT_VALUE_CLASS, Object.class);
  RecordReader rr = new ChainRecordReader(keyClass, valueClass, input, conf);
  RecordWriter rw = new ChainRecordWriter(keyOutClass, valueOutClass, output, conf);
  MapRunner runner = new MapRunner(mappers.get(index), createMapContext(rr, rw,
      context, getConf(index)), rr, rw);
  threads.add(runner);
}
Project: hadoop    File: TestFileOutputCommitter.java
private void writeOutput(RecordWriter theRecordWriter,
    TaskAttemptContext context) throws IOException, InterruptedException {
  NullWritable nullWritable = NullWritable.get();
  try {
    // several key/value write calls collapsed by the snippet extraction
    theRecordWriter.write(key2, val2);
  } finally {
    theRecordWriter.close(context);
  }
}
Project: ditb    File: TestHFileOutputFormat2.java
/**
 * Write random values to the writer assuming a table created using
 * {@link #FAMILIES} as column family descriptors
 */
private void writeRandomKeyValues(RecordWriter<ImmutableBytesWritable, Cell> writer,
    TaskAttemptContext context, Set<byte[]> families, int numRows)
    throws IOException, InterruptedException {
  byte keyBytes[] = new byte[Bytes.SIZEOF_INT];
  int valLength = 10;
  byte valBytes[] = new byte[valLength];

  int taskId = context.getTaskAttemptID().getTaskID().getId();
  assert taskId < Byte.MAX_VALUE : "Unit tests don't support > 127 tasks!";
  final byte [] qualifier = Bytes.toBytes("data");
  Random random = new Random();
  for (int i = 0; i < numRows; i++) {

    Bytes.putInt(keyBytes, i);
    random.nextBytes(valBytes);
    ImmutableBytesWritable key = new ImmutableBytesWritable(keyBytes);

    for (byte[] family : families) {
      Cell kv = new KeyValue(keyBytes, family, qualifier, valBytes);
      writer.write(key, kv);
    }
  }
}
Project: ditb    File: TestHFileOutputFormat.java
/**
 * Write random values to the writer assuming a table created using
 * {@link #FAMILIES} as column family descriptors
 */
private void writeRandomKeyValues(RecordWriter<ImmutableBytesWritable, KeyValue> writer,
    TaskAttemptContext context, Set<byte[]> families, int numRows)
    throws IOException, InterruptedException {
  byte[] keyBytes = new byte[Bytes.SIZEOF_INT];
  byte[] valBytes = new byte[10];
  final byte[] qualifier = Bytes.toBytes("data");
  Random random = new Random();
  for (int i = 0; i < numRows; i++) {
    Bytes.putInt(keyBytes, i);
    random.nextBytes(valBytes);
    ImmutableBytesWritable key = new ImmutableBytesWritable(keyBytes);

    for (byte[] family : families) {
      KeyValue kv = new KeyValue(keyBytes, family, qualifier, valBytes);
      writer.write(key, kv);
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: TestRecovery.java
private void writeOutput(TaskAttempt attempt, Configuration conf)
    throws Exception {
  // ... (body collapsed by the snippet extraction; it matches the hadoop
  // TestRecovery#writeOutput shown above)
  OutputFormat outputFormat = ReflectionUtils.newInstance(
      tContext.getOutputFormatClass(), conf);
  OutputCommitter committer = outputFormat.getOutputCommitter(tContext);
  committer.commitTask(tContext);
}
Project: es-hadoop-v2.2.0    File: EsStorage.java
@SuppressWarnings({ "unchecked", "rawtypes" })
@Override
public void prepareToWrite(RecordWriter writer) throws IOException {
    this.writer = writer;

    Properties props = getUDFProperties();
    String s = props.getProperty(ResourceSchema.class.getName());
    if (!StringUtils.hasText(s)) {
        log.warn("No resource schema found; using an empty one....");
        this.schema = new ResourceSchema();
    }
    else {
        this.schema = IOUtils.deserializeFromBase64(s);
    }
    this.pigTuple = new PigTuple(schema);
}
Project: aliyun-oss-hadoop-fs    File: Chain.java
/**
 * Add mapper (the first mapper) that reads input from the input
 * context and writes to queue
 */
@SuppressWarnings("unchecked")
void addMapper(TaskInputOutputContext inputContext,
    ChainBlockingQueue<KeyValuePair<?, ?>> output, int index)
    throws IOException, InterruptedException {
  // ... (body collapsed by the snippet extraction; identical to the hadoop
  // Chain#addMapper shown above)
  threads.add(runner);
}
Project: aliyun-oss-hadoop-fs    File: Chain.java
/**
 * Add mapper that reads and writes from/to the queue
 */
@SuppressWarnings("unchecked")
void addMapper(ChainBlockingQueue<KeyValuePair<?, ?>> input,
    ChainBlockingQueue<KeyValuePair<?, ?>> output,
    TaskInputOutputContext context, int index) throws IOException,
    InterruptedException {
  // ... (body collapsed by the snippet extraction; identical to the hadoop
  // Chain#addMapper shown above)
  threads.add(runner);
}
Project: mnemonic    File: MneMapreduceLongDataTest.java
@Test(enabled = true)
public void testWriteLongData() throws Exception {
  NullWritable nada = NullWritable.get();
  MneDurableOutputSession<Long> sess =
      new MneDurableOutputSession<Long>(m_tacontext, null, MneConfigHelper.DEFAULT_OUTPUT_CONFIG_PREFIX);
  MneDurableOutputValue<Long> mdvalue =
      new MneDurableOutputValue<Long>(sess);
  OutputFormat<NullWritable,MneDurableOutputValue<Long>> outputFormat =
      new MneOutputFormat<MneDurableOutputValue<Long>>();
  RecordWriter<NullWritable,MneDurableOutputValue<Long>> writer =
      outputFormat.getRecordWriter(m_tacontext);
  Long val = null;
  for (int i = 0; i < m_reccnt; ++i) {
    val = m_rand.nextLong();
    m_sum += val;
    writer.write(nada, mdvalue.of(val));
  }
  writer.close(m_tacontext);
  sess.close();
}
Project: mnemonic    File: MneMapreducePersonDataTest.java
@Test(enabled = true)
public void testWritePersonData() throws Exception {
  NullWritable nada = NullWritable.get();
  MneDurableOutputSession<Person<Long>> sess =
      new MneDurableOutputSession<Person<Long>>(m_tacontext, null, MneConfigHelper.DEFAULT_OUTPUT_CONFIG_PREFIX);
  MneDurableOutputValue<Person<Long>> mdvalue =
      new MneDurableOutputValue<Person<Long>>(sess);
  OutputFormat<NullWritable,MneDurableOutputValue<Person<Long>>> outputFormat =
      new MneOutputFormat<MneDurableOutputValue<Person<Long>>>();
  RecordWriter<NullWritable,MneDurableOutputValue<Person<Long>>> writer =
      outputFormat.getRecordWriter(m_tacontext);
  Person<Long> person = null;
  for (int i = 0; i < m_reccnt; ++i) {
    person = sess.newDurableObjectRecord();
    person.setAge((short) m_rand.nextInt(50));
    person.setName(String.format("Name: [%s]", Utils.genRandomString()), true);
    m_sumage += person.getAge();
    writer.write(nada, mdvalue.of(person));
  }
  writer.close(m_tacontext);
  sess.close();
}
Project: aliyun-oss-hadoop-fs    File: TestFileOutputCommitter.java
private void writeOutput(RecordWriter theRecordWriter,
    TaskAttemptContext context) throws IOException, InterruptedException {
  NullWritable nullWritable = NullWritable.get();
  try {
    // several key/value write calls collapsed by the snippet extraction
    theRecordWriter.write(key2, val2);
  } finally {
    theRecordWriter.close(context);
  }
}
Project: gora-boot    File: GoraOutputFormat.java
@Override
@SuppressWarnings("unchecked")
public RecordWriter<K,T> getRecordWriter(TaskAttemptContext context)
    throws IOException,InterruptedException {
  Configuration conf = context.getConfiguration();
  Class<? extends DataStore<K,T>> dataStoreClass
    = (Class<? extends DataStore<K,T>>) conf.getClass(DATA_STORE_CLASS, null);
  Class<K> keyClass = (Class<K>) conf.getClass(OUTPUT_KEY_CLASS, null);
  Class<T> rowClass = (Class<T>) conf.getClass(OUTPUT_VALUE_CLASS, null);
  final DataStore<K,T> store =
    DataStoreFactory.createDataStore(dataStoreClass, keyClass, rowClass, context.getConfiguration());

  setOutputPath(store, context);

  return new GoraRecordWriter(store, context);
}
Project: SOAPgaea    File: GaeaVCFOutputFormat.java
@Override public RecordWriter<K, VariantContextWritable> getRecordWriter(
        TaskAttemptContext context)
        throws IOException {
    final Configuration conf = ContextUtil.getConfiguration(context);
    initBaseOF(conf);
    if (baseOF.getHeader() == null) {
        if (conf.get(OUT_PATH_PROP) != null) {
            final Path p = new Path(conf.get(OUT_PATH_PROP));
            baseOF.readHeaderFrom(p, p.getFileSystem(conf));
        }
    }

    if (conf.getBoolean(GaeaVCFOutputFormat.HEADER_MODIFY, false)) {
        final boolean wh = ContextUtil.getConfiguration(context).getBoolean(
                KeyIgnoringVCFOutputFormat.WRITE_HEADER_PROPERTY, true);
        return new GaeaKeyIgnoringVCFRecordWriter<K>(getDefaultWorkFile(context, ""),
                baseOF.getHeader(), wh, context);
    }

    return baseOF.getRecordWriter(context, getDefaultWorkFile(context, ""));
}
Project: mnemonic    File: MneMapreduceBufferDataTest.java
@Test(enabled = true)
public void testWriteBufferData() throws Exception {
  NullWritable nada = NullWritable.get();
  MneDurableOutputSession<DurableBuffer<?>> sess =
      new MneDurableOutputSession<DurableBuffer<?>>(m_tacontext, null, MneConfigHelper.DEFAULT_OUTPUT_CONFIG_PREFIX);
  MneDurableOutputValue<DurableBuffer<?>> mdvalue =
      new MneDurableOutputValue<DurableBuffer<?>>(sess);
  OutputFormat<NullWritable,MneDurableOutputValue<DurableBuffer<?>>> outputFormat =
      new MneOutputFormat<MneDurableOutputValue<DurableBuffer<?>>>();
  RecordWriter<NullWritable,MneDurableOutputValue<DurableBuffer<?>>> writer =
      outputFormat.getRecordWriter(m_tacontext);
  DurableBuffer<?> dbuf = null;
  Checksum cs = new CRC32();
  cs.reset();
  for (int i = 0; i < m_reccnt; ++i) {
    dbuf = genupdDurableBuffer(sess, cs);
    Assert.assertNotNull(dbuf);
    writer.write(nada, mdvalue.of(dbuf));
  }
  m_checksum = cs.getValue();
  writer.close(m_tacontext);
  sess.close();
}
Project: big-c    File: TestRecovery.java
private void writeBadOutput(TaskAttempt attempt, Configuration conf)
    throws Exception {
  // ... (body collapsed by the snippet extraction; identical to the hadoop
  // TestRecovery#writeBadOutput shown above)
  OutputFormat outputFormat = ReflectionUtils.newInstance(
      tContext.getOutputFormatClass(), conf);
  OutputCommitter committer = outputFormat.getOutputCommitter(tContext);
  committer.commitTask(tContext);
}
Project: big-c    File: TestRecovery.java
private void writeOutput(TaskAttempt attempt, Configuration conf)
    throws Exception {
  // ... (body collapsed by the snippet extraction; identical to the hadoop
  // TestRecovery#writeOutput shown above)
  OutputFormat outputFormat = ReflectionUtils.newInstance(
      tContext.getOutputFormatClass(), conf);
  OutputCommitter committer = outputFormat.getOutputCommitter(tContext);
  committer.commitTask(tContext);
}
Project: big-c    File: DBOutputFormat.java
/** {@inheritDoc} */
public RecordWriter<K,V> getRecordWriter(TaskAttemptContext context)
    throws IOException {
  // ... (body collapsed by the snippet extraction; identical to the hadoop
  // DBOutputFormat#getRecordWriter shown above)
  try {
    return new DBRecordWriter(connection, statement);
  } catch (Exception ex) {
    throw new IOException(ex.getMessage());
  }
}
Project: big-c    File: Chain.java
/**
 * Add mapper (the first mapper) that reads input from the input
 * context and writes to queue
 */
@SuppressWarnings("unchecked")
void addMapper(TaskInputOutputContext inputContext,
    ChainBlockingQueue<KeyValuePair<?, ?>> output, int index)
    throws IOException, InterruptedException {
  // ... (body collapsed by the snippet extraction; identical to the hadoop
  // Chain#addMapper shown above)
  threads.add(runner);
}
Project: big-c    File: Chain.java
/**
 * Add mapper that reads and writes from/to the queue
 */
@SuppressWarnings("unchecked")
void addMapper(ChainBlockingQueue<KeyValuePair<?, ?>> input,
    ChainBlockingQueue<KeyValuePair<?, ?>> output,
    TaskInputOutputContext context, int index) throws IOException,
    InterruptedException {
  // ... (body collapsed by the snippet extraction; identical to the hadoop
  // Chain#addMapper shown above)
  threads.add(runner);
}
Project: hadoopoffice    File: ExcelFileOutputFormat.java
@Override
public RecordWriter<NullWritable,SpreadSheetCellDAO> getRecordWriter(TaskAttemptContext context) throws IOException {
    // check if mimeType is set. If not assume new Excel format (.xlsx)
    Configuration conf=context.getConfiguration();
    String defaultConf = conf.get(HadoopOfficeWriteConfiguration.CONF_MIMETYPE, ExcelFileOutputFormat.DEFAULT_MIMETYPE);
    conf.set(HadoopOfficeWriteConfiguration.CONF_MIMETYPE, defaultConf);
    // add suffix   
    Path file = getDefaultWorkFile(context, ExcelFileOutputFormat.getSuffix(conf.get(HadoopOfficeWriteConfiguration.CONF_MIMETYPE)));


    try {
        return new ExcelRecordWriter<>(
                HadoopUtil.getDataOutputStream(conf, file, getCompressOutput(context),
                        getOutputCompressorClass(context, ExcelFileOutputFormat.defaultCompressorClass)),
                file.getName(), conf);
    } catch (InvalidWriterConfigurationException | InvalidCellSpecificationException | FormatNotUnderstoodException
            | GeneralSecurityException | OfficeWriterException e) {
        LOG.error(e);
    }

    return null;
}
Project: big-c    File: TestFileOutputCommitter.java
private void writeOutput(RecordWriter theRecordWriter,
    TaskAttemptContext context) throws IOException, InterruptedException {
  NullWritable nullWritable = NullWritable.get();
  try {
    // several key/value write calls collapsed by the snippet extraction
    theRecordWriter.write(key2, val2);
  } finally {
    theRecordWriter.close(context);
  }
}
Project: big-c    File: GridmixJob.java
@Override
public RecordWriter<K,GridmixRecord> getRecordWriter(
    TaskAttemptContext job) throws IOException {

  Path file = getDefaultWorkFile(job, "");
  final DataOutputStream fileOut;

  fileOut = 
    new DataOutputStream(CompressionEmulationUtil
        .getPossiblyCompressedOutputStream(file, job.getConfiguration()));

  return new RecordWriter<K,GridmixRecord>() {
    @Override
    public void write(K ignored, GridmixRecord value)
        throws IOException {
      // Let the Gridmix record fill itself.
      value.write(fileOut);
    }
    @Override
    public void close(TaskAttemptContext ctxt) throws IOException {
      fileOut.close();
    }
  };
}
Project: multiple-dimension-spread    File: MDSParserOutputFormat.java
@Override
public RecordWriter<NullWritable,IParser> getRecordWriter( final TaskAttemptContext taskAttemptContext ) throws IOException, InterruptedException{
  Configuration config = taskAttemptContext.getConfiguration();

  String extension = ".mds";
  Path file = getDefaultWorkFile( taskAttemptContext, extension );

  FileSystem fs = file.getFileSystem( config );
  long dfsBlockSize = Math.max( fs.getDefaultBlockSize( file ), 1024 * 1024 * 256 );

  OutputStream out = fs.create( file, true, 4096, fs.getDefaultReplication( file ), dfsBlockSize );

  return new MDSParserRecordWriter( out, new jp.co.yahoo.dataplatform.config.Configuration() );
}
Project: ViraPipe    File: HDFSWriter.java
@Override
public RecordWriter<NullWritable,SAMRecordWritable> getRecordWriter(TaskAttemptContext ctx, Path outputPath) throws IOException {
    // the writers require a header in order to create a codec, even if
    // the header isn't being written out
    setSAMHeader(samheader);
    setWriteHeader(writeHeader);

    return super.getRecordWriter(ctx, outputPath);
}
Project: aliyun-maxcompute-data-collectors    File: UpdateOutputFormat.java
@Override
/** {@inheritDoc} */
public RecordWriter<K,V> getRecordWriter(TaskAttemptContext context)
    throws IOException {
  try {
    return new UpdateRecordWriter(context);
  } catch (Exception e) {
    throw new IOException(e);
  }
}
Project: aliyun-maxcompute-data-collectors    File: OracleExportOutputFormat.java
@Override
/** {@inheritDoc} */
public RecordWriter<K,V> getRecordWriter(TaskAttemptContext context)
    throws IOException {
  try {
    return new OracleExportRecordWriter<K,V>(context);
  } catch (Exception e) {
    throw new IOException(e);
  }
}
Project: aliyun-maxcompute-data-collectors    File: CubridUpsertOutputFormat.java
@Override
/** {@inheritDoc} */
public RecordWriter<K,V> getRecordWriter(TaskAttemptContext context)
    throws IOException {
  try {
    return new CubridUpsertRecordWriter(context);
  } catch (Exception e) {
    throw new IOException(e);
  }
}
Project: aliyun-maxcompute-data-collectors    File: OracleUpsertOutputFormat.java
@Override
/** {@inheritDoc} */
public RecordWriter<K,V> getRecordWriter(TaskAttemptContext context)
    throws IOException {
  try {
    return new OracleUpsertRecordWriter(context);
  } catch (Exception e) {
    throw new IOException(e);
  }
}
Project: aliyun-maxcompute-data-collectors    File: DelegatingOutputFormat.java
@Override
/** {@inheritDoc} */
public RecordWriter<K,V> getRecordWriter(TaskAttemptContext context)
    throws IOException {
  try {
    return new DelegatingRecordWriter(context);
  } catch (ClassNotFoundException cnfe) {
    throw new IOException(cnfe);
  }
}
Project: aliyun-maxcompute-data-collectors    File: ExportBatchOutputFormat.java
@Override
/** {@inheritDoc} */
public RecordWriter<K,V> getRecordWriter(TaskAttemptContext context)
    throws IOException {
  try {
    return new ExportBatchRecordWriter<K,V>(context);
  } catch (Exception e) {
    throw new IOException(e);
  }
}
Project: aliyun-maxcompute-data-collectors    File: ExportCallOutputFormat.java
@Override
/** {@inheritDoc} */
public RecordWriter<K,V> getRecordWriter(TaskAttemptContext context)
    throws IOException {
  try {
    return new ExportCallRecordWriter(context);
  } catch (Exception e) {
    throw new IOException(e);
  }
}
Project: aliyun-maxcompute-data-collectors    File: SQLServerResilientUpdateOutputFormat.java
@Override
/** {@inheritDoc} */
public RecordWriter<K,V> getRecordWriter(TaskAttemptContext context)
    throws IOException {
  try {
    return new SQLServerUpdateRecordWriter(context);
  } catch (Exception e) {
    throw new IOException(e);
  }
}
