java.io.RandomAccessFile#write() Source Code Examples

Listed below are example usages of java.io.RandomAccessFile#write() collected from open-source projects; each example links back to its full source on GitHub.

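For orientation before the project examples, here is a minimal, self-contained sketch of the three write overloads; the file name demo.dat is hypothetical:

import java.io.IOException;
import java.io.RandomAccessFile;

public class RandomAccessFileWriteDemo {
    public static void main(String[] args) throws IOException {
        // try-with-resources closes the file even if a write fails
        try (RandomAccessFile raf = new RandomAccessFile("demo.dat", "rw")) {
            raf.write(0x41);                      // write(int): only the low-order byte ('A')
            raf.write("BCD".getBytes());          // write(byte[]): the whole array
            raf.write("XYZ123".getBytes(), 3, 3); // write(byte[], int, int): writes "123"
            raf.seek(0);                          // every write happens at the file pointer
            raf.write('Z');                       // overwrite the first byte in place
        }
    }
}
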
Example 1 Project: birt File: EngineIRReaderTest.java
/**
 * Used to create the golden stream written by the current version of EngineIRWriter.
 * The file will be stored in the eclipse folder; copy it here and commit it to CVS.
 * 
 * The current version is 2.
 * 
 * @throws Exception if parsing or writing the design fails
 */
public void writeGolden( ) throws Exception
{
	//load the report design
	ReportParser parser = new ReportParser( );
	Report report = parser.parse( ".", this.getClass( )
			.getResourceAsStream( DESIGN_STREAM ) );
	assertTrue( report != null );

	// write it into the stream
	ByteArrayOutputStream out = new ByteArrayOutputStream( );
	new EngineIRWriter( ).write( out, report );
	out.close( );

	File file = new File( GOLDEN_V2_STREAM );
	if ( !file.exists( ) )
	{
		file.createNewFile( );
		RandomAccessFile rf = new RandomAccessFile( file, "rw" );
		rf.write( out.toByteArray( ) );
		// close to flush the golden file and release the handle
		rf.close( );
	}
}
 
Example 2 Project: dragonwell8_jdk File: TestRecordingFile.java
private static Path createBrokenWIthZeros(Path valid) throws Exception {
    try {
        Path broken = Files.createTempFile("broken-events", ".jfr");
        Files.delete(broken);
        Files.copy(valid, broken);
        RandomAccessFile raf = new RandomAccessFile(broken.toFile(), "rw");
        raf.seek(HEADER_SIZE);
        int size = (int) Files.size(broken);
        byte[] ones = new byte[size - HEADER_SIZE];
        for (int i = 0; i < ones.length; i++) {
            ones[i] = (byte) 0xFF;
        }
        raf.write(ones, 0, ones.length);
        raf.close();
        return broken;
    } catch (IOException ioe) {
        throw new Exception("Could not produce a broken file " + valid, ioe);
    }
}
 
Example 3 Project: jackrabbit-filevault File: ZipStreamArchive.java
/**
 * Copies the input stream to the buffer but checks for overflow. If the buffer size is exceeded, the entire buffer
 * is copied to a random access file and the rest of the input stream is appended there.
 * @param in the input stream to copy
 * @return the number of bytes written to the destination.
 * @throws IOException if an I/O error occurs.
 */
private long copyToBuffer(@NotNull InputStream in) throws IOException {
    int read;
    int total = 0;
    while ((read = in.read(decompressed, pos, decompressed.length - pos)) > 0) {
        total += read;
        pos += read;
        if (pos == decompressed.length) {
            // switch to raf
            tmpFile = File.createTempFile("__vlttmpbuffer", ".dat");
            raf = new RandomAccessFile(tmpFile, "rw");
            raf.write(decompressed);
            decompressed = null;
            return total + copyToRaf(in);
        }
    }
    return total;
}
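
The companion copyToRaf is not shown here; presumably it streams the rest of the input to the temp file. A generic sketch of that step, assuming nothing beyond the standard library and the raf field set above:

private long copyToRaf(InputStream in) throws IOException {
    byte[] buf = new byte[8192];
    long total = 0;
    int read;
    while ((read = in.read(buf)) > 0) {
        raf.write(buf, 0, read); // append at the current file pointer
        total += read;
    }
    return total;
}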
 
Example 4 Project: jzab File: LogTest.java
void corruptFile(int position) throws Exception {
  File file = new File(getDirectory(), "transaction.log");
  RandomAccessFile ra = new RandomAccessFile(file, "rw");
  ra.seek(position);
  ra.write(0xff);
  ra.close();
}
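
Note that corruptFile leans on write(int) emitting only the low-order byte of its argument (here 0xff). A round-trip sketch of that behavior, with a hypothetical file name:

import java.io.IOException;
import java.io.RandomAccessFile;

public class SingleByteWriteDemo {
    public static void main(String[] args) throws IOException {
        try (RandomAccessFile ra = new RandomAccessFile("scratch.bin", "rw")) {
            ra.write(0x1234ff);  // only the low-order byte (0xff) reaches the file
            ra.seek(0);
            System.out.println(ra.readUnsignedByte()); // prints 255
        }
    }
}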
 
Example 5 Project: j2objc File: RandomAccessFileTest.java
/**
 * java.io.RandomAccessFile#write(byte[], int, int)
 */
public void test_write$BII() throws IOException {
    // Test for method void java.io.RandomAccessFile.write(byte [], int,
    // int)
    RandomAccessFile raf = new java.io.RandomAccessFile(fileName, "rw");
    byte[] rbuf = new byte[4000];
    raf.write(fileString.getBytes(), 0, fileString.length());
    raf.close();
    FileInputStream fis = new java.io.FileInputStream(fileName);
    fis.read(rbuf, 0, fileString.length());
    assertEquals("Incorrect bytes written", fileString, new String(rbuf, 0,
            fileString.length()));
    fis.close();
}
 
Example 6 Project: universal-pokemon-randomizer File: NDSRom.java
private void copy(RandomAccessFile from, RandomAccessFile to, int bytes) throws IOException {
    int sizeof_copybuf = Math.min(256 * 1024, bytes);
    byte[] copybuf = new byte[sizeof_copybuf];
    while (bytes > 0) {
        int size2 = (bytes >= sizeof_copybuf) ? sizeof_copybuf : bytes;
        int read = from.read(copybuf, 0, size2);
        if (read < 0) {
            // the source ended early; bail out instead of corrupting the byte count
            throw new EOFException("unexpected end of file while copying");
        }
        to.write(copybuf, 0, read);
        bytes -= read;
    }
}
 
Example 7 Project: TencentKona-8 File: EvilInstrument.java
public byte[] transform(ClassLoader loader, String className,
                        Class<?> classBeingRedefined,
                        ProtectionDomain protectionDomain,
                        byte[] classfileBuffer)
{
    if (!inited) {
        return null;
    }
    // Do i/o operations during every transform call.
    try {
        FileOutputStream fos = new FileOutputStream(scratch);
        fos.write(31);
        fos.close();

        FileInputStream fis = new FileInputStream(scratch);
        fis.read();
        fis.close();

        RandomAccessFile raf = new RandomAccessFile(scratch, "rw");
        raf.read();
        raf.write(31);
        raf.close();

        s.getOutputStream().write(31);
        s.getInputStream().read();

    } catch(Exception ex) {
        ex.printStackTrace();
        System.exit(1);
    }
    return null;
}
 
Example 8 Project: sgdtk File: FeatureVector.java
/**
 * Write out the FeatureVector to a file at the current offset, using the working buffer.  The buffer
 * should be pre-sized using getSerializationSize(), or it can be null, in which case it will be allocated
 *
 * @param output A file open at the desired write offset
 * @param buffer A working buffer of at least the required number of bytes or null
 * @throws IOException if an I/O error occurs
 */
public void serializeTo(RandomAccessFile output, byte[] buffer) throws IOException
{
    UnsafeMemory memory = serialize(buffer);
    // Write bytes out
    long sz = memory.getPos();
    output.writeLong(sz);
    output.write(memory.getBuffer(), 0, (int) sz);
}
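
serializeTo produces a length-prefixed record: an 8-byte length followed by that many payload bytes. A sketch of the matching reader (the method name deserializeFrom is hypothetical; UnsafeMemory is project-specific, so this recovers only the raw bytes):

public byte[] deserializeFrom(RandomAccessFile input) throws IOException
{
    long sz = input.readLong();       // the 8-byte length prefix written above
    byte[] payload = new byte[(int) sz];
    input.readFully(payload);         // read exactly sz payload bytes
    return payload;
}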
 
Example 9 Project: BPlusTree File: TreeOverflow.java
/**
 *
 * Overflow node write structure is as follows:
 *
 *  -- node type -- (2 bytes)
 *  -- next pointer -- (8 bytes)
 *  -- prev pointer -- (8 bytes)
 *  -- current capacity -- (4 bytes)
 *  -- values -- (max size * satellite size)
 *
 * @param r pointer to *opened* B+ tree file
 * @throws IOException is thrown when an I/O operation fails
 */
@Override
public void writeNode(RandomAccessFile r, BPlusConfiguration conf,
                      BPlusTreePerformanceCounter bPerf)
        throws IOException {
    // account for the header page as well.
    r.seek(getPageIndex());

    // now write the node type
    r.writeShort(getPageType());

    // write the next pointer
    r.writeLong(nextPagePointer);

    // write the prev pointer
    r.writeLong(prevPagePointer);

    // then write the current capacity
    r.writeInt(getCurrentCapacity());

    // now write the values
    for(int i = 0; i < getCurrentCapacity(); i++)
        {r.write(valueList.get(i).getBytes(StandardCharsets.UTF_8));}

    // annoying correction
    if(r.length() < getPageIndex()+conf.getPageSize())
        {r.setLength(getPageIndex()+conf.getPageSize());}

    bPerf.incrementTotalOverflowNodeWrites();
}
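
Because every field sits at a fixed offset inside a fixed-size page, a reader simply walks the same order. A sketch of the mirror-image read (names are hypothetical; only the seek/readShort/readLong/readInt sequence mirrors the write above):

void readNodeHeader(RandomAccessFile r, long pageIndex) throws IOException {
    r.seek(pageIndex);                   // jump to the start of the page
    short pageType = r.readShort();      // 2 bytes: node type
    long nextPagePointer = r.readLong(); // 8 bytes: next pointer
    long prevPagePointer = r.readLong(); // 8 bytes: prev pointer
    int currentCapacity = r.readInt();   // 4 bytes: current capacity
    // currentCapacity values of satellite-size bytes each follow
}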
 
Example 10 Project: pumpernickel File: BmpEncoder.java
RandomAccessDataModel(File file, int totalBytes, int width, int height,
		int bitsPerPixel) throws IOException {
	// this.file = file;
	randomAccessFile = new RandomAccessFile(file, "rw");
	randomAccessFile.setLength(totalBytes);

	byte[] header = new byte[HEADER_SIZE];
	writeHeader(header, 0, width, height, bitsPerPixel);
	randomAccessFile.write(header, 0, header.length);
}
 
Example 11 Project: Telegram-FOSS File: TeeAudioProcessor.java
private void writeBuffer(ByteBuffer buffer) throws IOException {
  RandomAccessFile randomAccessFile = Assertions.checkNotNull(this.randomAccessFile);
  while (buffer.hasRemaining()) {
    int bytesToWrite = Math.min(buffer.remaining(), scratchBuffer.length);
    buffer.get(scratchBuffer, 0, bytesToWrite);
    randomAccessFile.write(scratchBuffer, 0, bytesToWrite);
    bytesWritten += bytesToWrite;
  }
}
 
Example 12 Project: j2objc File: OldRandomAccessFileTest.java
/**
 * java.io.RandomAccessFile#length()
 */
public void test_length() throws IOException {
    // Test for method long java.io.RandomAccessFile.length()
    RandomAccessFile raf = new java.io.RandomAccessFile(fileName, "rw");
    raf.write(testString.getBytes());
    assertEquals("Test 1: Incorrect length returned. ", testLength,
            raf.length());
    raf.close();
    try {
        raf.length();
        fail("Test 2: IOException expected.");
    } catch (IOException e) {
        // Expected.
    }
}
 
Example 13 Project: openjdk-jdk8u File: EvilInstrument.java
public byte[] transform(ClassLoader loader, String className,
                        Class<?> classBeingRedefined,
                        ProtectionDomain protectionDomain,
                        byte[] classfileBuffer)
{
    if (!inited) {
        return null;
    }
    // Do i/o operations during every transform call.
    try {
        FileOutputStream fos = new FileOutputStream(scratch);
        fos.write(31);
        fos.close();

        FileInputStream fis = new FileInputStream(scratch);
        fis.read();
        fis.close();

        RandomAccessFile raf = new RandomAccessFile(scratch, "rw");
        raf.read();
        raf.write(31);
        raf.close();

        s.getOutputStream().write(31);
        s.getInputStream().read();

    } catch(Exception ex) {
        ex.printStackTrace();
        System.exit(1);
    }
    return null;
}
 
Example 14 Project: big-c File: TestFsck.java
/**
 * Test for blockIdCK with block corruption
 */
@Test
public void testBlockIdCKCorruption() throws Exception {
  short NUM_DN = 1;
  final long blockSize = 512;
  Random random = new Random();
  DFSClient dfsClient;
  LocatedBlocks blocks;
  ExtendedBlock block;
  short repFactor = 1;
  String [] racks = {"/rack1"};
  String [] hosts = {"host1"};

  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  // Set short retry timeouts so this test runs faster
  conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  try {
    cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
            .racks(racks).build();

    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);

    DFSTestUtil util = new DFSTestUtil.Builder().
      setName(getClass().getSimpleName()).setNumFiles(1).build();
    //create files
    final String pathString = new String("/testfile");
    final Path path = new Path(pathString);
    util.createFile(dfs, path, 1024, repFactor, 1000L);
    util.waitReplication(dfs, path, repFactor);
    StringBuilder sb = new StringBuilder();
    for (LocatedBlock lb: util.getAllBlocks(dfs, path)){
      sb.append(lb.getBlock().getLocalBlock().getBlockName()+" ");
    }
    String[] bIds = sb.toString().split(" ");

    //make sure block is healthy before we corrupt it
    String outStr = runFsck(conf, 0, true, "/", "-blockId", bIds[0]);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));

    // corrupt replicas
    block = DFSTestUtil.getFirstBlock(dfs, path);
    File blockFile = cluster.getBlockFile(0, block);
    if (blockFile != null && blockFile.exists()) {
      RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
      FileChannel channel = raFile.getChannel();
      String badString = "BADBAD";
      int rand = random.nextInt((int) channel.size()/2);
      raFile.seek(rand);
      raFile.write(badString.getBytes());
      raFile.close();
    }

    util.waitCorruptReplicas(dfs, cluster.getNamesystem(), path, block, 1);

    outStr = runFsck(conf, 1, false, "/", "-blockId", block.getBlockName());
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 15 Project: Nukkit File: RegionLoader.java
@Override
public int doSlowCleanUp() throws Exception {
    RandomAccessFile raf = this.getRandomAccessFile();
    for (int i = 0; i < 1024; i++) {
        Integer[] table = this.locationTable.get(i);
        if (table[0] == 0 || table[1] == 0) {
            continue;
        }
        raf.seek(table[0] << 12);
        byte[] chunk = new byte[table[1] << 12];
        raf.readFully(chunk);
        int length = Binary.readInt(Arrays.copyOfRange(chunk, 0, 3));
        if (length <= 1) {
            this.locationTable.put(i, (table = new Integer[]{0, 0, 0}));
        }
        try {
            chunk = Zlib.inflate(Arrays.copyOf(chunk, 5));
        } catch (Exception e) {
            this.locationTable.put(i, new Integer[]{0, 0, 0});
            continue;
        }
        chunk = Zlib.deflate(chunk, 9);
        ByteBuffer buffer = ByteBuffer.allocate(4 + 1 + chunk.length);
        buffer.put(Binary.writeInt(chunk.length + 1));
        buffer.put(COMPRESSION_ZLIB);
        buffer.put(chunk);
        chunk = buffer.array();
        int sectors = (int) Math.ceil(chunk.length / 4096d);
        if (sectors > table[1]) {
            table[0] = this.lastSector + 1;
            this.lastSector += sectors;
            this.locationTable.put(i, table);
        }
        raf.seek(table[0] << 12);
        byte[] bytes = new byte[sectors << 12];
        ByteBuffer buffer1 = ByteBuffer.wrap(bytes);
        buffer1.put(chunk);
        raf.write(buffer1.array());
    }
    this.writeLocationTable();
    int n = this.cleanGarbage();
    this.writeLocationTable();
    return n;
}
 
Example 16 Project: openjdk-jdk8u-backup File: Basic.java
public static void main(String[] args) throws Exception {

        show(nonExistantFile);
        if (nonExistantFile.exists()) fail(nonExistantFile, "exists");

        show(rwFile);
        testFile(rwFile, true, 6);
        rwFile.delete();
        if (rwFile.exists())
            fail(rwFile, "could not delete");

        show(roFile);
        testFile(roFile, false, 0);

        show(thisDir);
        if (!thisDir.exists()) fail(thisDir, "does not exist");
        if (thisDir.isFile()) fail(thisDir, "is a file");
        if (!thisDir.isDirectory()) fail(thisDir, "is not a directory");
        if (!thisDir.canRead()) fail(thisDir, "is not readable");
        if (!thisDir.canWrite()) fail(thisDir, "is not writeable");
        String[] fs = thisDir.list();
        if (fs == null) fail(thisDir, "list() returned null");
        out.print("  [" + fs.length + "]");
        for (int i = 0; i < fs.length; i++)
            out.print(" " + fs[i]);
        out.println();
        if (fs.length == 0) fail(thisDir, "is empty");

        if (!nonExistantFile.createNewFile())
            fail(nonExistantFile, "could not create");
        nonExistantFile.deleteOnExit();

        if (!nonDir.mkdir())
            fail(nonDir, "could not create");

        if (!dir.renameTo(new File("x.Basic.dir2")))
            fail(dir, "failed to rename");

        if (System.getProperty("os.name").equals("SunOS")
            && System.getProperty("os.version").compareTo("5.6") >= 0) {
            if (bigFile.exists()) {
                bigFile.delete();
                if (bigFile.exists())
                    fail(bigFile, "could not delete");
            }
            RandomAccessFile raf = new RandomAccessFile(bigFile, "rw");
            long big = ((long)Integer.MAX_VALUE) * 2;
            try {
                raf.seek(big);
                raf.write('x');
                show(bigFile);
                testFile(bigFile, true, big + 1);
            } finally {
                raf.close();
            }
            bigFile.delete();
            if (bigFile.exists())
                fail(bigFile, "could not delete");
        } else {
            System.err.println("NOTE: Large files not supported on this system");
        }

    }
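
The interesting trick above is that seeking past the end of a file and then writing a single byte extends the file to that offset, typically as a sparse file on filesystems that support it. A minimal sketch with a hypothetical file name:

import java.io.IOException;
import java.io.RandomAccessFile;

public class SparseFileDemo {
    public static void main(String[] args) throws IOException {
        try (RandomAccessFile raf = new RandomAccessFile("big.dat", "rw")) {
            long big = ((long) Integer.MAX_VALUE) * 2; // about 4 GB
            raf.seek(big);    // move the file pointer far past EOF
            raf.write('x');   // this single write extends the file to big + 1 bytes
            System.out.println(raf.length()); // prints 4294967295
        }
    }
}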
 
Example 17 Project: Nukkit File: RegionLoader.java
@Override
protected void saveChunk(int x, int z, byte[] chunkData) throws IOException {
    int length = chunkData.length + 1;
    if (length + 4 > MAX_SECTOR_LENGTH) {
        throw new ChunkException("Chunk is too big! " + (length + 4) + " > " + MAX_SECTOR_LENGTH);
    }
    int sectors = (int) Math.ceil((length + 4) / 4096d);
    int index = getChunkOffset(x, z);
    boolean indexChanged = false;
    Integer[] table = this.locationTable.get(index);

    if (table[1] < sectors) {
        table[0] = this.lastSector + 1;
        this.locationTable.put(index, table);
        this.lastSector += sectors;
        indexChanged = true;
    } else if (table[1] != sectors) {
        indexChanged = true;
    }

    table[1] = sectors;
    table[2] = (int) (System.currentTimeMillis() / 1000d);

    this.locationTable.put(index, table);

    RandomAccessFile raf = this.getRandomAccessFile();
    raf.seek(table[0] << 12);

    BinaryStream stream = new BinaryStream();
    stream.put(Binary.writeInt(length));
    stream.putByte(COMPRESSION_ZLIB);
    stream.put(chunkData);
    byte[] data = stream.getBuffer();
    if (data.length < sectors << 12) {
        byte[] newData = new byte[sectors << 12];
        System.arraycopy(data, 0, newData, 0, data.length);
        data = newData;
    }

    raf.write(data);

    if (indexChanged) {
        this.writeLocationIndex(index);
    }

}
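
saveChunk pads each chunk up to a whole number of 4 KiB sectors before writing. A worked sketch of the sector arithmetic, with a hypothetical payload size:

public class SectorMath {
    public static void main(String[] args) {
        int chunkDataLength = 5000;                          // hypothetical chunk payload
        int length = chunkDataLength + 1;                    // plus the compression-type byte
        int sectors = (int) Math.ceil((length + 4) / 4096d); // plus the 4-byte length prefix
        System.out.println(sectors);       // 2
        System.out.println(sectors << 12); // the chunk is padded to 8192 bytes
    }
}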
 
Example 18 Project: hadoop File: TestReplication.java
@Test
public void testPendingReplicationRetry() throws IOException {
  
  MiniDFSCluster cluster = null;
  int numDataNodes = 4;
  String testFile = "/replication-test-file";
  Path testPath = new Path(testFile);
  
  byte buffer[] = new byte[1024];
  for (int i=0; i<buffer.length; i++) {
    buffer[i] = '1';
  }
  
  try {
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, Integer.toString(numDataNodes));
    //first time format
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    cluster.waitActive();
    DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
                                          cluster.getNameNodePort()),
                                          conf);
    
    OutputStream out = cluster.getFileSystem().create(testPath);
    out.write(buffer);
    out.close();
    
    waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, -1);

    // get first block of the file.
    ExtendedBlock block = dfsClient.getNamenode().getBlockLocations(testFile,
        0, Long.MAX_VALUE).get(0).getBlock();
    
    cluster.shutdown();
    
    for (int i=0; i<25; i++) {
      buffer[i] = '0';
    }
    
    int fileCount = 0;
    // Choose 3 copies of block file - delete 1 and corrupt the remaining 2
    for (int dnIndex=0; dnIndex<3; dnIndex++) {
      File blockFile = cluster.getBlockFile(dnIndex, block);
      LOG.info("Checking for file " + blockFile);
      
      if (blockFile != null && blockFile.exists()) {
        if (fileCount == 0) {
          LOG.info("Deleting file " + blockFile);
          assertTrue(blockFile.delete());
        } else {
          // corrupt it.
          LOG.info("Corrupting file " + blockFile);
          long len = blockFile.length();
          assertTrue(len > 50);
          RandomAccessFile blockOut = new RandomAccessFile(blockFile, "rw");
          try {
            blockOut.seek(len/3);
            blockOut.write(buffer, 0, 25);
          } finally {
            blockOut.close();
          }
        }
        fileCount++;
      }
    }
    assertEquals(3, fileCount);
    
    /* Start the MiniDFSCluster with more datanodes since once a writeBlock
     * to a datanode node fails, same block can not be written to it
     * immediately. In our case some replication attempts will fail.
     */
    
    LOG.info("Restarting minicluster after deleting a replica and corrupting 2 crcs");
    conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, Integer.toString(numDataNodes));
    conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
    conf.set("dfs.datanode.block.write.timeout.sec", Integer.toString(5));
    conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.75f"); // only 3 copies exist
    
    cluster = new MiniDFSCluster.Builder(conf)
                                .numDataNodes(numDataNodes * 2)
                                .format(false)
                                .build();
    cluster.waitActive();
    
    dfsClient = new DFSClient(new InetSocketAddress("localhost",
                                cluster.getNameNodePort()),
                                conf);
    
    waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, -1);
    
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }  
}
 
Example 19 Project: org.alloytools.alloy File: Util.java
/**
 * Copy file.content[from...f.length-1] into file.content[to...], then truncate
 * the file after that point.
 * <p>
 * If (from &gt; to), this means we simply delete the portion of the file
 * beginning at "to" and up to but excluding "from".
 * <p>
 * If (from &lt; to), this means we insert (to-from) number of ARBITRARY bytes
 * into the "from" location and shift the original file content accordingly.
 * <p>
 * Note: after this operation, the file's current position will be moved to the
 * start of the file.
 *
 * @throws IOException if (from &lt; 0) || (to &lt; 0) || (from &gt;=
 *             file.length())
 */
public static void shift(RandomAccessFile file, long from, long to) throws IOException {
    long total = file.length();
    if (from < 0 || from >= total || to < 0)
        throw new IOException();
    else if (from == to) {
        file.seek(0);
        return;
    }
    final byte buf[] = new byte[4096];
    int res;
    if (from > to) {
        while (true) {
            file.seek(from);
            if ((res = file.read(buf)) <= 0) {
                file.setLength(to);
                file.seek(0);
                return;
            }
            file.seek(to);
            file.write(buf, 0, res);
            from = from + res;
            to = to + res;
        }
    } else {
        file.seek(total);
        for (long todo = to - from; todo > 0;) {
            if (todo >= buf.length) {
                file.write(buf);
                todo = todo - buf.length;
            } else {
                file.write(buf, 0, (int) todo);
                break;
            }
        }
        for (long todo = total - from; todo > 0; total = total - res, todo = todo - res) {
            if (todo > buf.length)
                res = buf.length;
            else
                res = (int) todo;
            file.seek(total - res);
            for (int done = 0; done < res;) {
                int r = file.read(buf, done, res - done);
                if (r <= 0)
                    throw new IOException();
                else
                    done += r;
            }
            file.seek(total - res + (to - from));
            file.write(buf, 0, res);
        }
    }
    file.seek(0);
}
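
A usage sketch for shift, assuming data.bin is a hypothetical file longer than 150 bytes: with from > to it deletes a range, and with from < to it opens a gap of arbitrary bytes.

import java.io.IOException;
import java.io.RandomAccessFile;

public class ShiftDemo {
    public static void main(String[] args) throws IOException {
        try (RandomAccessFile file = new RandomAccessFile("data.bin", "rw")) {
            // from > to: delete bytes [0, 100) by copying [100, length) down to offset 0
            Util.shift(file, 100, 0);
            // from < to: insert 100 arbitrary bytes at offset 50
            Util.shift(file, 50, 150);
        }
    }
}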
 
Example 20 Project: hadoop File: FsDatasetImpl.java
static private void truncateBlock(File blockFile, File metaFile,
    long oldlen, long newlen) throws IOException {
  LOG.info("truncateBlock: blockFile=" + blockFile
      + ", metaFile=" + metaFile
      + ", oldlen=" + oldlen
      + ", newlen=" + newlen);

  if (newlen == oldlen) {
    return;
  }
  if (newlen > oldlen) {
    throw new IOException("Cannot truncate block to from oldlen (=" + oldlen
        + ") to newlen (=" + newlen + ")");
  }

  DataChecksum dcs = BlockMetadataHeader.readHeader(metaFile).getChecksum(); 
  int checksumsize = dcs.getChecksumSize();
  int bpc = dcs.getBytesPerChecksum();
  long n = (newlen - 1)/bpc + 1;
  long newmetalen = BlockMetadataHeader.getHeaderSize() + n*checksumsize;
  long lastchunkoffset = (n - 1)*bpc;
  int lastchunksize = (int)(newlen - lastchunkoffset); 
  byte[] b = new byte[Math.max(lastchunksize, checksumsize)]; 

  RandomAccessFile blockRAF = new RandomAccessFile(blockFile, "rw");
  try {
    //truncate blockFile 
    blockRAF.setLength(newlen);
 
    //read last chunk
    blockRAF.seek(lastchunkoffset);
    blockRAF.readFully(b, 0, lastchunksize);
  } finally {
    blockRAF.close();
  }

  //compute checksum
  dcs.update(b, 0, lastchunksize);
  dcs.writeValue(b, 0, false);

  //update metaFile 
  RandomAccessFile metaRAF = new RandomAccessFile(metaFile, "rw");
  try {
    metaRAF.setLength(newmetalen);
    metaRAF.seek(newmetalen - checksumsize);
    metaRAF.write(b, 0, checksumsize);
  } finally {
    metaRAF.close();
  }
}
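
For concreteness, the chunk arithmetic above with hypothetical numbers: bytesPerChecksum = 512, checksumSize = 4, and newlen = 1300 give 3 chunks, a 276-byte last chunk, and a new meta length of headerSize + 12 bytes.

public class TruncateMath {
    public static void main(String[] args) {
        long newlen = 1300;   // hypothetical target block length
        int bpc = 512;        // bytes per checksum chunk
        int checksumSize = 4; // e.g. a CRC32 stored per chunk
        long headerSize = 7;  // hypothetical metadata header size

        long n = (newlen - 1) / bpc + 1;                      // 3 chunks
        long lastChunkOffset = (n - 1) * bpc;                 // 1024
        int lastChunkSize = (int) (newlen - lastChunkOffset); // 276
        long newMetaLen = headerSize + n * checksumSize;      // 7 + 3*4 = 19

        System.out.println(n + " " + lastChunkOffset + " " + lastChunkSize + " " + newMetaLen);
    }
}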