Listed below are example usages of java.nio.MappedByteBuffer#getInt(), drawn from open-source projects.
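For orientation, here is a minimal, self-contained sketch of the pattern the examples share: map a region of a file and read 4-byte integers with getInt(), either relative to the current position or at an absolute byte index. The file name and record layout are hypothetical.

import java.io.IOException;
import java.nio.ByteOrder;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class GetIntSketch {
    public static void main(String[] args) throws IOException {
        // "records.bin" is a placeholder; the file is assumed to start with a
        // 4-byte count followed by that many 4-byte little-endian values.
        try (FileChannel channel = FileChannel.open(Path.of("records.bin"), StandardOpenOption.READ)) {
            MappedByteBuffer buffer = channel.map(FileChannel.MapMode.READ_ONLY, 0, channel.size());
            buffer.order(ByteOrder.LITTLE_ENDIAN);
            // Relative read: consumes 4 bytes and advances the position.
            int count = buffer.getInt();
            // Absolute read: takes a byte index and leaves the position unchanged.
            for (int i = 0; i < count; i++) {
                System.out.println(buffer.getInt(4 + i * 4));
            }
        }
    }
}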
private void damageCommitlog(long offset) throws Exception {
    // Map the first commit-log file and zero out the bytes starting at the
    // topic-length field of the message at the given offset, simulating
    // on-disk corruption.
    MessageStoreConfig messageStoreConfig = new MessageStoreConfig();
    File file = new File(messageStoreConfig.getStorePathCommitLog() + File.separator + "00000000000000000000");
    FileChannel fileChannel = new RandomAccessFile(file, "rw").getChannel();
    MappedByteBuffer mappedByteBuffer = fileChannel.map(FileChannel.MapMode.READ_WRITE, 0, 1024 * 1024 * 10);

    // The body length sits 84 bytes into the message; the topic length
    // follows the body plus one more 4-byte field.
    int bodyLen = mappedByteBuffer.getInt((int) offset + 84);
    int topicLenIndex = (int) offset + 84 + bodyLen + 4;
    mappedByteBuffer.position(topicLenIndex);
    mappedByteBuffer.putInt(0);
    mappedByteBuffer.putInt(0);
    mappedByteBuffer.putInt(0);
    mappedByteBuffer.putInt(0);

    mappedByteBuffer.force();
    fileChannel.force(true);
    fileChannel.close();
}
private void pageIn() throws IndexOutOfBoundsException {
    if (!pagedIn) {
        // Map the memory region
        MappedByteBuffer buffer = spillFile.getPage(pageIndex);
        int numElements = buffer.getInt();
        for (int i = 0; i < numElements; i++) {
            int kvSize = buffer.getInt();
            byte[] data = new byte[kvSize];
            buffer.get(data, 0, kvSize);
            try {
                pageMap.put(SpillManager.getKey(data), data);
                totalResultSize += (data.length + Bytes.SIZEOF_INT);
            } catch (IOException ioe) {
                // Error during key access on spilled resource
                // TODO rework error handling
                throw new RuntimeException(ioe);
            }
        }
        pagedIn = true;
        dirtyPage = false;
    }
}
@Override
public int readLittleEndianInt() throws IOException {
    MappedByteBuffer curChunk = getCurChunk();
    if (curChunk.remaining() >= 4) {
        return curChunk.getInt();
    }
    // Value is on the chunk boundary - edge case so it is ok if it's a bit slower.
    return Util.readLittleEndianIntSlowly(this);
}
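Note that ByteBuffer, and therefore MappedByteBuffer, decodes big-endian by default, so a reader like the one above relies on the chunk buffers having had their byte order set when they were mapped. A minimal sketch of that setup, with a hypothetical file name:

import java.io.IOException;
import java.nio.ByteOrder;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

class LittleEndianChunk {
    static int readFirstInt() throws IOException {
        // "chunk-0.bin" is a placeholder; only the byte-order call matters here.
        try (FileChannel channel = FileChannel.open(Path.of("chunk-0.bin"), StandardOpenOption.READ)) {
            MappedByteBuffer chunk = channel.map(FileChannel.MapMode.READ_ONLY, 0, channel.size());
            chunk.order(ByteOrder.LITTLE_ENDIAN); // without this, getInt() decodes big-endian
            return chunk.getInt();
        }
    }
}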
/**
 * Check that each controlled level is either ON or OFF.
 **/
public static boolean checkOnOff(MappedByteBuffer mapBuf,
                                 int offset)
{
    for (int i = 0; i < VespaLevelControllerRepo.numLevels; i++) {
        int off = offset + 4 * i;
        int val = mapBuf.getInt(off);
        if (val != ONVAL && val != OFFVAL) {
            System.err.println("bad on/off value: " + val);
            return false;
        }
    }
    return true;
}
@NoWarning("RCN,NP")
public int foo() throws IOException {
    try (FileChannel c = open()) {
        final MappedByteBuffer mb
                = c.map(MapMode.READ_ONLY, 0L, c.size());
        return mb.getInt();
    }
}
private DoubleVector parse(MappedByteBuffer buf) {
    int dim = (int) (size / 4);
    DoubleVector v = new DenseDoubleVector(dim);
    for (int i = 0; i < v.getDimension(); i++) {
        // Each component is stored as the raw 4-byte bit pattern of a float.
        int n = buf.getInt();
        v.set(i, Float.intBitsToFloat(n));
    }
    return v;
}
@Override
protected Tuple readFromBuffer(MappedByteBuffer buffer) {
    int length = buffer.getInt();
    if (length < 0)
        return null;
    byte[] b = new byte[length];
    buffer.get(b);
    Result result = ResultUtil.toResult(new ImmutableBytesWritable(b));
    return new ResultTuple(result);
}
public static void main(String[] args) throws IOException {
    File stackLog = new File("target/stack-logs.78490.2000.unidbg.zcmkle.index");
    FileInputStream inputStream = new FileInputStream(stackLog);
    FileChannel channel = inputStream.getChannel();
    MappedByteBuffer buffer = channel.map(FileChannel.MapMode.READ_ONLY, 0, stackLog.length());
    buffer.order(ByteOrder.LITTLE_ENDIAN);
    int i = 0;
    while (buffer.remaining() >= 16) {
        long size = buffer.getInt() & 0xffffffffL;
        long addr = (buffer.getInt() & 0xffffffffL) ^ 0x00005555;
        long offset_and_flags_l = buffer.getInt() & 0xffffffffL;
        long offset_and_flags_h = buffer.getInt() & 0xffffffffL;
        int flag = (int) ((offset_and_flags_h & 0xff000000) >> 24);
        long stackId = ((offset_and_flags_h & 0x00ffffff) << 32) | offset_and_flags_l;
        String action = "OTHER";
        boolean isFree = false;
        switch (flag) {
            case MALLOC_LOG_TYPE_ALLOCATE:
                action = "ALLOC";
                isFree = false;
                break;
            case MALLOC_LOG_TYPE_DEALLOCATE:
                action = "FREE ";
                isFree = true;
                break;
            case stack_logging_type_vm_allocate:
                action = "MMAP ";
                isFree = false;
                break;
            case stack_logging_type_vm_deallocate:
                action = "UNMAP";
                isFree = true;
                break;
            default:
                if ((flag & stack_logging_type_mapped_file_or_shared_mem) != 0 && (flag & stack_logging_type_vm_allocate) != 0) {
                    action = "MMAPF";
                    isFree = false;
                    break;
                }
                System.err.println(flag);
                break;
        }
        String msg = String.format("[%08d]: %s, stackId=0x%014x, address=0x%08x, size=0x%x", i++, action, stackId, addr, size);
        if (isFree) {
            System.err.println(msg);
        } else {
            System.out.println(msg);
        }
    }
    channel.close();
    inputStream.close();
}
protected RecordEntry getKeyValueTable(long table_pos, ByteString key)
{
    int hash_file = (int) (table_pos / SEGMENT_FILE_SIZE);
    MappedByteBuffer hash_mbb = getBufferMap(hash_file);
    int file_offset = (int) (table_pos % SEGMENT_FILE_SIZE);

    int max;
    int items;
    long next_ptr;

    synchronized (hash_mbb)
    {
        hash_mbb.position(file_offset + (int) LOCATION_HASH_MAX);
        max = hash_mbb.getInt();
        items = hash_mbb.getInt();
        next_ptr = hash_mbb.getLong();
    }

    Assert.assertTrue("Max " + max + " items " + items + " table at " + table_pos + " file " + hash_file + " file offset " + file_offset, max > items);
    Assert.assertTrue(max > 4);
    Assert.assertTrue(items >= 0);

    int hash = key.hashCode();
    int loc = Math.abs(hash % max);
    if (loc < 0) loc = 0;

    //DeterministicStream det_stream = new DeterministicStream(key);
    //int loc = det_stream.nextInt(max);

    while (true)
    {
        Assert.assertTrue(loc >= 0);
        Assert.assertTrue(loc < max);
        synchronized (hash_mbb)
        {
            hash_mbb.position(file_offset + LOCATION_HASH_START + loc * 8);
            long ptr = hash_mbb.getLong();

            if (ptr != 0)
            {
                RecordEntry re = new RecordEntry(ptr);
                if (re.getKey().equals(key)) return re;
            }
            if (ptr == 0)
            {
                if (next_ptr != 0)
                {
                    return getKeyValueTable(next_ptr, key);
                }
                else
                {
                    return null;
                }
            }
        }
        //loc = det_stream.nextInt(max);
        loc = (loc + 131) % max;
    }
}
public static void main2(String[] args) throws Exception {
    //FileChannel.open()
    RandomAccessFile f = new RandomAccessFile("testdata/float.martix", "rw");
    FileChannel fileChannel = f.getChannel();
    MappedByteBuffer buffer = fileChannel.map(FileChannel.MapMode.READ_WRITE, 0, fileChannel.size());
    buffer.getInt();
    System.out.println(buffer);
    buffer.getLong();
    System.out.println(buffer);
    buffer.getChar();
    System.out.println(buffer);

    float[] floats = new float[300];
    buffer.asFloatBuffer().get(floats);
    System.out.println(Arrays.toString(floats));
    System.out.println(buffer);

    // FloatBuffer floatBuffer = buffer.asFloatBuffer();
    //
    //
    // float[] floats = new float[300];
    //
    // floatBuffer.position(0);
    // floatBuffer.get(floats);
    // System.out.println(floatBuffer);
    // System.out.println(Arrays.toString(floats));
    // System.out.println(buffer);
    //
    //
    //
    // System.out.println("----"+buffer.asLongBuffer());
    // System.out.println(buffer.asFloatBuffer());
    // System.out.println(buffer.position(1200));
    // System.out.println(buffer.slice().get(0));
    // Random random = new Random();
    //
    // long t1 = System.currentTimeMillis();
    // for (int i = 0; i < 1000000; i++) {
    //     floatBuffer.position(random.nextInt(1000000)*300);
    //     floatBuffer.get(floats);
    // }
    // long t2 = System.currentTimeMillis();
    //
    // System.out.println(t2-t1);

    //TimeUnit.SECONDS.sleep(10);
    fileChannel.close();
}
public ZipIterable(Path path) throws IOException {
    this.path = path;
    this.chan = FileChannel.open(path, StandardOpenOption.READ);
    // Locate the EOCD
    long size = chan.size();
    if (size < ENDHDR) {
        throw new ZipException("invalid zip archive");
    }
    long eocdOffset = size - ENDHDR;
    MappedByteBuffer eocd = chan.map(MapMode.READ_ONLY, eocdOffset, ENDHDR);
    eocd.order(ByteOrder.LITTLE_ENDIAN);
    int index = 0;
    int commentSize = 0;
    if (!isSignature(eocd, 0, 5, 6)) {
        // The archive may contain a zip file comment; keep looking for the EOCD.
        long start = Math.max(0, size - ENDHDR - 0xFFFF);
        eocd = chan.map(MapMode.READ_ONLY, start, (size - start));
        eocd.order(ByteOrder.LITTLE_ENDIAN);
        index = (int) ((size - start) - ENDHDR);
        while (index > 0) {
            index--;
            eocd.position(index);
            if (isSignature(eocd, index, 5, 6)) {
                commentSize = (int) ((size - start) - ENDHDR) - index;
                eocdOffset = start + index;
                break;
            }
        }
    }
    checkSignature(path, eocd, index, 5, 6, "ENDSIG");
    int totalEntries = eocd.getChar(index + ENDTOT);
    long cdsize = UnsignedInts.toLong(eocd.getInt(index + ENDSIZ));
    int actualCommentSize = eocd.getChar(index + ENDCOM);
    if (commentSize != actualCommentSize) {
        throw new ZipException(
            String.format(
                "zip file comment length was %d, expected %d", commentSize, actualCommentSize));
    }
    // If the number of entries is 0xffff, check if the archive has a zip64 EOCD locator.
    if (totalEntries == ZIP64_MAGICCOUNT) {
        // Assume the zip64 EOCD has the usual size; we don't support zip64 extensible data sectors.
        long zip64eocdOffset = size - ENDHDR - ZIP64_LOCHDR - ZIP64_ENDHDR;
        MappedByteBuffer zip64eocd = chan.map(MapMode.READ_ONLY, zip64eocdOffset, ZIP64_ENDHDR);
        zip64eocd.order(ByteOrder.LITTLE_ENDIAN);
        // Note that zip reading is necessarily best-effort, since an archive could contain 0xFFFF
        // entries and the last entry's data could contain a ZIP64_ENDSIG. Some implementations
        // read the full EOCD records and compare them.
        if (zip64eocd.getInt(0) == ZIP64_ENDSIG) {
            cdsize = zip64eocd.getLong(ZIP64_ENDSIZ);
            eocdOffset = zip64eocdOffset;
        }
    }
    this.cd = chan.map(MapMode.READ_ONLY, eocdOffset - cdsize, cdsize);
    cd.order(ByteOrder.LITTLE_ENDIAN);
}
private void getTableStats(long table_pos, Map<String, Long> map)
{
    map.put("tables", map.get("tables") + 1);

    int hash_file = (int) (table_pos / SEGMENT_FILE_SIZE);
    MappedByteBuffer hash_mbb = getBufferMap(hash_file);
    int file_offset = (int) (table_pos % SEGMENT_FILE_SIZE);

    int max;
    int items;
    long next_ptr;

    synchronized (hash_mbb)
    {
        hash_mbb.position(file_offset + (int) LOCATION_HASH_MAX);
        max = hash_mbb.getInt();
        items = hash_mbb.getInt();
        next_ptr = hash_mbb.getLong();

        hash_mbb.position(file_offset + (int) LOCATION_HASH_START);
        for (int i = 0; i < max; i++)
        {
            long ptr = hash_mbb.getLong(file_offset + LOCATION_HASH_START + i * 8);
            if (ptr != 0)
            {
                RecordEntry re = new RecordEntry(ptr);
                ByteString key = re.getKey();
                ByteString value = re.getValue();
                map.put("key_size", map.get("key_size") + key.size());
                map.put("data_size", map.get("data_size") + value.size());
            }
        }
    }
    map.put("items", map.get("items") + items);

    if (next_ptr != 0)
    {
        getTableStats(next_ptr, map);
    }
}
protected void putKeyValueTable(long table_pos, RecordEntry put_re)
{
    long t1 = System.nanoTime();
    int hash_file = (int) (table_pos / SEGMENT_FILE_SIZE);
    MappedByteBuffer hash_mbb = getBufferMap(hash_file);
    int file_offset = (int) (table_pos % SEGMENT_FILE_SIZE);

    int max;
    int items;
    long next_ptr;

    synchronized (hash_mbb)
    {
        hash_mbb.position(file_offset + (int) LOCATION_HASH_MAX);
        max = hash_mbb.getInt();
        items = hash_mbb.getInt();
        next_ptr = hash_mbb.getLong();
    }

    Assert.assertTrue("Max " + max + " items " + items + " table at " + table_pos + " file " + hash_file + " file offset " + file_offset, max > items);
    Assert.assertTrue(max > 4);
    Assert.assertTrue(items >= 0);

    //DeterministicStream det_stream = new DeterministicStream(key);
    //int loc = det_stream.nextInt(max);
    int hash = put_re.getKey().hashCode();
    int loc = Math.abs(hash % max);
    if (loc < 0) loc = 0;

    double full = (double) items / (double) max;

    while (true)
    {
        Assert.assertTrue(loc >= 0);
        Assert.assertTrue(loc < max);
        synchronized (hash_mbb)
        {
            long t1_check = System.nanoTime();
            hash_mbb.position(file_offset + LOCATION_HASH_START + loc * 8);
            long ptr = hash_mbb.getLong();
            TimeRecord.record(t1_check, "slop_get_ptr");

            if ((ptr == 0) && (full >= HASH_FULL))
            {
                // It isn't here and the hash is full, move on to next table
                if (next_ptr == 0)
                {
                    next_ptr = makeNewHashTable(max * HASH_MULTIPLCATION);
                    hash_mbb.position(file_offset + (int) LOCATION_HASH_NEXT);
                    hash_mbb.putLong(next_ptr);
                }
                TimeRecord.record(t1, "slop_put_key_value_table_rec");
                putKeyValueTable(next_ptr, put_re);
                return;
            }

            RecordEntry re = null;
            if (ptr != 0)
            {
                re = new RecordEntry(ptr);
                if (!re.getKey().equals(put_re.getKey()))
                {
                    re = null;
                }
            }
            if ((ptr == 0) || (re != null))
            {
                //If we have an empty or a key match
                long data_loc = put_re.storeItem(ptr);
                hash_mbb.position(file_offset + LOCATION_HASH_START + loc * 8);
                hash_mbb.putLong(data_loc);

                if (ptr == 0)
                {
                    hash_mbb.position(file_offset + LOCATION_HASH_ITEMS);
                    items++;
                    hash_mbb.putInt(items);
                }
                TimeRecord.record(t1, "slop_put_key_value_table_add");
                return;
            }
        }
        //loc = det_stream.nextInt(max);
        loc = (loc + 131) % max;
    }
}
public Block(MappedByteBuffer buf) throws IOException {
    // Read the four consecutive int header fields that describe this block.
    this.filePos = buf.getInt();
    this.compressedSize = buf.getInt();
    this.normalSize = buf.getInt();
    this.flags = buf.getInt();
}
private void load(final String filename) throws IOException {
    memoryMappedFile = new RandomAccessFile(filename, "r");
    long fileSize = memoryMappedFile.length();
    if (fileSize == 0L) {
        throw new IOException("Index is a 0-byte file?");
    }

    int numNodes = (int) (fileSize / NODE_SIZE);
    int buffIndex = (numNodes - 1) / MAX_NODES_IN_BUFFER;
    int rest = (int) (fileSize % BLOCK_SIZE);
    int blockSize = (rest > 0 ? rest : BLOCK_SIZE);
    // Two valid relations between dimension and file size:
    // 1) rest % NODE_SIZE == 0 makes sure either everything fits into buffer or rest is a multiple of NODE_SIZE;
    // 2) (file_size - rest) % NODE_SIZE == 0 makes sure everything else is a multiple of NODE_SIZE.
    if (rest % NODE_SIZE != 0 || (fileSize - rest) % NODE_SIZE != 0) {
        throw new RuntimeException("ANNIndex initiated with wrong dimension size");
    }

    long position = fileSize - blockSize;
    buffers = new MappedByteBuffer[buffIndex + 1];
    boolean process = true;
    int m = -1;
    long index = fileSize;
    while (position >= 0) {
        MappedByteBuffer annBuf = memoryMappedFile.getChannel().map(
                FileChannel.MapMode.READ_ONLY, position, blockSize);
        annBuf.order(ByteOrder.LITTLE_ENDIAN);

        buffers[buffIndex--] = annBuf;

        for (int i = blockSize - (int) NODE_SIZE; process && i >= 0; i -= NODE_SIZE) {
            index -= NODE_SIZE;
            int k = annBuf.getInt(i);  // node[i].n_descendants
            if (m == -1 || k == m) {
                roots.add(index);
                m = k;
            } else {
                process = false;
            }
        }
        blockSize = BLOCK_SIZE;
        position -= blockSize;
    }
}