下面列出了 java.io.BufferedInputStream#skip() 的实例代码,或者点击链接到 GitHub 查看源代码,也可以在右侧发表评论。
/**
 * Negative case for mark/reset: after mark(readLimit), reading more than
 * readLimit bytes invalidates the mark, so reset() must throw IOException.
 *
 * @param readLimit mark limit, deliberately smaller than the 20 bytes read
 * @throws IOException on unexpected stream failure
 */
private void testNegative(final int readLimit) throws IOException {
    byte[] bytes = new byte[100];
    for (int i = 0; i < bytes.length; i++) {
        bytes[i] = (byte) i;
    }
    // buffer of 10 bytes
    BufferedInputStream bis =
        new BufferedInputStream(new ByteArrayInputStream(bytes), 10);
    // skip 10 bytes
    bis.skip(10);
    bis.mark(readLimit); // 1 byte short in buffer size
    // read 20 more bytes (30 consumed in total), with internal buffer increased to 20
    bis.read(new byte[20]);
    try {
        bis.reset();
        fail(); // reaching here means the invalidated mark was not detected
    } catch (IOException ex) {
        assertEquals("Resetting to invalid mark", ex.getMessage());
    }
}
/**
 * Positive case for mark/reset: as long as no more than readLimit bytes are
 * read after mark(), reset() succeeds and the next byte is the marked one.
 *
 * @param readLimit mark limit large enough to cover the 20-byte read-ahead
 * @throws IOException on unexpected stream failure
 */
private void testPositive(final int readLimit) throws IOException {
    byte[] data = new byte[100];
    for (int idx = 0; idx < data.length; idx++) {
        data[idx] = (byte) idx;
    }
    // Wrap the 100-byte source with a 10-byte internal buffer.
    BufferedInputStream bis =
        new BufferedInputStream(new ByteArrayInputStream(data), 10);
    bis.skip(10);           // advance past the first 10 bytes
    bis.mark(readLimit);    // internal buffer may grow up to readLimit
    bis.read(new byte[20]); // read beyond the original buffer size
    bis.reset();            // must land back on the marked position
    assert (bis.read() == 10);
}
/**
 * Negative mark/reset case: reading past {@code readLimit} after mark()
 * must invalidate the mark, so reset() is expected to fail.
 *
 * @param readLimit mark limit, one byte short of the subsequent read-ahead
 * @throws IOException on unexpected stream failure
 */
private void testNegative(final int readLimit) throws IOException {
    byte[] data = new byte[100];
    for (int idx = 0; idx < data.length; idx++) {
        data[idx] = (byte) idx;
    }
    // Source stream buffered with a 10-byte internal buffer.
    BufferedInputStream bis =
        new BufferedInputStream(new ByteArrayInputStream(data), 10);
    bis.skip(10);           // consume the first 10 bytes
    bis.mark(readLimit);    // 1 byte short in buffer size
    bis.read(new byte[20]); // read-ahead exceeds readLimit
    try {
        bis.reset();
        fail(); // no exception means the invalid mark went undetected
    } catch (IOException expected) {
        assertEquals("Resetting to invalid mark", expected.getMessage());
    }
}
/**
 * Demonstrates that BufferedInputStream.skip(n) may skip fewer bytes than
 * requested even when more bytes appear to be available upstream.
 *
 * A 100-byte source is buffered with a 50-byte internal buffer; after a
 * 10-byte read, a 50-byte skip is requested and the shortfall is reported.
 */
public static void main(String[] args) throws Exception {
    byte[] buffer = new byte[100];
    ByteArrayInputStream bais = new ByteArrayInputStream(buffer);
    BufferedInputStream bis = new BufferedInputStream(bais, 50);
    byte[] smallBuf = new byte[10];
    // read() may return fewer bytes than requested; check instead of ignoring.
    int read = bis.read(smallBuf);
    if (read != smallBuf.length) {
        System.out.println("Short read: expected " + smallBuf.length + ", got " + read);
    }
    long available = bis.available();
    int request = 50;
    long s = bis.skip(request);
    // skip() only guarantees a best-effort skip; it can undershoot both the
    // requested count and available().
    if (s < available && s < request) {
        System.out.println("Skipped fewer bytes than requested and fewer bytes than available");
        System.out.println("Available: " + available);
        System.out.println("Requested: " + request);
        System.out.println("Skipped: " + s);
    }
}
/**
 * Read the header from the Phoenix file.
 *
 * Marks the stream, reads up to 100 bytes, decodes them and extracts the
 * header delimited by "PPX!!!!|" ... "|!!!!XPP". The stream is then reset
 * and positioned just past the header so record parsing can continue.
 *
 * @param stream input stream
 * @return raw header data
 * @throws IOException if the header markers are not present
 */
private String readHeaderString(BufferedInputStream stream) throws IOException
{
    int bufferSize = 100;
    stream.mark(bufferSize);
    byte[] buffer = new byte[bufferSize];

    // read() may legally return fewer bytes than requested: keep reading
    // until the buffer is full or the stream is exhausted.
    int offset = 0;
    while (offset < bufferSize)
    {
        int bytesRead = stream.read(buffer, offset, bufferSize - offset);
        if (bytesRead == -1)
        {
            break;
        }
        offset += bytesRead;
    }

    Charset charset = CharsetHelper.UTF8;
    String header = new String(buffer, 0, offset, charset);
    int prefixIndex = header.indexOf("PPX!!!!|");
    int suffixIndex = header.indexOf("|!!!!XPP");
    if (prefixIndex != 0 || suffixIndex == -1)
    {
        throw new IOException("File format not recognised");
    }

    // Rewind to the mark, then advance just past the trailing marker.
    // skip() is best-effort, so loop until done or end of stream.
    int skip = suffixIndex + 9;
    stream.reset();
    long remaining = skip;
    while (remaining > 0)
    {
        long skipped = stream.skip(remaining);
        if (skipped <= 0)
        {
            break;
        }
        remaining -= skipped;
    }
    return header.substring(prefixIndex + 8, suffixIndex);
}
/**
 * Reads a single chunk of up to {@code chunkSize} bytes from {@code file},
 * starting at {@code position}, and advances the transfer progress.
 *
 * @param position byte offset in the file to read from
 * @param chunkSize number of bytes requested
 * @return a buffer of exactly {@code chunkSize} bytes, or null on failure
 */
public byte[] readData(long position, int chunkSize) {
    try {
        if (sending) {
            // Re-opens the file for every chunk and seeks from the start.
            // NOTE(review): this makes each chunk O(position) — confirm this
            // is acceptable for large files.
            ips = new FileInputStream(file);
            bufferIps = new BufferedInputStream(ips);
            if (position > 0) {
                long skipped = bufferIps.skip(position);
                LogUtil.i(TAG, "skipped length:" + skipped);
            }
        }
        byte[] data = new byte[chunkSize];
        // NOTE(review): readSize may be less than chunkSize (or -1 at EOF);
        // the tail of 'data' then stays zero-filled — verify callers handle
        // a short or empty final chunk.
        int readSize = bufferIps.read(data, 0, chunkSize);
        LogUtil.i(TAG,
            "readSize:" + readSize + ",currProgress:" + getProgress() + ",fileLength:" + size);
        addProgress(position + readSize);
        // Streams are closed and cleared only on the success path.
        // NOTE(review): an exception above leaks ips/bufferIps — consider a
        // finally block.
        bufferIps.close();
        ips.close();
        bufferIps = null;
        ips = null;
        return data;
    } catch (Exception e) {
        e.printStackTrace();
    }
    // Any failure is swallowed and reported to the caller as null.
    return null;
}
/**
 * Detects a byte-order mark at the current position of {@code in} and
 * leaves the stream positioned just past it.
 *
 * @param in buffered stream positioned at the start of the data
 * @throws IOException if reading the BOM fails
 */
public BOMDetector( BufferedInputStream in ) throws IOException {
    this.in = in;
    in.mark( 16 );      // look-ahead window; presumably covers the longest BOM — TODO confirm
    readBOM();          // project method, not visible here; appears to set bomSize
    in.reset();         // rewind to the mark
    in.skip( bomSize ); // NOTE(review): skip() return value unchecked — a short skip would go unnoticed
}
/**
 * Stores the given data for this Document
 *
 * Probes the stream by skipping one big block: if fewer than
 * {@code bigBlockSize} bytes exist, the document is small enough for the
 * mini stream, otherwise a full stream is used. The stream is then rewound
 * via mark/reset and copied out, padded with 0xFF bytes to the end of the
 * final block.
 *
 * @param stream source data
 * @return number of payload bytes written (excluding padding)
 * @throws IOException on read or write failure
 */
private int store(InputStream stream) throws IOException {
    final int bigBlockSize = POIFSConstants.BIG_BLOCK_MINIMUM_DOCUMENT_SIZE;
    // Buffer one byte more than the probe so the mark survives the skip.
    BufferedInputStream bis = new BufferedInputStream(stream, bigBlockSize+1);
    bis.mark(bigBlockSize);
    // Do we need to store as a mini stream or a full one?
    if(bis.skip(bigBlockSize) < bigBlockSize) {
        _stream = new NPOIFSStream(_filesystem.getMiniStore());
        _block_size = _filesystem.getMiniStore().getBlockStoreBlockSize();
    } else {
        _stream = new NPOIFSStream(_filesystem);
        _block_size = _filesystem.getBlockStoreBlockSize();
    }
    // start from the beginning
    bis.reset();
    // Store it
    OutputStream os = _stream.getOutputStream();
    byte buf[] = new byte[1024];
    int length = 0;
    for (int readBytes; (readBytes = bis.read(buf)) != -1; length += readBytes) {
        os.write(buf, 0, readBytes);
    }
    // Pad to the end of the block with -1s
    // NOTE(review): usedInBlock is a remainder, so it can never equal
    // _block_size — the second condition is redundant.
    int usedInBlock = length % _block_size;
    if (usedInBlock != 0 && usedInBlock != _block_size) {
        int toBlockEnd = _block_size - usedInBlock;
        byte[] padding = new byte[toBlockEnd];
        Arrays.fill(padding, (byte)0xFF);
        os.write(padding);
    }
    // Tidy and return the length
    os.close();
    return length;
}
/**
 * Positive mark/reset case: a read-ahead that stays within
 * {@code readLimit} keeps the mark valid, so reset() restores the stream to
 * the marked byte (value 10 here).
 *
 * @param readLimit mark limit covering the 20-byte read-ahead
 * @throws IOException on unexpected stream failure
 */
private void testPositive(final int readLimit) throws IOException {
    byte[] source = new byte[100];
    for (int n = 0; n < source.length; n++) {
        source[n] = (byte) n;
    }
    // 10-byte internal buffer over the 100-byte source.
    BufferedInputStream bis =
        new BufferedInputStream(new ByteArrayInputStream(source), 10);
    bis.skip(10);           // move past bytes 0..9
    bis.mark(readLimit);    // buffer may be enlarged up to readLimit
    bis.read(new byte[20]); // consume 20 bytes past the mark
    bis.reset();            // rewind to the marked position
    assert (bis.read() == 10);
}
// FindBugs fixture: the skip() return value IS consumed by the loop, so no
// SR_NOT_CHECKED warning is expected here.
// NOTE(review): skip() returns 0 at end of stream, so this loop would spin
// forever if anyLong exceeds the remaining bytes — fine as a static-analysis
// fixture, not a pattern for production code.
@NoWarning("SR_NOT_CHECKED")
void notBug2(BufferedInputStream any, long anyLong) throws IOException {
    while (anyLong > 0) {
        anyLong -= any.skip(anyLong);
    }
}
/**
 * Opens the payload file for sending and positions the stream just past the
 * header, resetting the transfer counters.
 *
 * @throws IOException if the file cannot be opened
 */
public void openForSending() throws IOException {
    fin = new FileInputStream(sinfile);
    bin = new BufferedInputStream(fin);
    // NOTE(review): skip() may skip fewer bytes than requested and the return
    // value is unchecked — totalread could then disagree with the actual
    // stream position.
    bin.skip(getHeaderLength());
    totalread=getHeaderLength();
    partcount=0;
}
/**
 * Inspects the start of {@code in} for a byte-order mark and skips it so the
 * caller reads content only.
 *
 * @param in buffered stream positioned at the very beginning of the data
 * @throws IOException if reading the BOM fails
 */
public BOMDetector( BufferedInputStream in ) throws IOException {
    this.in = in;
    in.mark( 16 );      // presumably 16 bytes is enough look-ahead for any BOM — TODO confirm
    readBOM();          // project helper; appears to determine bomSize from the marked bytes
    in.reset();         // back to the start
    in.skip( bomSize ); // NOTE(review): short-skip possible; return value is not verified
}
/**
 * Send files.
 *
 * Streams the inclusive byte range [start, end] of the given NSP element to
 * the already-open socket writer/stream, updating the progress bar as it goes.
 *
 * @param nspElem element whose content is transmitted
 * @param start first byte offset to send
 * @param end last byte offset to send (inclusive)
 * @return true on failure or interruption, false on success
 * */
private boolean writeToSocket(NSPElement nspElem, long start, long end){
    if (interrupt)
        return true;
    // Announce the partial-content (206) response before the payload.
    currSockPW.write(NETPacket.getCode206(nspElem.getSize(), start, end));
    currSockPW.flush();
    try{
        long count = end - start + 1; // Meeh. Somehow it works
        InputStream elementIS = context.getContentResolver().openInputStream(nspElem.getUri());
        if (elementIS == null) {
            issueDescription = "NET Unable to obtain InputStream";
            return true;
        }
        BufferedInputStream bis = new BufferedInputStream(elementIS);
        int readPice = 8388608; // = 8Mb
        byte[] byteBuf;
        // NOTE(review): a single skip() call may legally skip fewer bytes
        // than requested without being an error; treating that as failure may
        // be overly strict.
        if (bis.skip(start) != start){
            issueDescription = "NET: Unable to skip requested range.";
            nspElem.setStatus(context.getResources().getString(R.string.status_failed_to_upload));
            return true;
        }
        long currentOffset = 0;
        while (currentOffset < count){
            if (interrupt)
                return true; // NOTE(review): bis is not closed on this or other early returns — leak
            // Shrink the final chunk to the remaining byte count.
            if ((currentOffset+readPice) >= count){
                readPice = (int) (count - currentOffset);
            }
            byteBuf = new byte[readPice];
            // NOTE(review): read() may return a short read mid-stream; that
            // is treated as a hard failure here — confirm intended.
            if (bis.read(byteBuf) != readPice){
                issueDescription = "NET: Reading of nspElem stream suddenly ended.";
                return true;
            }
            currSockOS.write(byteBuf);
            currentOffset += readPice;
            updateProgressBar((int) ((currentOffset+1)/(count/100+1)));
        }
        currSockOS.flush(); // TODO: check if this really needed.
        bis.close();
        resetProgressBar();
    }
    catch (IOException ioe){
        issueDescription = "NET: File transmission failed. Returned: "+ioe.getMessage();
        nspElem.setStatus(context.getResources().getString(R.string.status_failed_to_upload)); // TODO: REDUNDANT?
        return true;
    }
    return false;
}
/**
 * NSPContent command handler
 * isItRawRequest - if True, just ask NS what's needed
 *                - if False, send ticket
 *
 * Resolves the requested NCA by ID, seeks to its offset inside the NSP file
 * and streams it to the NS over USB in fixed-size chunks.
 *
 * @param pfsElement parsed PFS metadata for the NSP being served
 * @param isItRawRequest whether the NCA ID should be read from USB
 * @return true on success, false on any failure
 * */
private boolean handleNSPContent(PFSProvider pfsElement, boolean isItRawRequest){
    int requestedNcaID;
    if (isItRawRequest) {
        // NS tells us which NCA it wants: a 4-byte little-endian ID.
        byte[] readByte = readUsb();
        if (readByte == null || readByte.length != 4) {
            issueDescription = "GL Handle 'Content' command: [Read requested ID]";
            return false;
        }
        requestedNcaID = ByteBuffer.wrap(readByte).order(ByteOrder.LITTLE_ENDIAN).getInt();
    }
    else {
        requestedNcaID = pfsElement.getNcaTicketID();
    }
    // Absolute position of the NCA = offset within the PFS body + body start.
    long realNcaOffset = pfsElement.getNca(requestedNcaID).getNcaOffset()+pfsElement.getBodySize();
    long realNcaSize = pfsElement.getNca(requestedNcaID).getNcaSize();
    long readFrom = 0;
    int readPice = 16384; // 16 KB chunks (comment previously claimed 8mb) NOTE: consider switching to 1mb 1048576
    byte[] readBuf;
    try{
        BufferedInputStream bufferedInStream = new BufferedInputStream(context.getContentResolver().openInputStream(nspElements.get(0).getUri())); // TODO: refactor?
        // NOTE(review): skip() is best-effort; a legitimate short skip is
        // treated as a hard failure here. The stream is also not closed on
        // the early-return error paths below — leak.
        if (bufferedInStream.skip(realNcaOffset) != realNcaOffset) {
            issueDescription = "GL Failed to skip NCA offset";
            return false;
        }
        int updateProgressPeriods = 0;
        while (readFrom < realNcaSize){
            // Shrink the last chunk to the remaining byte count.
            if (readPice > (realNcaSize - readFrom))
                readPice = (int)(realNcaSize - readFrom); // TODO: Troubles could raise here
            readBuf = new byte[readPice];
            // NOTE(review): read() can return a short read mid-stream; that
            // is treated as failure here — confirm intended.
            if (bufferedInStream.read(readBuf) != readPice) {
                issueDescription = "GL Failed to read data from file.";
                return false;
            }
            if (writeUsb(readBuf)) {
                issueDescription = "GL Failed to write data into NS.";
                return false;
            }
            readFrom += readPice;
            if (updateProgressPeriods++ % 1024 == 0) // Update progress bar after every 16mb goes to NS
                updateProgressBar((int) ((readFrom+1)/(realNcaSize/100+1)));
        }
        bufferedInStream.close();
        resetProgressBar();
    }
    catch (java.io.IOException ioe){
        issueDescription = "GL Failed to read NCA ID "+requestedNcaID+". IO Exception: "+ioe.getMessage();
        return false;
    }
    return true;
}
// FindBugs fixture: skip()'s return value is deliberately ignored so the
// SR_NOT_CHECKED detector fires. Do not "fix" this — the ignored result is
// the point of the test.
@ExpectWarning("SR_NOT_CHECKED")
void bug1(BufferedInputStream any, long anyLong) throws IOException {
    any.skip(anyLong);
}
// FindBugs fixture: identical to bug1 — the unchecked skip() call is
// intentional and must remain to trigger SR_NOT_CHECKED.
@ExpectWarning("SR_NOT_CHECKED")
void bug2(BufferedInputStream any, long anyLong) throws IOException {
    any.skip(anyLong);
}
// FindBugs fixture: propagating skip()'s result to the caller counts as
// checking it, so no SR_NOT_CHECKED warning should fire here.
@NoWarning("SR_NOT_CHECKED")
long notBug(BufferedInputStream any, long anyLong) throws IOException {
    long skipped = any.skip(anyLong);
    return skipped;
}
/**
 * Builds a record reader for a fixed-length file described by a COBOL
 * copybook: loads the copybook layout, computes the record length, then
 * opens the split and aligns the stream on the first whole record.
 *
 * @param genericSplit split of the data file to read
 * @param job job configuration holding the copybook HDFS path
 * @throws IOException declared, but see the catch-all note below
 */
public CopybookRecordReader(FileSplit genericSplit, JobConf job)
    throws IOException {
    try {
        // NOTE(review): job is dereferenced here, so the 'job != null' check
        // below can never be false — the guard is in the wrong place.
        String cblPath = job.get(Const.COPYBOOK_INPUTFORMAT_CBL_HDFS_PATH_CONF);
        if (cblPath == null) {
            if (job != null) {
                // Fall back to the Hive table properties for the copybook path.
                MapWork mrwork = Utilities.getMapWork(job);
                if (mrwork == null) {
                    // NOTE(review): only a message is printed; mrwork is still
                    // dereferenced just below and would NPE when null.
                    System.out.println("When running a client side hive job you have to set \"copybook.inputformat.cbl.hdfs.path\" before executing the query." );
                    System.out.println("When running a MR job we can get this from the hive TBLProperties" );
                }
                Map<String, PartitionDesc> map = mrwork.getPathToPartitionInfo();
                // Only the first partition's properties are consulted.
                for (Map.Entry<String, PartitionDesc> pathsAndParts : map.entrySet()) {
                    System.out.println("Hey");
                    Properties props = pathsAndParts.getValue().getProperties();
                    cblPath = props
                        .getProperty(Const.COPYBOOK_INPUTFORMAT_CBL_HDFS_PATH_CONF);
                    break;
                }
            }
        }
        FileSystem fs = FileSystem.get(job);
        BufferedInputStream inputStream = new BufferedInputStream(
            fs.open(new Path(cblPath)));
        CobolCopybookLoader copybookInt = new CobolCopybookLoader();
        externalRecord = copybookInt
            .loadCopyBook(inputStream, "RR", CopybookLoader.SPLIT_NONE, 0,
                "cp037", Convert.FMT_MAINFRAME, 0, null);
        int fileStructure = Constants.IO_FIXED_LENGTH;
        // Record length = sum of all field lengths from the copybook.
        for (ExternalField field : externalRecord.getRecordFields()) {
            recordByteLength += field.getLen();
        }
        // jump to the point in the split that the first whole record of split
        // starts at
        FileSplit split = (FileSplit) genericSplit;
        start = split.getStart();
        end = start + split.getLength();
        final Path file = split.getPath();
        BufferedInputStream fileIn = new BufferedInputStream(fs.open(split
            .getPath()));
        if (start != 0) {
            // Align on the next record boundary after 'start'.
            // NOTE(review): skip() return value unchecked — a short skip
            // would silently misalign every record in the split.
            pos = start - (start % recordByteLength) + recordByteLength;
            fileIn.skip(pos);
        }
        ret = LineIOProvider.getInstance().getLineReader(
            fileStructure,
            LineIOProvider.getInstance().getLineProvider(fileStructure));
        ret.open(fileIn, externalRecord);
    } catch (Exception e) {
        // NOTE(review): all failures are swallowed, leaving the reader
        // half-initialized; callers will fail later with obscure NPEs.
        e.printStackTrace();
    }
}
/**
 * New-API counterpart of the constructor above: loads the copybook layout,
 * computes the fixed record length and opens the split aligned on the first
 * whole record.
 *
 * @param split file split to read
 * @param context task context providing the configuration
 * @throws IOException on stream failure
 * @throws InterruptedException per the InputFormat contract
 */
@Override
public void initialize(InputSplit split, TaskAttemptContext context)
    throws IOException, InterruptedException {
    String cblPath = context.getConfiguration().get(
        Const.COPYBOOK_INPUTFORMAT_CBL_HDFS_PATH_CONF);
    FileSystem fs = FileSystem.get(context.getConfiguration());
    // NOTE(review): inputStream is never explicitly closed here — confirm
    // loadCopyBook takes ownership.
    BufferedInputStream inputStream = new BufferedInputStream(fs.open(new Path(
        cblPath)));
    CobolCopybookLoader copybookInt = new CobolCopybookLoader();
    try {
        externalRecord = copybookInt
            .loadCopyBook(inputStream, "RR", CopybookLoader.SPLIT_NONE, 0,
                "cp037", Convert.FMT_MAINFRAME, 0, null);
        int fileStructure = Constants.IO_FIXED_LENGTH;
        // Record length = sum of all field lengths from the copybook.
        for (ExternalField field : externalRecord.getRecordFields()) {
            recordByteLength += field.getLen();
        }
        // jump to the point in the split that the first whole record of split
        // starts at
        FileSplit fileSplit = (FileSplit) split;
        start = fileSplit.getStart();
        end = start + fileSplit.getLength();
        final Path file = fileSplit.getPath();
        BufferedInputStream fileIn = new BufferedInputStream(fs.open(fileSplit
            .getPath()));
        if (start != 0) {
            // Align on the next record boundary after 'start'.
            // NOTE(review): skip() return value unchecked — a short skip
            // would silently misalign every record in the split.
            pos = start - (start % recordByteLength) + recordByteLength;
            fileIn.skip(pos);
        }
        ret = LineIOProvider.getInstance().getLineReader(
            fileStructure,
            LineIOProvider.getInstance().getLineProvider(fileStructure));
        ret.open(fileIn, externalRecord);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}