The following are examples of java.lang.Math#max(), collected from various open-source projects.
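As a quick reference before the project examples, a minimal, self-contained sketch of the four Math.max overloads (int, long, float, double):

public class MaxDemo {
    public static void main(String[] args) {
        System.out.println(Math.max(3, 7));         // 7     (int overload)
        System.out.println(Math.max(3L, 7L));       // 7     (long overload)
        System.out.println(Math.max(3.5f, 7.25f));  // 7.25  (float overload)
        System.out.println(Math.max(-0.0, 0.0));    // 0.0   (double overload; +0.0 is treated as greater than -0.0)
    }
}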
/**
* Convert the given off-heap, positive byte[] magnitude to an int[] in
* little-endian format.
*/
static int[] convertBytesToIntegerMagnitude(final UnsafeWrapper unsafe,
long memOffset, final long memEnd) {
// skip leading zeros
while (memOffset < memEnd && unsafe.getByte(memOffset) == 0) {
memOffset++;
}
final int numQs = ((int)(memEnd - memOffset) + 3) >>> 2;
final int[] result = new int[numQs];
long bend = memEnd;
int shift;
for (int i = 0; i < numQs; i++) {
// at least one byte will exist which does not need any shifting
int res = (unsafe.getByte(--bend) & 0xff);
shift = 8;
final long bstart = Math.max(memOffset, bend - 3);
while (bend > bstart) {
res |= ((unsafe.getByte(--bend) & 0xff) << shift);
shift += 8;
}
result[i] = res;
}
return result;
}
/** Convert the given positive byte[] magnitude to an int[] in little-endian format. */
static int[] convertBytesToIntegerMagnitude(final byte[] mag, int offset,
final int endOffset) {
// skip leading zeros
while (offset < endOffset && mag[offset] == 0) {
offset++;
}
final int numQs = ((endOffset - offset) + 3) >>> 2;
final int[] result = new int[numQs];
int bend = endOffset, shift;
for (int i = 0; i < numQs; i++) {
// at least one byte will exist which does not need any shifting
int res = (mag[--bend] & 0xff);
shift = 8;
final int bstart = Math.max(offset, bend - 3);
while (bend > bstart) {
res |= ((mag[--bend] & 0xff) << shift);
shift += 8;
}
result[i] = res;
}
return result;
}
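A hypothetical driver for the byte[] overload above (the off-heap variant runs the same loop over Unsafe reads). The input is a big-endian magnitude; the output words come back least significant first:

byte[] mag = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05}; // the leading zero byte is skipped
int[] words = convertBytesToIntegerMagnitude(mag, 0, mag.length);
// words[0] == 0x02030405  (least significant 32 bits)
// words[1] == 0x00000001  (remaining high byte)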
/**
* Create a new IVFSKNN classifier from a chromosome
*
* @param solution Solution used to create the classifier
*
*/
public IVFSKNN(Chromosome solution) {
ArrayList<Integer> kValues = new ArrayList<>();
double mA = solution.getmA();
double mB = solution.getmB();
int[] kv = solution.getBody();
for (int i = 0; i < kv.length; i++) {
if (kv[i] == 1) {
kValues.add(i + 1);
}
}
minM = Math.min(mA, mB);
maxM = Math.max(mA, mB);
assignTrainMembership(kValues);
}
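The loop above decodes the chromosome's bit vector into neighbourhood sizes: bit i set means k = i + 1 takes part in the vote. A standalone sketch of just that decoding (Chromosome is an external type; a plain int[] stands in for its body here):

import java.util.ArrayList;

public class KValuesDemo {
    public static void main(String[] args) {
        int[] body = {1, 0, 1, 1}; // gene i set => neighbourhood size i + 1 is used
        ArrayList<Integer> kValues = new ArrayList<>();
        for (int i = 0; i < body.length; i++) {
            if (body[i] == 1) {
                kValues.add(i + 1);
            }
        }
        System.out.println(kValues); // prints [1, 3, 4]
    }
}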
public static int find(int start, String adj, int n, int[] dp) {
// If reaches till end
if (start == n)
return 0;
// If dp is saved
if (dp[start] != -1)
return dp[start];
dp[start] = 0;
int one = 0, zero = 0, k;
// Try each possible segment length starting at 'start'
for (k = start; k < n; k++) {
// If the character scanned is '1', count a one, otherwise a zero
if (adj.charAt(k) == '1') one++;
else zero++;
// If one is greater than zero, add total length scanned till now
if (one > zero)
dp[start] = Math.max(dp[start], find(k + 1, adj, n, dp) + k - start + 1);
// Continue with next length
else
dp[start] = Math.max(dp[start], find(k + 1, adj, n, dp));
}
return dp[start];
}
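A hypothetical driver for find(); dp must be pre-filled with -1 so memoised entries can be told apart from unsolved ones:

String adj = "1011";
int[] dp = new int[adj.length()];
java.util.Arrays.fill(dp, -1);
System.out.println(find(0, adj, adj.length(), dp)); // prints 4: the whole string has more 1s than 0s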
public static void rbind(ExecutionContext ec, GPUContext gCtx, String instName, MatrixObject in1, MatrixObject in2, String outputName) {
if (ec.getGPUContext(0) != gCtx)
throw new DMLRuntimeException("GPU : Invalid internal state, the GPUContext set with the ExecutionContext is not the same used to run this LibMatrixCUDA function");
if(LOG.isTraceEnabled()) {
LOG.trace("GPU : rbind" + ", GPUContext=" + gCtx);
}
int rowsA = toInt(in1.getNumRows());
int colsA = toInt(in1.getNumColumns());
int rowsB = toInt(in2.getNumRows());
int colsB = toInt(in2.getNumColumns());
if (colsA != colsB) {
throw new DMLRuntimeException("GPU : Invalid internal state - the columns must match up for a rbind operation");
}
// only dense output is supported
MatrixObject out = getDenseMatrixOutputForGPUInstruction(ec, instName, outputName, rowsA + rowsB, colsA);
Pointer C = getDensePointer(gCtx, out, instName);
Pointer A = getDensePointer(gCtx, in1, instName);
Pointer B = getDensePointer(gCtx, in2, instName);
int maxRows = Math.max(rowsA, rowsB);
int maxCols = Math.max(colsA, colsB);
getCudaKernels(gCtx).launchKernel("rbind",
ExecutionConfig.getConfigForSimpleMatrixOperations(maxRows, maxCols),
A, B, C, rowsA, colsA, rowsB, colsB);
}
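For reference, a plain-Java sketch of the semantics the rbind kernel implements: stacking a dense rowsA x cols matrix A on top of a rowsB x cols matrix B, both row-major (this helper is illustrative, not part of the GPU code path):

static double[] rbindDense(double[] A, int rowsA, double[] B, int rowsB, int cols) {
    double[] C = new double[(rowsA + rowsB) * cols];
    System.arraycopy(A, 0, C, 0, rowsA * cols);            // rows of A come first
    System.arraycopy(B, 0, C, rowsA * cols, rowsB * cols); // rows of B follow
    return C;
}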
/**
* Get threads, blocks and shared memory for cumulative scan along columns
* @param gCtx a valid {@link GPUContext}
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
* @return integer array containing {blocks_x, blocks_y, threads_x, block_height}
*/
private static int[] getKernelParamsForCumScan(GPUContext gCtx, int rows, int cols) {
final int MAX_THREADS = getMaxThreads(gCtx);
final int WARP_SIZE = getWarpSize(gCtx);
final int MAX_BLOCKS_Y = gCtx.getGPUProperties().maxGridSize[1];
int t1 = cols % MAX_THREADS;
int t2 = (t1 + WARP_SIZE - 1) / WARP_SIZE;
int t3 = t2 * WARP_SIZE;
int threads_x = gcd(MAX_THREADS, t3);
int blocks_x = Math.max(1, (cols + (threads_x - 1)) / (threads_x));
int block_height = Math.max(8, MAX_THREADS / threads_x);
int blocks_y = (rows + block_height - 1) / block_height;
int min_loop_length = 128;
if(rows <= min_loop_length) {
block_height = rows;
blocks_y = 1;
}
if(blocks_y > MAX_BLOCKS_Y) {
block_height = Math.max(2, 2 * rows / MAX_BLOCKS_Y);
blocks_y = (rows + block_height - 1) / block_height;
}
if(LOG.isTraceEnabled()) {
LOG.trace("Launch configuration for cumulative aggregate: blocks_x=" + blocks_x + " blocks_y=" +
blocks_y + " block_height=" + block_height + " threads_x=" + threads_x);
}
return new int[] {blocks_x, blocks_y, threads_x, block_height};
}
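A worked trace of the launch-configuration arithmetic, assuming a hypothetical device with MAX_THREADS = 1024 and WARP_SIZE = 32, for a 10000 x 1000 input:

// t1 = 1000 % 1024                 = 1000  (columns left over after full blocks)
// t2 = (1000 + 31) / 32            = 32    (rounded up to whole warps)
// t3 = 32 * 32                     = 1024
// threads_x = gcd(1024, 1024)      = 1024
// blocks_x  = max(1, 2023 / 1024)  = 1
// block_height = max(8, 1024/1024) = 8
// blocks_y  = (10000 + 7) / 8      = 1250  (rows > min_loop_length, so no override applies)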
void zoomCircuit(int dy) {
double newScale;
double oldScale = transform[0];
double val = dy*.01;
newScale = Math.max(oldScale+val, .2);
newScale = Math.min(newScale, 2.5);
setCircuitScale(newScale);
}
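The max/min pair above is the standard clamp idiom; factored out as a sketch (the helper name is illustrative):

static double clamp(double v, double lo, double hi) {
    return Math.min(Math.max(v, lo), hi);
}
// zoomCircuit then reduces to: setCircuitScale(clamp(oldScale + dy * .01, .2, 2.5));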
/**
* Returns the number of faces that have to be created. POV-Ray needs to
* know this count before the faces themselves are emitted.
*
* @return the number of faces
*/
public int faceCount() {
int cnt = 0;
for (int i = 1; i < size() - 1; i++) {
int c_i = ((MeshSection) elementAt(i)).size();
int c_i1 = ((MeshSection) elementAt(i + 1)).size();
if (c_i != c_i1) {
cnt += Math.max(c_i, c_i1);
} else if (c_i > 1) {
cnt += 2 * c_i;
}
}
return cnt;
}
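A worked example of the count, for hypothetical section sizes {1, 4, 4, 6, 1} (indices 1 through size() - 2 are visited):

// i = 1: sizes 4 and 4 are equal and > 1 -> cnt += 2 * 4 = 8
// i = 2: sizes 4 and 6 differ            -> cnt += max(4, 6) = 6
// i = 3: sizes 6 and 1 differ            -> cnt += max(6, 1) = 6
// faceCount() == 20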
private void getResultsFromIndex(int srcId, ArrayList<UnfilteredResult> resultsBeforeRefining){
String src = mStrings.get(srcId);
int srcLen = src.length();
for (int dstLen = Math.max(srcLen - mThreshold, mThreshold + 1);
dstLen <= mGlobalIndex.lastKey();
dstLen++) {
if(!mGlobalIndex.containsKey(dstLen)){
continue;
}
int delta = srcLen - dstLen;
for (int gramNo = 0; gramNo <= mThreshold; gramNo++) {
int candidateGramPos = getGramPos(dstLen, gramNo);
int candidateGramLen = getGramLen(dstLen, gramNo);
int startPos = Math.max(Math.max(candidateGramPos - gramNo,
candidateGramPos + delta + gramNo - mThreshold), 0);
int endPos = Math.min(Math.min(candidateGramPos + gramNo,
candidateGramPos + delta - gramNo + mThreshold), srcLen - candidateGramLen);
for (; startPos <= endPos; startPos++) {
String gram = src.substring(startPos, startPos + candidateGramLen);
ArrayList<Integer> invertedList = mGlobalIndex.get(dstLen).get(gramNo).get(gram);
if (invertedList != null) {
for (int k = 0; k < invertedList.size(); k++) {
int dstId = invertedList.get(k);
UnfilteredResult t = new UnfilteredResult();
t.dstId = dstId;
t.dstMatchPos = candidateGramPos;
t.srcMatchPos = startPos;
t.gramLen = candidateGramLen;
resultsBeforeRefining.add(t);
}
}
}
}
}
Collections.sort(resultsBeforeRefining, new Comparator<UnfilteredResult>() {
@Override
public int compare(UnfilteredResult a, UnfilteredResult b) {
return Integer.compare(a.dstId, b.dstId);
}
});
}
public static void main(String[] args) throws Exception {
// Size that a single card covers.
final int cardSize = 512;
WhiteBox wb = WhiteBox.getWhiteBox();
smallPageSize = wb.getVMPageSize();
largePageSize = wb.getVMLargePageSize();
allocGranularity = wb.getVMAllocationGranularity();
final long heapAlignment = lcm(cardSize * smallPageSize, largePageSize);
if (largePageSize == 0) {
System.out.println("Skip tests because large page support does not seem to be available on this platform.");
return;
}
if (largePageSize == smallPageSize) {
System.out.println("Skip tests because large page support does not seem to be available on this platform." +
"Small and large page size are the same.");
return;
}
// To get large pages for the card table etc. we need at least a 1G heap (with 4k page size).
// 32 bit systems will have problems reserving such an amount of contiguous space, so skip the
// test there.
if (!Platform.is32bit()) {
final long heapSizeForCardTableUsingLargePages = largePageSize * cardSize;
final long heapSizeDiffForCardTable = Math.max(Math.max(allocGranularity * cardSize, HEAP_REGION_SIZE), largePageSize);
Asserts.assertGT(heapSizeForCardTableUsingLargePages, heapSizeDiffForCardTable,
"To test we would require to use an invalid heap size");
testVM("case1: card table and bitmap use large pages (barely)", heapSizeForCardTableUsingLargePages, true, true);
testVM("case2: card table and bitmap use large pages (extra slack)", heapSizeForCardTableUsingLargePages + heapSizeDiffForCardTable, true, true);
testVM("case3: only bitmap uses large pages (barely not)", heapSizeForCardTableUsingLargePages - heapSizeDiffForCardTable, false, true);
}
// Minimum heap requirement to get large pages for bitmaps is 128M heap. This seems okay to test
// everywhere.
final int bitmapTranslationFactor = 8 * 8; // ObjectAlignmentInBytes * BitsPerByte
final long heapSizeForBitmapUsingLargePages = largePageSize * bitmapTranslationFactor;
final long heapSizeDiffForBitmap = Math.max(Math.max(allocGranularity * bitmapTranslationFactor, HEAP_REGION_SIZE),
Math.max(largePageSize, heapAlignment));
Asserts.assertGT(heapSizeForBitmapUsingLargePages, heapSizeDiffForBitmap,
"To test we would require to use an invalid heap size");
testVM("case4: only bitmap uses large pages (barely)", heapSizeForBitmapUsingLargePages, false, true);
testVM("case5: only bitmap uses large pages (extra slack)", heapSizeForBitmapUsingLargePages + heapSizeDiffForBitmap, false, true);
testVM("case6: nothing uses large pages (barely not)", heapSizeForBitmapUsingLargePages - heapSizeDiffForBitmap, false, false);
}
@GET
@Path("vars")
@Produces(MediaType.TEXT_HTML)
public Response var() throws IOException {
Map<String, String> variables = new LinkedHashMap<String, String>();
// Runtime
RuntimeMXBean runtimeBean = ManagementFactory.getRuntimeMXBean();
DateFormat dateTimeFormat = DateFormat.getDateTimeInstance(DateFormat.MEDIUM, DateFormat.FULL);
variables.put("state", getShutdownState() + "");
if (shuttingTime != 0) {
variables.put("shutdown-time", dateTimeFormat.format(new Date(shuttingTime)));
}
if (turningOnTime != 0) {
variables.put("turnon-time", dateTimeFormat.format(new Date(turningOnTime)));
}
variables.put("start-time", dateTimeFormat.format(new Date(runtimeBean.getStartTime())));
variables.put("uptime-in-ms", runtimeBean.getUptime() + "");
variables.put("vm-name", runtimeBean.getVmName());
variables.put("vm-vender", runtimeBean.getVmVendor());
variables.put("vm-version", runtimeBean.getVmVersion());
//BuildServer Version and Id
variables.put("buildserver-version", GitBuildId.getVersion() + "");
variables.put("buildserver-git-fingerprint", GitBuildId.getFingerprint() + "");
// OS
OperatingSystemMXBean osBean = ManagementFactory.getOperatingSystemMXBean();
variables.put("os-arch", osBean.getArch());
variables.put("os-name", osBean.getName());
variables.put("os-version", osBean.getVersion());
variables.put("num-processors", osBean.getAvailableProcessors() + "");
variables.put("load-average-past-1-min", osBean.getSystemLoadAverage() + "");
// Threads
variables.put("num-java-threads", ManagementFactory.getThreadMXBean().getThreadCount() + "");
// Memory
Runtime runtime = Runtime.getRuntime();
MemoryMXBean memoryBean = ManagementFactory.getMemoryMXBean();
variables.put("total-memory", runtime.totalMemory() + "");
variables.put("free-memory", runtime.freeMemory() + "");
variables.put("max-memory", runtime.maxMemory() + "");
variables.put("used-heap", memoryBean.getHeapMemoryUsage().getUsed() + "");
variables.put("used-non-heap", memoryBean.getNonHeapMemoryUsage().getUsed() + "");
// Build requests
variables.put("count-async-build-requests", asyncBuildRequests.get() + "");
variables.put("rejected-async-build-requests", rejectedAsyncBuildRequests.get() + "");
variables.put("successful-async-build-requests", successfulBuildRequests.get() + "");
variables.put("failed-async-build-requests", failedBuildRequests.get() + "");
// Build tasks
int max = buildExecutor.getMaxActiveTasks();
if (max == 0) {
variables.put("maximum-simultaneous-build-tasks-allowed", "unlimited");
} else {
variables.put("maximum-simultaneous-build-tasks-allowed", max + "");
}
variables.put("completed-build-tasks", buildExecutor.getCompletedTaskCount() + "");
maximumActiveBuildTasks = Math.max(maximumActiveBuildTasks, buildExecutor.getActiveTaskCount());
variables.put("maximum-simultaneous-build-tasks-occurred", maximumActiveBuildTasks + "");
variables.put("active-build-tasks", buildExecutor.getActiveTaskCount() + "");
StringBuilder html = new StringBuilder();
html.append("<html><body><tt>");
for (Map.Entry<String, String> variable : variables.entrySet()) {
html.append("<b>").append(variable.getKey()).append("</b> ")
.append(variable.getValue()).append("<br>");
}
html.append("</tt></body></html>");
return Response.ok(html.toString(), MediaType.TEXT_HTML_TYPE).build();
}
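The Math.max call near the end is the usual high-water-mark idiom: the stored maximum only ever grows as new samples are folded in. A minimal sketch:

int highWaterMark = 0;

void sample(int current) {
    highWaterMark = Math.max(highWaterMark, current); // only ever increases
}

Note that this read-modify-write is not atomic; if the endpoint can be hit concurrently, an AtomicInteger with accumulateAndGet(current, Math::max) would be the safer variant.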
private void moveUp() {
top = Math.max(top - pageSize, 1);
bottom = Math.min(top + pageSize - 1, selection.size());
}
public static int c_diameter(Node node){
if(node == null)return 0;
//heights of left & right subtrees
int c_left_height = c_height(node.left);
int c_right_height = c_height(node.right);
//diameters of left and right subtrees
int c_left_diameter = c_diameter(node.left);
int c_right_diameter = c_diameter(node.right);
// the diameter either passes through this node (left height + right height + 1)
// or lies entirely within one subtree
return Math.max(c_left_height + c_right_height + 1, Math.max(c_left_diameter, c_right_diameter));
}
/**
* Queries the {@link TaskTracker} for a set of map-completion events
* from a given event ID.
* @throws IOException
*/
private int getMapCompletionEvents() throws IOException {
int numNewMaps = 0;
MapTaskCompletionEventsUpdate update =
umbilical.getMapCompletionEvents(reduceTask.getJobID(),
fromEventId.get(),
MAX_EVENTS_TO_FETCH,
reduceTask.getTaskID());
TaskCompletionEvent events[] = update.getMapTaskCompletionEvents();
// Check if the reset is required.
// Since there is no ordering of the task completion events at the
// reducer, the only option to sync with the new jobtracker is to reset
// the events index
if (update.shouldReset()) {
fromEventId.set(0);
obsoleteMapIds.clear(); // clear the obsolete map
mapLocations.clear(); // clear the map locations mapping
}
// Update the last seen event ID
fromEventId.set(fromEventId.get() + events.length);
// Process the TaskCompletionEvents:
// 1. Save the SUCCEEDED maps in knownOutputs to fetch the outputs.
// 2. Save the OBSOLETE/FAILED/KILLED maps in obsoleteOutputs to stop
// fetching from those maps.
// 3. Remove TIPFAILED maps from neededOutputs since we don't need their
// outputs at all.
for (TaskCompletionEvent event : events) {
switch (event.getTaskStatus()) {
case SUCCEEDED:
{
URI u = URI.create(event.getTaskTrackerHttp());
String host = u.getHost();
TaskAttemptID taskId = event.getTaskAttemptId();
int duration = event.getTaskRunTime();
if (duration > maxMapRuntime) {
maxMapRuntime = duration;
// adjust max-fetch-retries based on max-map-run-time
maxFetchRetriesPerMap = Math.max(MIN_FETCH_RETRIES_PER_MAP,
getClosestPowerOf2((maxMapRuntime / BACKOFF_INIT) + 1));
}
URL mapOutputLocation = new URL(event.getTaskTrackerHttp() +
"/mapOutput?job=" + taskId.getJobID() +
"&map=" + taskId +
"&reduce=" + getPartition());
List<MapOutputLocation> loc = mapLocations.get(host);
if (loc == null) {
loc = Collections.synchronizedList
(new LinkedList<MapOutputLocation>());
mapLocations.put(host, loc);
}
loc.add(new MapOutputLocation(taskId, host, mapOutputLocation));
numNewMaps++;
}
break;
case FAILED:
case KILLED:
case OBSOLETE:
{
obsoleteMapIds.add(event.getTaskAttemptId());
LOG.info("Ignoring obsolete output of " + event.getTaskStatus() +
" map-task: '" + event.getTaskAttemptId() + "'");
}
break;
case TIPFAILED:
{
copiedMapOutputs.add(event.getTaskAttemptId().getTaskID());
LOG.info("Ignoring output of failed map TIP: '" +
event.getTaskAttemptId() + "'");
}
break;
}
}
return numNewMaps;
}
private Chromosome generateChromoWithSupport()
{
int nVars, attr, tr;
double lb, ub, max_attr, min_attr;
nVars = this.ds.getnVars();
ArrayList<Gene> genes = new ArrayList<Gene>();
double[][] trans = this.ds.getRealTransactions();
tr = Randomize.Randint(0, this.ds.getnTrans());
for (int g=0; g < nVars; g++) {
Gene gen = new Gene();
attr = g;
gen.setAttr(attr);
gen.setType( this.ds.getAttributeType(attr));
gen.setCa(Randomize.RandintClosed(0,2));
max_attr = this.ds.getMax(attr);
min_attr = this.ds.getMin(attr);
if ( gen.getType() != Gene.NOMINAL ) {
if ( gen.getType() == Gene.REAL ) {
lb = Math.max(trans[tr][attr] - (this.allow_ampl[attr] / 2.0), min_attr);
ub = Math.min(trans[tr][attr] + (this.allow_ampl[attr] / 2.0), max_attr);
}
else {
lb = Math.max(trans[tr][attr] - ((int) this.allow_ampl[attr] / 2), min_attr);
ub = Math.min(trans[tr][attr] + ((int) this.allow_ampl[attr] / 2), max_attr);
}
}
else lb = ub = trans[tr][attr];
gen.setL(lb);
gen.setU(ub);
genes.add(gen.copy());
}
int set_antec = Randomize.Randint(0,genes.size()-1);
genes.get(set_antec).setCa(0);
int set_cons = Randomize.Randint(0,genes.size()-1);
while(set_antec == set_cons)
set_cons = Randomize.Randint(0,genes.size()-1);
genes.get(set_cons).setCa(1);
Chromosome c = new Chromosome(genes);
c.setFitness(this.evaluate_chromosome(c));
return c;
}
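The Math.max/Math.min pairs above clip each gene's candidate interval to the attribute's observed [min, max] range. A standalone sketch of that clipping (names are illustrative):

static double[] clipInterval(double center, double ampl, double attrMin, double attrMax) {
    double lb = Math.max(center - ampl / 2.0, attrMin); // lower bound, floored at the attribute minimum
    double ub = Math.min(center + ampl / 2.0, attrMax); // upper bound, capped at the attribute maximum
    return new double[] { lb, ub };
}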
public static int c_height(Node node){
if(node == null) return 0;
return 1 + Math.max(c_height(node.left),c_height(node.right));
}
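A hypothetical Node class and driver exercising c_height and c_diameter above (with this definition the diameter counts the nodes on the longest path):

class Node {
    int val;
    Node left, right;
    Node(int v) { val = v; }
}

// Tree:      1
//           / \
//          2   3
//         / \
//        4   5
Node root = new Node(1);
root.left = new Node(2);
root.right = new Node(3);
root.left.left = new Node(4);
root.left.right = new Node(5);
System.out.println(c_height(root));   // prints 3
System.out.println(c_diameter(root)); // prints 4: the path 4 -> 2 -> 1 -> 3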
public static MR_long max ( MR_long x, MR_long y ) { return new MR_long(Math.max(x.get(),y.get())); }
public static MR_double max ( MR_double x, MR_double y ) { return new MR_double(Math.max(x.get(),y.get())); }
public static MR_float max ( MR_float x, MR_float y ) { return new MR_float(Math.max(x.get(),y.get())); }
public static MR_int max ( MR_int x, MR_int y ) { return new MR_int(Math.max(x.get(),y.get())); }