下面列出了 java.util.TreeSet#floor() 的实例代码;也可以点击链接到 GitHub 查看源代码,或在右侧发表评论。
/**
 * LeetCode 220 "Contains Duplicate III"
 * (https://leetcode-cn.com/problems/contains-duplicate-iii/)
 *
 * Returns true if there exist indices i and j such that
 * |nums[i] - nums[j]| <= t and |i - j| <= k.
 *
 * A sliding window of at most k previous values is kept in a TreeSet
 * (a balanced BST), so the nearest neighbours of the current value are
 * found in O(log(min(k, n))) instead of by a linear scan of the window.
 *
 * Time complexity:  O(n * log(min(k, n)))
 * Space complexity: O(min(k, n))
 */
public boolean containsNearbyAlmostDuplicate(int[] nums, int k, int t) {
    TreeSet<Long> window = new TreeSet<>();
    for (int i = 0; i < nums.length; ++i) {
        long current = nums[i];
        // Smallest windowed value that is >= current (its successor).
        Long above = window.ceiling(current);
        if (above != null && above - current <= t) {
            return true;
        }
        // Largest windowed value that is <= current (its predecessor).
        Long below = window.floor(current);
        if (below != null && current - below <= t) {
            return true;
        }
        window.add(current);
        // Keep only the k most recent elements in the window.
        if (window.size() > k) {
            window.remove((long) nums[i - k]);
        }
    }
    return false;
}
/**
 * Sliding-window solution: keeps at most k previous values in a TreeSet and
 * asks whether any stored value lies inside the closed interval
 * [nums[i] - t, nums[i] + t].
 *
 * Example (t = 3): values 12, 15, 18 pairwise qualify; 10, 20, 30 do not.
 *
 * @param nums input array
 * @param k    maximum allowed difference between indices
 * @param t    maximum allowed difference between values
 * @return true if some pair (i, j) satisfies |i - j| <= k and
 *         |nums[i] - nums[j]| <= t
 */
public boolean containsNearbyAlmostDuplicate(int[] nums, int k, int t) {
    TreeSet<Long> window = new TreeSet<>();
    for (int i = 0; i < nums.length; i++) {
        long value = nums[i];
        // Largest stored value not exceeding value + t; a hit means some
        // stored value is within t at or above the current element.
        Long notAbove = window.floor(value + t);
        if (notAbove != null && notAbove >= value) {
            return true;
        }
        // Smallest stored value not below value - t; a hit means some
        // stored value is within t at or below the current element.
        Long notBelow = window.ceiling(value - t);
        if (notBelow != null && notBelow <= value) {
            return true;
        }
        // The relative order of the next two statements does not matter.
        window.add(value);
        if (window.size() == k + 1) {
            window.remove((long) nums[i - k]);
        }
    }
    return false;
}
/**
 * TreeSet&lt;Integer&gt; variant: stores the raw ints and performs the
 * distance test in 64-bit arithmetic so that values straddling the int
 * range cannot overflow the subtraction.
 *
 * @param nums input array
 * @param k    maximum index distance
 * @param t    maximum value distance
 * @return true when a qualifying pair exists
 */
public boolean containsNearbyAlmostDuplicate2(int[] nums, int k, int t) {
    int len = nums.length;
    // Degenerate inputs can never produce a qualifying pair.
    if (len == 0 || k <= 0 || t < 0) {
        return false;
    }
    TreeSet<Integer> window = new TreeSet<>();
    for (int i = 0; i < len; i++) {
        int current = nums[i];
        // Nearest stored value at or above the current one.
        Integer above = window.ceiling(current);
        if (above != null && (long) above - (long) current <= t) {
            return true;
        }
        // Nearest stored value at or below the current one.
        Integer below = window.floor(current);
        if (below != null && (long) current - (long) below <= t) {
            return true;
        }
        window.add(current);
        // Drop the element that falls out of the k-wide window.
        if (i >= k) {
            window.remove(nums[i - k]);
        }
    }
    return false;
}
/**
 * Returns the cache {@link CacheSpan} corresponding to the provided lookup {@link CacheSpan}.
 * <p>
 * If the lookup position is contained by an existing entry in the cache, then the returned
 * {@link CacheSpan} defines the file in which the data is stored. If the lookup position is not
 * contained by an existing entry, then the returned {@link CacheSpan} defines the maximum extents
 * of the hole in the cache.
 *
 * @param lookupSpan A lookup {@link CacheSpan} specifying a key and position.
 * @return The corresponding cache {@link CacheSpan}.
 */
private CacheSpan getSpan(CacheSpan lookupSpan) {
    String key = lookupSpan.key;
    long offset = lookupSpan.position;
    TreeSet<CacheSpan> entries = cachedSpans.get(key);
    // No spans at all for this key: the hole is unbounded on the right.
    if (entries == null) {
        return CacheSpan.createOpenHole(key, lookupSpan.position);
    }
    // The only span that can contain the lookup position is the one starting at or before it.
    CacheSpan floorSpan = entries.floor(lookupSpan);
    if (floorSpan != null &&
        floorSpan.position <= offset && offset < floorSpan.position + floorSpan.length) {
        // The lookup position is contained within floorSpan.
        if (floorSpan.file.exists()) {
            return floorSpan;
        } else {
            // The file has been deleted from under us. It's likely that other files will have been
            // deleted too, so scan the whole in-memory representation.
            removeStaleSpans();
            // Retry after the purge. NOTE(review): termination assumes removeStaleSpans() drops
            // the entry for floorSpan (whose file no longer exists) — confirm, otherwise this
            // could recurse repeatedly.
            return getSpan(lookupSpan);
        }
    }
    // Not contained by any span: the hole runs from the lookup position up to the next span's
    // start, or is open-ended when no later span exists.
    CacheSpan ceilEntry = entries.ceiling(lookupSpan);
    return ceilEntry == null ? CacheSpan.createOpenHole(key, lookupSpan.position) :
        CacheSpan.createClosedHole(key, lookupSpan.position,
            ceilEntry.position - lookupSpan.position);
}
/**
 * Builds a prefix-minimal set of strings: after every insertion the set
 * contains no entry that is a prefix of another entry — only the shortest
 * covering prefix is kept.
 *
 * @param blocks input strings
 * @param trans  optional transformation applied to each string before
 *               insertion; may be null
 * @return a TreeSet in which no element is a prefix of another element
 */
public static TreeSet<String> makeTreeSet(Collection<String> blocks,
        StringTransformer trans) {
    TreeSet<String> tmp = new TreeSet<String>();
    for (String filter : blocks) {
        if (trans != null) {
            filter = trans.transform(filter);
        }
        // If an existing entry is a prefix of this one, the set already
        // covers it — don't add.
        String possiblePrefix = tmp.floor(filter);
        if (possiblePrefix != null && filter.startsWith(possiblePrefix)) {
            continue;
        }
        // Otherwise remove EVERY existing entry that the new entry is a
        // prefix of. In sorted order all such entries are contiguous
        // starting at ceiling(filter), so poll until the prefix stops
        // matching. (The previous code removed only the first match, which
        // could leave redundant longer entries, e.g. adding "ab" to
        // {"ab1", "ab2"} left "ab2" behind.)
        String possibleLonger;
        while ((possibleLonger = tmp.ceiling(filter)) != null
                && possibleLonger.startsWith(filter)) {
            tmp.remove(possibleLonger);
        }
        tmp.add(filter);
    }
    return tmp;
}
/**
 * Reads a text file and builds a word-frequency set: each entry maps a
 * lower-cased word to a (count, definition-placeholder) pair, ordered by
 * DoubleComparableEntry's natural ordering.
 *
 * NOTE(review): FileReader uses the platform default charset — confirm the
 * expected input encoding. The reader/stream are not closed if an exception
 * is thrown mid-read; try-with-resources would be safer.
 *
 * @param file the file to read
 * @return a TreeSet of (word, (frequency, "")) entries
 * @throws FileNotFoundException if the file does not exist
 * @throws IOException on read errors
 */
public static TreeSet<DoubleComparableEntry<String, DoubleComparableEntry<Integer, String>>> getWordList(File file) throws FileNotFoundException, IOException {
    FileReader fis = new FileReader(file);
    BufferedReader bis = new BufferedReader(fis);
    // Word accumulation buffer.
    // NOTE(review): a word longer than 64 characters would overflow barr — confirm inputs.
    char[] barr = new char[64];
    int idxB = 0;
    int b = 0;
    String s = "";
    TreeSet<DoubleComparableEntry<String, DoubleComparableEntry<Integer, String>>> set = new TreeSet<DoubleComparableEntry<String, DoubleComparableEntry<Integer, String>>>();
    DoubleComparableEntry<String, DoubleComparableEntry<Integer, String>> e, en;
    while (bis.ready()) {
        b = bis.read();
        // Delimiter test: any of the characters in this literal ends the current word.
        // NOTE(review): the literal contains mojibake (mis-decoded multi-byte characters) and an
        // "[email protected]"-style scraper artifact; restore it from the original source before trusting
        // the exact delimiter set.
        if (" âÂÂâ âÂÂâ¦âÂÂâÂÂ\r\n\f\[email protected]#â«$%&*-+({}÷ÃÂãââ¬)!.,\"':;/?_[]=~`|^â¢îéö<>\\".indexOf(b) >= 0) {
            if (idxB > 0) {
                // Close off the buffered word, normalised to lower case
                // (default-locale toLowerCase — NOTE(review): consider Locale.ROOT).
                s = new String(barr, 0, idxB).toLowerCase();
                idxB = 0;
                e = new DoubleComparableEntry<String, DoubleComparableEntry<Integer, String>>(s, new DoubleComparableEntry<Integer, String>(1, ""));
                // floor + equals emulates a "get" on the TreeSet: when the word is already
                // present, bump its count instead of inserting a duplicate entry.
                if ((en = set.floor(e)) != null && en.equals(e)) {
                    DoubleComparableEntry<Integer, String> value = en.getValue();
                    value.setKey(value.getKey() + 1);
                } else {
                    set.add(e);
                }
            } else {
                // skip consecutive delimiters
            }
        } else {
            // Ordinary character: extend the current word.
            barr[idxB++] = (char)b;
        }
    }
    bis.close();
    fis.close();
    // for (DoubleComparableEntry<String, Integer> ee : set) {
    // System.out.println(ee.getKey() + ": " + ee.getValue());
    // }
    return set;
}
/**
 * Sliding-window TreeSet solution on widened (long) values: any stored
 * element inside [nums[i] - t, nums[i] + t] proves the claim.
 *
 * @param nums input array
 * @param k    maximum index distance
 * @param t    maximum value distance
 * @return true when a qualifying pair exists
 */
public boolean containsNearbyAlmostDuplicate2(int[] nums, int k, int t) {
    // Reject inputs that cannot contain a qualifying pair.
    int len = nums.length;
    if (len == 0 || k <= 0 || t < 0) {
        return false;
    }
    TreeSet<Long> window = new TreeSet<>();
    for (int i = 0; i < len; i++) {
        long value = nums[i];
        // Largest stored value <= value + t; a hit at or above value qualifies.
        Long candidate = window.floor(value + t);
        if (candidate != null && candidate >= value) {
            return true;
        }
        // Smallest stored value >= value - t; a hit at or below value qualifies.
        candidate = window.ceiling(value - t);
        if (candidate != null && candidate <= value) {
            return true;
        }
        // Once i reaches k, evict the element leaving the window before
        // inserting the current one (e.g. with k = 3, index 0 leaves at i = 3).
        if (i >= k) {
            window.remove((long) nums[i - k]);
        }
        window.add(value);
    }
    return false;
}
/**
 * Uses long values to avoid int overflow when computing value distances.
 *
 * @param nums input array
 * @param k index distance bound: the TreeSet therefore never needs to hold
 *          more than k elements
 * @param t value distance bound
 * @return true if some pair of indices within distance k holds values within
 *         distance t
 */
public boolean containsNearbyAlmostDuplicate(int[] nums, int k, int t) {
    int len = nums.length;
    // A pair needs at least two elements and a positive index window.
    // (t < 0 needs no special case: both distance checks below are then
    // unsatisfiable, so the method falls through to false.)
    if (len <= 1 || k <= 0) {
        return false;
    }
    TreeSet<Long> set = new TreeSet<>();
    int i = 0;
    while (i < len) {
        // Nearest neighbours of nums[i] among the previous (at most k) values;
        // if either is within t, a qualifying pair exists.
        Long floor = set.floor((long) nums[i]);
        Long ceiling = set.ceiling((long) nums[i]);
        boolean hasFloor = floor != null && nums[i] - floor <= t;
        boolean hasCeiling = ceiling != null && ceiling - nums[i] <= t;
        if (hasFloor || hasCeiling) {
            return true;
        }
        // Evict before inserting: at index i the element nums[i - k] is now
        // too far away to pair with the current element. The >= comparison is
        // deliberate — the current element is conceptually already counted.
        if (i >= k) {
            set.remove((long) nums[i - k]);
        }
        set.add((long) nums[i]);
        // (Removed leftover debug output that printed the whole set on every
        // iteration — an O(n) side effect per element.)
        i++;
    }
    return false;
}
/**
 * floor returns the greatest element less than or equal to the argument,
 * or null when every element in the set is greater. Uses the fixture
 * produced by set5() (presumably the elements one..five).
 */
public void testFloor() {
    TreeSet q = set5();
    // Exact match: floor returns the element itself.
    assertEquals(three, q.floor(three));
    // No exact match: floor falls back to the next lower element.
    assertEquals(five, q.floor(six));
    assertEquals(one, q.floor(one));
    // Below the minimum: no floor exists.
    assertNull(q.floor(zero));
}
/**
 * Reads a test-case count from stdin followed by one 1-based position l per
 * case, and prints the digit of an infinite digit sequence at that position.
 *
 * NOTE(review): the indexed sequence appears to be repeated prefixes of the
 * positive integers — each block recorded in startIndexes restarts the walk
 * at i = 1 below, so block m is the concatenation "1 2 3 ... m"; confirm
 * against the original problem statement. Relies on a countDigits(int)
 * helper defined elsewhere in this class.
 */
public static void main (String [] args) throws Exception {
    // startIndexes holds the 1-based index at which each block begins; blocks
    // are precomputed until the total digit count passes Integer.MAX_VALUE.
    TreeSet<Long> startIndexes=new TreeSet<>();
    int max=1;
    long digitCount=0;
    while (digitCount<2147483647) {
        startIndexes.add(digitCount+1);
        // Block for this max contributes the digits of 1..max.
        for (int i=1;i<=max;i++) digitCount+=countDigits(i);
        max++;
    }
    BufferedReader br=new BufferedReader(new InputStreamReader(System.in));
    int testCaseCount=Integer.parseInt(br.readLine());
    for (int testCase=0;testCase<testCaseCount;testCase++) {
        long l=Long.parseLong(br.readLine());
        // Greatest block start at or before l. (floor() already returns l
        // itself when present, so the contains() test is redundant but harmless.)
        long lowerStartIndex=startIndexes.contains(l) ? l : startIndexes.floor(l);
        int ans=-1;
        // Walk the numbers of the block until reaching the one whose digits
        // cover position l.
        for (int i=1;ans==-1;i++) {
            int digits=countDigits(i);
            if (lowerStartIndex+digits>l) {
                int [] nums=new int [digits];
                // Split i into decimal digits, most significant first. This
                // clobbers the loop variable i, which is safe only because ans
                // is assigned immediately after, terminating the loop.
                for (int ni=0;ni<digits;ni++) {
                    nums[digits-ni-1]=i%10;
                    i/=10;
                }
                ans=nums[(int)(l-lowerStartIndex)];
            } else lowerStartIndex+=digits;
        }
        System.out.println(ans);
    }
}
/**
 * Single-probe variant: once values are widened to long, the only candidate
 * worth checking is the largest windowed value &lt;= nums[i] + t; the pair
 * qualifies iff that candidate is also &gt;= nums[i] - t.
 *
 * @param nums input array
 * @param k    maximum index distance
 * @param t    maximum value distance
 * @return true when a qualifying pair exists
 */
public boolean containsNearbyAlmostDuplicate(int[] nums, int k, int t) {
    TreeSet<Long> window = new TreeSet<>();
    for (int i = 0; i < nums.length; i++) {
        long current = nums[i];
        Long best = window.floor(current + t);
        if (best != null && best >= current - t) {
            return true;
        }
        window.add(current);
        // Keep the window to the k most recent elements.
        if (i >= k) {
            window.remove((long) nums[i - k]);
        }
    }
    return false;
}
/**
 * Returns whether the byte range [position, position + length) for {@code key}
 * is fully covered by cached spans, i.e. contains no holes.
 */
@Override
public synchronized boolean isCached(String key, long position, long length) {
    TreeSet<CacheSpan> entries = cachedSpans.get(key);
    // Nothing cached for this key at all.
    if (entries == null) {
        return false;
    }
    CacheSpan lookupSpan = CacheSpan.createLookup(key, position);
    // Only the span starting at or before position can cover the start of the range.
    CacheSpan floorSpan = entries.floor(lookupSpan);
    if (floorSpan == null || floorSpan.position + floorSpan.length <= position) {
        // We don't have a span covering the start of the queried region.
        return false;
    }
    long queryEndPosition = position + length;
    long currentEndPosition = floorSpan.position + floorSpan.length;
    if (currentEndPosition >= queryEndPosition) {
        // floorSpan covers the queried region.
        return true;
    }
    // Walk subsequent spans in position order, extending coverage while they
    // abut or overlap the covered region.
    Iterator<CacheSpan> iterator = entries.tailSet(floorSpan, false).iterator();
    while (iterator.hasNext()) {
        CacheSpan next = iterator.next();
        if (next.position > currentEndPosition) {
            // There's a hole in the cache within the queried region.
            return false;
        }
        // We expect currentEndPosition to always equal (next.position + next.length), but
        // perform a max check anyway to guard against the existence of overlapping spans.
        currentEndPosition = Math.max(currentEndPosition, next.position + next.length);
        if (currentEndPosition >= queryEndPosition) {
            // We've found spans covering the queried region.
            return true;
        }
    }
    // We ran out of spans before covering the queried region.
    return false;
}
/**
 * Adds {@code add} to {@code sortedPaths}; when an equal path is already
 * stored, asks the configured path selector which of the two is better and
 * keeps only the winner, optionally linking the loser to the winner as an
 * alternate.
 *
 * @param add         the candidate path
 * @param sortedPaths the destination set
 */
private void addIfBetter(Path add, TreeSet<Path> sortedPaths) {
    if (TRACE) {
        System.err.println("Adding " + add);
    }
    if (sortedPaths.contains(add)) {
        // contains() guarantees an element equal to add exists; floor(add)
        // retrieves that stored element (TreeSet offers no direct get()).
        final Path other = sortedPaths.floor(add);
        final Path best = mConfig.mPathSelector.better(add, other);
        if (best == add) {
            // The new path wins: swap it in for the stored duplicate.
            sortedPaths.remove(other);
            sortedPaths.add(best);
            if (TRACE) {
                System.err.println("Replace " + other);
            }
            if (mConfig.mFlagAlternates) {
                // NOTE(review): inSync() semantics inferred from usage — the
                // loser is linked as an equivalent alternate only when it is
                // not in sync; confirm against Path's documentation.
                if (!other.inSync()) {
                    add.linkEquivalent(other);
                }
            }
        } else {
            // The stored path wins: leave the set untouched.
            if (TRACE) {
                System.err.println("Prefer " + other);
            }
            if (mConfig.mFlagAlternates) {
                if (!add.inSync()) {
                    other.linkEquivalent(add);
                }
            }
        }
    } else {
        // First occurrence of this path: just insert it.
        sortedPaths.add(add);
    }
}
/**
 * floor returns preceding element
 *
 * Verifies TreeSet.floor(): the greatest element less than or equal to the
 * argument, or null if the argument is below the set's minimum. Uses the
 * fixture produced by set5() (presumably the elements one..five).
 */
public void testFloor() {
    TreeSet q = set5();
    // Exact match: floor(x) == x when x is present.
    Object e1 = q.floor(three);
    assertEquals(three, e1);
    // six is absent: floor falls back to the next lower element.
    Object e2 = q.floor(six);
    assertEquals(five, e2);
    Object e3 = q.floor(one);
    assertEquals(one, e3);
    // zero is below the minimum, so no floor exists.
    Object e4 = q.floor(zero);
    assertNull(e4);
}
/**
 * Writes an HTML word-frequency report for {@code set} to {@code fName} and
 * optionally merges the per-word counts into {@code allSet}.
 *
 * NOTE(review): relies on several members defined elsewhere in this class
 * (retL, lf, stardictL, totalWords, start, WORDLIST_TITLE, TD1); their
 * semantics below are inferred from usage. FileWriter uses the platform
 * default charset — confirm that is intended for HTML output.
 *
 * @param set     entries of (word, (frequency, definition)) to report
 * @param fName   output file path; also recorded in retL
 * @param useDict when true (and dictionaries are loaded), looks up each
 *                word's definition and stores it into the entry's value
 * @param allSet  optional aggregate set; entries equal to an existing one get
 *                their counts incremented, others are added
 * @param total   total word count used for the percentage columns; 0 disables
 *                the percentage output
 * @throws IOException on write errors
 */
public void writeCollection(
        Collection<DoubleComparableEntry<String, DoubleComparableEntry<Integer, String>>> set,
        String fName,
        boolean useDict,
        TreeSet<DoubleComparableEntry<String, DoubleComparableEntry<Integer, String>>> allSet,
        int total) throws IOException {
    FileWriter fw = new FileWriter(fName);
    retL.add(fName);
    BufferedWriter bw = new BufferedWriter(fw);
    bw.write(WORDLIST_TITLE);
    // Report the size of each input file and the grand total.
    long totLen = 0;
    for (File st : lf) {
        long length = st.length();
        totLen += length;
        bw.write(st.getAbsolutePath() + ": " + Util.nf.format(length) + " bytes.<br/>");
    }
    bw.write("Total of Bytes: " + Util.nf.format(totLen) + " bytes.<br/>");
    bw.write("Number of Words: " + Util.nf.format(set.size()) + "<br/>");
    if (total != 0) {
        bw.write("Total Words: " + Util.nf.format(total) + "<br/>");
        bw.write("Rate: " + Util.nf.format((float)set.size() * 100/total) + "%<br/>");
    }
    bw.write("<div align='center'>\r\n"
        + "<table border='0' cellspacing='0' cellpadding='0' width='100%' style='width:100.0%;border-collapse:collapse'>\r\n"
    );
    // Header row; the Definition column only appears when dictionaries are loaded.
    StringBuilder value = new StringBuilder("<tr>")
        .append(TD1).append("No").append("</td>")
        .append(TD1).append("Word").append("</td>")
        .append(TD1).append("Frequent").append("</td>");
    if (stardictL != null) {
        value.append(TD1).append("Definition").append("</td>");
    }
    value.append("</tr>");
    bw.write(value.toString());
    int c = 0;
    int accu = 0;
    DoubleComparableEntry<String, DoubleComparableEntry<Integer, String>> eAll;
    DoubleComparableEntry<Integer, String> value2;
    for (DoubleComparableEntry<String, DoubleComparableEntry<Integer, String>> ee : set) {
        String key = ee.getKey();
        c++;
        value2 = ee.getValue();
        if (stardictL != null && useDict) {
            // Concatenate the definitions from every loaded dictionary,
            // separated by horizontal rules.
            StringBuilder sb = new StringBuilder();
            int l = stardictL.size();
            for (int i = 0; i < l; i++) {
                List<String> readDef = stardictL.get(i).readDef(key);
                if (readDef != null && readDef.size() > 0) {
                    sb.append(Util.collectionToString(readDef, false, "<br/><hr/>"));
                } else {
                    sb.append("(not found)");
                }
                if (i < l - 1) {
                    sb.append("<hr/>");
                }
            }
            value2.setValue(sb.toString());
        } else {
        }
        Integer key2 = value2.getKey();
        value = new StringBuilder("<tr>")
            .append(TD1).append(c).append("</td>")
            .append(TD1).append(key).append("</td>");
        if (total == 0) {
            value.append(TD1).append(key2).append("</td>");
            if (stardictL != null) {
                // NOTE(review): replaceAll("\\\\", "\\") uses a replacement that is a lone
                // backslash; Matcher treats '\' in replacements as an escape, so this throws
                // IllegalArgumentException whenever the pattern matches — confirm intent
                // (Matcher.quoteReplacement may be what was meant).
                value.append(TD1).append(value2.getValue().replaceAll("\\n", "<br/>").replaceAll("\\\\", "\\")).append("</td>");
            }
            value.append("</tr>");
        } else {
            // With a known total, also print per-word and cumulative percentages.
            accu += key2;
            value.append(TD1).append(key2).append(" (" + Util.nf.format((float)key2 * 100/total) + "% / " + Util.nf.format((float)accu * 100 / total) + "%)").append("</td>");
            if (stardictL != null) {
                value.append(TD1).append(value2.getValue().replaceAll("\\n", "<br/>").replaceAll("\\\\", "\\")).append("</td>");
            }
            value.append("</tr>");
        }
        if (allSet != null) {
            totalWords += key2;
            // floor + equals emulates a "get" on the TreeSet: retrieve the stored
            // entry equal to ee and merge counts, else add the new entry.
            if ((eAll = allSet.floor(ee)) != null && eAll.equals(ee)) {
                DoubleComparableEntry<Integer, String> v = eAll.getValue();
                v.setKey(v.getKey() + key2);
            } else {
                allSet.add(ee);
            }
        }
        //System.out.println(value);
        bw.write(value.toString());
        bw.newLine();
    }
    bw.write("</table></div>Took " + Util.nf.format((System.currentTimeMillis() - start)) + " milliseconds.</body></html>");
    bw.flush();
    bw.close();
}
/**
 * LeetCode 220: returns true when some pair of indices at distance &lt;= k
 * holds values at distance &lt;= t. Maintains the last k values in a
 * TreeSet&lt;Long&gt; (Long so the +/- t arithmetic cannot overflow int) and
 * probes both sides of the current value.
 *
 * @param nums input array
 * @param k    maximum index distance
 * @param t    maximum value distance
 * @return true when a qualifying pair exists
 */
public boolean containsNearbyAlmostDuplicate(int[] nums, int k, int t) {
    int len = nums.length;
    // No qualifying pair is possible for these inputs.
    if (len == 0 || k <= 0 || t < 0) {
        return false;
    }
    TreeSet<Long> window = new TreeSet<>();
    for (int i = 0; i < len; i++) {
        long value = nums[i];
        // Probe leftward: the smallest stored element >= value - t. If it is
        // also <= value, it lies within t below (or equal to) the current value.
        Long left = window.ceiling(value - t);
        if (left != null && left <= value) {
            return true;
        }
        // Probe rightward: the largest stored element <= value + t. If it is
        // also >= value, it lies within t above (or equal to) the current value.
        Long right = window.floor(value + t);
        if (right != null && value <= right) {
            return true;
        }
        window.add(value);
        // With k = 3, the element at index 0 must leave the window at i = 3.
        if (i >= k) {
            window.remove((long) nums[i - k]);
        }
    }
    // No pair satisfied both constraints.
    return false;
}
/**
 * Helper generates baseline value for timestamp via exponential smoothing
 *
 * <p>Builds a weekly-seasonal baseline: the sample taken exactly N weeks
 * before {@code tCurrent}, for N = 1..baselineWeeks, is combined with a
 * weight decaying geometrically by {@code learningRate} (the most recent
 * week gets weight 1). Samples before the last change point or flagged as
 * outliers are down-weighted by a factor of 0.001.
 *
 * @param df time series data
 * @param tCurrent current time stamp
 * @param changePoints set of change points
 * @return baseline value for time stamp (NaN when tCurrent is absent from df
 *         or the weight sum cannot be computed; 0.0 when baselining is disabled)
 */
double makeBaseline(DataFrame df, long tCurrent, TreeSet<Long> changePoints) {
    DateTime now = new DateTime(tCurrent);
    int index = df.getLongs(COL_TIME).find(tCurrent);
    // The anchor timestamp must exist in the frame.
    if (index < 0) {
        return Double.NaN;
    }
    // Baselining disabled.
    if (this.baselineWeeks <= 0) {
        return 0.0;
    }
    // Most recent change point at or before tCurrent (null when none).
    Long lastChangePoint = changePoints.floor(tCurrent);
    // construct baseline
    DataFrame raw = new DataFrame(COL_TIME, LongSeries.nulls(this.baselineWeeks))
        .addSeries(COL_VALUE, DoubleSeries.nulls(this.baselineWeeks))
        .addSeries(COL_WEIGHT, DoubleSeries.nulls(this.baselineWeeks));
    long[] sTimestamp = raw.getLongs(COL_TIME).values();
    double[] sValue = raw.getDoubles(COL_VALUE).values();
    double[] sWeight = raw.getDoubles(COL_WEIGHT).values();
    // exponential smoothing with weekly seasonality
    for (int i = 0; i < this.baselineWeeks; i++) {
        // offset counts weeks back from now: baselineWeeks .. 1.
        int offset = this.baselineWeeks - i;
        long timestamp = now.minus(new Period().withWeeks(offset)).getMillis();
        sTimestamp[i] = timestamp;
        int valueIndex = df.getLongs(COL_TIME).find(timestamp);
        if (valueIndex >= 0) {
            sValue[i] = df.getDouble(COL_VALUE, valueIndex);
            // Geometric decay: offset 1 (last week) gets weight learningRate^0 = 1.
            sWeight[i] = Math.pow(this.learningRate, offset - 1);
            // adjust for change point: samples before the last change point are
            // effectively ignored (weight x 0.001).
            if (lastChangePoint != null && timestamp < lastChangePoint) {
                sWeight[i] *= 0.001;
            }
            // adjust for outlier
            if (BooleanSeries.isTrue(df.getBoolean(COL_OUTLIER, valueIndex))) {
                sWeight[i] *= 0.001;
            }
        }
    }
    // Weeks with no matching sample remain null and are dropped here.
    DataFrame data = raw.dropNull();
    data.addSeries(COL_WEIGHTED_VALUE, data.getDoubles(COL_VALUE).multiply(data.get(COL_WEIGHT)));
    DoubleSeries totalWeight = data.getDoubles(COL_WEIGHT).sum();
    if (totalWeight.hasNull()) {
        return Double.NaN;
    }
    // Weighted average of the historical samples.
    DoubleSeries computed = data.getDoubles(COL_WEIGHTED_VALUE).sum().divide(totalWeight);
    if (computed.hasNull()) {
        return Double.NaN;
    }
    return computed.doubleValue();
}
/**
 * Returns the greatest allowed field value that is less than or equal to the
 * calendar's current value of the field, or null when no such value exists or
 * it does not pass validation.
 *
 * @param calendar the calendar whose current field value is the upper bound
 * @return the previous valid value, or null
 */
@Override
public Integer getPreviousValue(final Calendar calendar) {
    final Integer candidate =
            getNewValuesFromDynamicExpressions(calendar).floor(calendar.get(field));
    return isValidResult(calendar, candidate) ? candidate : null;
}