The following examples show how to use the java.util.concurrent.ConcurrentSkipListMap API, with sample code and usage patterns; you can also follow the links to view the original source on GitHub.
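Before the project snippets, here is a minimal, self-contained sketch (not taken from any of the projects below) of the core API: ConcurrentSkipListMap is a thread-safe NavigableMap that keeps its keys sorted, either by natural order or by a supplied Comparator.

import java.util.concurrent.ConcurrentNavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;
public class ConcurrentSkipListMapBasics {
    public static void main(String[] args) {
        ConcurrentSkipListMap<Integer, String> map = new ConcurrentSkipListMap<>();
        map.put(3, "three");
        map.put(1, "one");
        map.put(2, "two");
        System.out.println(map.firstKey());         // 1 -- smallest key
        System.out.println(map.ceilingEntry(2));    // 2=two -- smallest entry with key >= 2
        ConcurrentNavigableMap<Integer, String> head = map.headMap(3); // live view of keys < 3
        System.out.println(head);                   // {1=one, 2=two}
        System.out.println(map.descendingKeySet()); // [3, 2, 1]
    }
}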
/**
* descending iterator of key set is inverse ordered
*/
public void testKeySetDescendingIteratorOrder() {
ConcurrentSkipListMap map = map5();
NavigableSet s = map.navigableKeySet();
Iterator i = s.descendingIterator();
Integer last = (Integer)i.next();
assertEquals(last, five);
int count = 1;
while (i.hasNext()) {
Integer k = (Integer)i.next();
assertTrue(last.compareTo(k) > 0);
last = k;
++count;
}
assertEquals(5, count);
}
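The map5() helper and the one..five constants referenced above belong to the JSR-166 style test fixture and are not shown in this listing; the following is a sketch of what such a fixture typically looks like (an assumption, not the original source).

import java.util.concurrent.ConcurrentSkipListMap;
class SkipListTestFixture {
    static final Integer one = 1, two = 2, three = 3, four = 4, five = 5;
    // Builds the five-entry map that the ordering tests iterate over.
    static ConcurrentSkipListMap<Integer, String> map5() {
        ConcurrentSkipListMap<Integer, String> map = new ConcurrentSkipListMap<>();
        map.put(one, "A");
        map.put(two, "B");
        map.put(three, "C");
        map.put(four, "D");
        map.put(five, "E");
        return map;
    }
}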
private static void realMain(String[] args) throws Throwable {
Map<Integer, Long>[] maps = (Map<Integer, Long>[]) new Map[]{
new HashMap<>(),
new Hashtable<>(),
new IdentityHashMap<>(),
new LinkedHashMap<>(),
new TreeMap<>(),
new WeakHashMap<>(),
new ConcurrentHashMap<>(),
new ConcurrentSkipListMap<>()
};
// for each map type.
for (Map<Integer, Long> map : maps) {
try {
testMap(map);
} catch(Exception all) {
unexpected("Failed for " + map.getClass().getName(), all);
}
}
}
@Override
protected Map<Integer, ZclAttribute> initializeServerAttributes() {
Map<Integer, ZclAttribute> attributeMap = new ConcurrentSkipListMap<>();
attributeMap.put(ATTR_MEASUREDVALUE, new ZclAttribute(this, ATTR_MEASUREDVALUE, "Measured Value", ZclDataType.SIGNED_16_BIT_INTEGER, true, true, false, true));
attributeMap.put(ATTR_MINMEASUREDVALUE, new ZclAttribute(this, ATTR_MINMEASUREDVALUE, "Min Measured Value", ZclDataType.SIGNED_16_BIT_INTEGER, true, true, false, false));
attributeMap.put(ATTR_MAXMEASUREDVALUE, new ZclAttribute(this, ATTR_MAXMEASUREDVALUE, "Max Measured Value", ZclDataType.SIGNED_16_BIT_INTEGER, true, true, false, true));
attributeMap.put(ATTR_TOLERANCE, new ZclAttribute(this, ATTR_TOLERANCE, "Tolerance", ZclDataType.UNSIGNED_16_BIT_INTEGER, false, true, false, false));
attributeMap.put(ATTR_SCALEDVALUE, new ZclAttribute(this, ATTR_SCALEDVALUE, "Scaled Value", ZclDataType.SIGNED_16_BIT_INTEGER, false, true, false, true));
attributeMap.put(ATTR_MINSCALEDVALUE, new ZclAttribute(this, ATTR_MINSCALEDVALUE, "Min Scaled Value", ZclDataType.SIGNED_16_BIT_INTEGER, false, true, false, false));
attributeMap.put(ATTR_MAXSCALEDVALUE, new ZclAttribute(this, ATTR_MAXSCALEDVALUE, "Max Scaled Value", ZclDataType.SIGNED_16_BIT_INTEGER, false, true, false, false));
attributeMap.put(ATTR_SCALEDTOLERANCE, new ZclAttribute(this, ATTR_SCALEDTOLERANCE, "Scaled Tolerance", ZclDataType.UNSIGNED_16_BIT_INTEGER, false, true, false, true));
attributeMap.put(ATTR_SCALE, new ZclAttribute(this, ATTR_SCALE, "Scale", ZclDataType.UNSIGNED_8_BIT_INTEGER, false, true, false, false));
return attributeMap;
}
/**
* Gets the total number of bytes from the beginning of the log to the position of {@code token}. This includes any
* overhead due to headers and empty space.
* @param token the point until which the log has been read.
* @param messageEntries the list of {@link MessageInfo} that were read when producing {@code token}.
* @param logEndOffsetBeforeFind the end offset of the log before a find was attempted.
* @param indexSegments the list of index segments to use.
* @return the total number of bytes read from the log at the position of {@code token}.
*/
private long getTotalBytesRead(StoreFindToken token, List<MessageInfo> messageEntries, Offset logEndOffsetBeforeFind,
ConcurrentSkipListMap<Offset, IndexSegment> indexSegments) {
long bytesRead = 0;
if (token.getType().equals(FindTokenType.IndexBased)) {
bytesRead = getAbsolutePositionInLogForOffset(token.getOffset(), indexSegments);
} else if (token.getType().equals(FindTokenType.JournalBased)) {
if (messageEntries.size() > 0) {
bytesRead = getAbsolutePositionInLogForOffset(token.getOffset(), indexSegments) + messageEntries.get(
messageEntries.size() - 1).getSize();
} else {
bytesRead = getAbsolutePositionInLogForOffset(logEndOffsetBeforeFind, indexSegments);
}
}
return bytesRead;
}
@Test(dataProvider = "StreamTestData<Integer>", dataProviderClass = StreamTestDataProvider.class)
public void testSimpleGroupBy(String name, TestData.OfRef<Integer> data) throws ReflectiveOperationException {
Function<Integer, Integer> classifier = i -> i % 3;
// Single-level groupBy
exerciseMapTabulation(data, groupingBy(classifier),
new GroupedMapAssertion<>(classifier, HashMap.class,
new ListAssertion<>()));
exerciseMapTabulation(data, groupingByConcurrent(classifier),
new GroupedMapAssertion<>(classifier, ConcurrentHashMap.class,
new ListAssertion<>()));
// With explicit constructors
exerciseMapTabulation(data,
groupingBy(classifier, TreeMap::new, toCollection(HashSet::new)),
new GroupedMapAssertion<>(classifier, TreeMap.class,
new CollectionAssertion<Integer>(HashSet.class, false)));
exerciseMapTabulation(data,
groupingByConcurrent(classifier, ConcurrentSkipListMap::new,
toCollection(HashSet::new)),
new GroupedMapAssertion<>(classifier, ConcurrentSkipListMap.class,
new CollectionAssertion<Integer>(HashSet.class, false)));
}
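Outside the test harness, the same three-argument groupingByConcurrent call can be used directly; the sketch below (assumed, not part of the test above) shows that collecting into a ConcurrentSkipListMap keeps the group keys sorted.

import static java.util.stream.Collectors.groupingByConcurrent;
import static java.util.stream.Collectors.toCollection;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentNavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.stream.IntStream;
public class GroupingSketch {
    public static void main(String[] args) {
        ConcurrentNavigableMap<Integer, Set<Integer>> byResidue =
                IntStream.rangeClosed(1, 10).boxed().parallel()
                        .collect(groupingByConcurrent(i -> i % 3,
                                ConcurrentSkipListMap::new,
                                toCollection(HashSet::new)));
        System.out.println(byResidue.keySet()); // [0, 1, 2] -- group keys iterate in sorted order
    }
}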
private SortedMap<Long, Map<Integer, PreparedStatement>> subSetMap(long startTime, long endTime, Order order) {
Long startKey = prepMap.floorKey(startTime);
Long endKey = prepMap.floorKey(endTime);
// The start time is already compressed, start the request from earliest non-compressed
if(startKey == null) {
startKey = prepMap.ceilingKey(startTime);
}
// Just in case even the end is in the past
if(endKey == null) {
endKey = startKey;
}
// Depending on the requested order, the entries must also be returned in that order
SortedMap<Long, Map<Integer, PreparedStatement>> statementMap;
if(order == Order.ASC) {
statementMap = prepMap.subMap(startKey, true, endKey, true);
} else {
// Copy the range into a map with descending key order
statementMap = new ConcurrentSkipListMap<>(Comparator.<Long>reverseOrder());
statementMap.putAll(prepMap.subMap(startKey, true, endKey, true));
}
return statementMap;
}
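For reference, ConcurrentSkipListMap can also hand back a descending view of a key range directly via descendingMap(), which avoids copying the submap into a second map; a minimal standalone sketch (illustrative only, not from the project above):

import java.util.NavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;
public class DescendingRangeSketch {
    public static void main(String[] args) {
        ConcurrentSkipListMap<Long, String> prepMap = new ConcurrentSkipListMap<>();
        for (long t = 0; t < 5; t++) {
            prepMap.put(t, "bucket-" + t);
        }
        long startKey = 1, endKey = 3;
        // Ascending view of [startKey, endKey], backed by the original map
        NavigableMap<Long, String> asc = prepMap.subMap(startKey, true, endKey, true);
        // Descending view of the same range, also backed by the original map (no copy)
        NavigableMap<Long, String> desc = prepMap.descendingMap().subMap(endKey, true, startKey, true);
        System.out.println(asc);  // {1=bucket-1, 2=bucket-2, 3=bucket-3}
        System.out.println(desc); // {3=bucket-3, 2=bucket-2, 1=bucket-1}
    }
}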
public VolatileGeneration(File logPath, Serializer<K> keySerializer, Serializer<V> valueSerializer, Comparator<K> comparator, boolean loadExistingReadOnly) throws IOException {
this.ordering = Ordering.from(comparator);
map = new ConcurrentSkipListMap<>(comparator);
this.logPath = logPath;
this.keySerializer = keySerializer;
this.valueSerializer = valueSerializer;
deleted = new Object();
if (loadExistingReadOnly) {
if (!logPath.exists()) throw new IllegalArgumentException(logPath.getAbsolutePath()+" does not exist");
transactionLog = null;
replayTransactionLog(logPath, true);
} else {
if (logPath.exists()) throw new IllegalArgumentException("to load existing logs set loadExistingReadOnly to true or create a new log and use replayTransactionLog");
transactionLog = new TransactionLog.Writer(logPath, keySerializer, valueSerializer, false);
}
stuffToClose = SharedReference.create((Closeable)new Closeable() {
public void close() throws IOException {
closeWriter();
}
});
}
@Override
protected Map<Integer, Class<? extends ZclCommand>> initializeClientCommands() {
Map<Integer, Class<? extends ZclCommand>> commandMap = new ConcurrentSkipListMap<>();
commandMap.put(0x0000, AddSceneCommand.class);
commandMap.put(0x0001, ViewSceneCommand.class);
commandMap.put(0x0002, RemoveSceneCommand.class);
commandMap.put(0x0003, RemoveAllScenesCommand.class);
commandMap.put(0x0004, StoreSceneCommand.class);
commandMap.put(0x0005, RecallSceneCommand.class);
commandMap.put(0x0006, GetSceneMembershipCommand.class);
commandMap.put(0x0040, EnhancedAddSceneCommand.class);
commandMap.put(0x0041, EnhancedViewSceneCommand.class);
commandMap.put(0x0042, CopySceneCommand.class);
return commandMap;
}
@Test
public void testGetQueueRunning() throws Exception {
List<PlannedJob> twoJobs = new ArrayList<>();
Map<Long, CallableJob> runs =
new ConcurrentSkipListMap<>();
Mockito.reset(agentConsumer);
for (int i = 0; i < 2; i++) {
JobSpec aJob = getTestJob("bleep bloop");
aJob.setName("job" + i);
PlannedJob plannedJob = new PlannedJob(aJob, new DateTime());
twoJobs.add(plannedJob);
when(jobDao.getJob(i)).thenReturn(aJob);
CallableQuery cq =
new CallableQuery(plannedJob, jobDao, reporting,
null, null, null, null, null, 1);
runs.put(Long.valueOf(i), cq);
}
when(jobDao.getJobRuns(null, AgentConsumer.LIMIT_JOB_RUNS)).thenReturn(runs);
mockMvc.perform(get("/api/running"))
.andExpect(status().isOk())
.andExpect(content().string(OM.writeValueAsString(twoJobs)));
}
@DataProvider(name="setProvider", parallel=true)
public static Iterator<Object[]> setCases() {
final List<Object[]> cases = new LinkedList<>();
cases.add(new Object[] { new HashSet<>() });
cases.add(new Object[] { new LinkedHashSet<>() });
cases.add(new Object[] { new TreeSet<>() });
cases.add(new Object[] { new java.util.concurrent.ConcurrentSkipListSet<>() });
cases.add(new Object[] { new java.util.concurrent.CopyOnWriteArraySet<>() });
cases.add(new Object[] { new ExtendsAbstractSet<>() });
cases.add(new Object[] { Collections.newSetFromMap(new HashMap<>()) });
cases.add(new Object[] { Collections.newSetFromMap(new LinkedHashMap<>()) });
cases.add(new Object[] { Collections.newSetFromMap(new TreeMap<>()) });
cases.add(new Object[] { Collections.newSetFromMap(new ConcurrentHashMap<>()) });
cases.add(new Object[] { Collections.newSetFromMap(new ConcurrentSkipListMap<>()) });
cases.add(new Object[] { new HashSet<Integer>(){{add(42);}} });
cases.add(new Object[] { new ExtendsAbstractSet<Integer>(){{add(42);}} });
cases.add(new Object[] { new LinkedHashSet<Integer>(){{add(42);}} });
cases.add(new Object[] { new TreeSet<Integer>(){{add(42);}} });
return cases.iterator();
}
@Test
@SuppressWarnings("unchecked")
public void testWriteDictionary() throws Exception {
instance.writeDictionary(new LinkedHashMap<Object, Object>() {{
put("string", "value");
put("number", 123456);
put("list", new ArrayList<Object>() {{
add("list-item-1");
add("list-item-2");
}});
put("dict", new ConcurrentSkipListMap() {{
put(123, ByteBuffer.wrap("test".getBytes()));
put(456, "thing");
}});
}});
assertEquals("d6:string5:value6:numberi123456e4:listl11:list-item-111:list-item-2e4:dictd3:1234:test3:4565:thingee",
new String(out.toByteArray(), instance.getCharset()));
}
@Override
public void switchModel(boolean aggregate) {
ConcurrentSkipListMap<String, AbstractGraphRow> selectedModel;
if (aggregate) {
// issue 64: we must fail requests for aggregate in unsupported cases
if (modelAggregate.isEmpty() && !model.isEmpty()) {
throw new UnsupportedOperationException("Seems you've requested "
+ "aggregate mode for graph that don't support it. We apologize...");
}
selectedModel = modelAggregate;
} else {
selectedModel = model;
}
graphPanel.getGraphObject().setRows(selectedModel);
graphPanel.clearRowsTab();
for (AbstractGraphRow abstractGraphRow : selectedModel.values()) {
graphPanel.addRow(abstractGraphRow);
}
isAggregate = aggregate;
getSettingsPanel().setAggregateMode(aggregate);
}
@java.io.Serial
private void readObject(ObjectInputStream in)
throws IOException, ClassNotFoundException
{
// Don't call in.defaultReadObject()
// Read in serialized fields
ObjectInputStream.GetField gfields = in.readFields();
// Get the one we want
@SuppressWarnings("unchecked")
Vector<SocketPermission> permissions = (Vector<SocketPermission>)gfields.get("permissions", null);
perms = new ConcurrentSkipListMap<>(new SPCComparator());
for (SocketPermission sp : permissions) {
perms.put(sp.getName(), sp);
}
}
/**
* Get all the statuses from all connected SDK harnesses within specified timeout. Any errors
* getting status from the SDK harnesses will be returned in the map.
*
* @param timeout max time waiting for the response from each SDK harness.
* @param timeUnit timeout time unit.
* @return All the statuses in a map keyed by the SDK harness id.
*/
public Map<String, String> getAllWorkerStatuses(long timeout, TimeUnit timeUnit) {
if (isClosed.get()) {
throw new IllegalStateException("BeamWorkerStatusGrpcService already closed.");
}
// return result in worker id sorted map.
Map<String, String> allStatuses = new ConcurrentSkipListMap<>(Comparator.naturalOrder());
Set<String> connectedClientIdsCopy;
synchronized (connectedClient) {
connectedClientIdsCopy = ImmutableSet.copyOf(connectedClient.keySet());
}
connectedClientIdsCopy
.parallelStream()
.forEach(
workerId ->
allStatuses.put(workerId, getSingleWorkerStatus(workerId, timeout, timeUnit)));
return allStatuses;
}
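Because ConcurrentSkipListMap is a ConcurrentMap, the parallel writes above are safe and the returned map still iterates in worker-id order; a minimal standalone sketch of that pattern (not from the Beam source):

import java.util.concurrent.ConcurrentSkipListMap;
import java.util.stream.IntStream;
public class ParallelStatusSketch {
    public static void main(String[] args) {
        ConcurrentSkipListMap<String, String> statuses = new ConcurrentSkipListMap<>();
        // Concurrent puts from a parallel stream are safe; the map stays sorted by key.
        IntStream.range(0, 8).parallel()
                .forEach(i -> statuses.put("worker-" + i, "OK"));
        System.out.println(statuses.keySet()); // worker-0 through worker-7, in key order
    }
}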
private synchronized void initScheduler(Configuration conf) {
validateConf(conf);
//Use ConcurrentSkipListMap because applications need to be ordered
this.applications =
new ConcurrentSkipListMap<ApplicationId, SchedulerApplication<FiCaSchedulerApp>>();
this.minimumAllocation =
Resources.createResource(conf.getInt(
YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB));
initMaximumResourceCapability(
Resources.createResource(conf.getInt(
YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB),
conf.getInt(
YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES),
conf.getInt(
YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_GCORES,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_GCORES)));
this.usePortForNodeName = conf.getBoolean(
YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME,
YarnConfiguration.DEFAULT_RM_SCHEDULER_USE_PORT_FOR_NODE_NAME);
this.metrics = QueueMetrics.forQueue(DEFAULT_QUEUE_NAME, null, false,
conf);
this.activeUsersManager = new ActiveUsersManager(metrics);
}
/**
* descendingKeySet is ordered
*/
public void testDescendingKeySetOrder() {
ConcurrentSkipListMap map = map5();
Set s = map.descendingKeySet();
Iterator i = s.iterator();
Integer last = (Integer)i.next();
assertEquals(last, five);
int count = 1;
while (i.hasNext()) {
Integer k = (Integer)i.next();
assertTrue(last.compareTo(k) > 0);
last = k;
++count;
}
assertEquals(5, count);
}
public PersistentOfflineTopicStats estimateUnloadedTopicBacklog(ManagedLedgerFactoryImpl factory,
TopicName topicName) throws Exception {
String managedLedgerName = topicName.getPersistenceNamingEncoding();
long numberOfEntries = 0;
long totalSize = 0;
final NavigableMap<Long, MLDataFormats.ManagedLedgerInfo.LedgerInfo> ledgers = new ConcurrentSkipListMap<>();
final PersistentOfflineTopicStats offlineTopicStats = new PersistentOfflineTopicStats(managedLedgerName,
brokerName);
// calculate total managed ledger size and number of entries without loading the topic
readLedgerMeta(factory, topicName, ledgers);
for (MLDataFormats.ManagedLedgerInfo.LedgerInfo ls : ledgers.values()) {
numberOfEntries += ls.getEntries();
totalSize += ls.getSize();
if (accurate) {
offlineTopicStats.addLedgerDetails(ls.getEntries(), ls.getTimestamp(), ls.getSize(), ls.getLedgerId());
}
}
offlineTopicStats.totalMessages = numberOfEntries;
offlineTopicStats.storageSize = totalSize;
if (log.isDebugEnabled()) {
log.debug("[{}] Total number of entries - {} and size - {}", managedLedgerName, numberOfEntries, totalSize);
}
// calculate per cursor message backlog
calculateCursorBacklogs(factory, topicName, ledgers, offlineTopicStats);
offlineTopicStats.statGeneratedAt.setTime(System.currentTimeMillis());
return offlineTopicStats;
}
@SafeVarargs
public static <K, V> NavigableMap<K, V> safeSortedMap( Entry<K, V>... entries ) {
NavigableMap<K, V> map = new ConcurrentSkipListMap<>();
for ( Entry<K, V> entry : entries ) {
map.put( entry.key(), entry.value() );
}
return map;
}
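The Entry type with key() and value() accessors used above is not shown in this listing; below is a runnable sketch of the same helper with a stand-in Pair record (an assumption, not the original type) plus a usage call. Note that ConcurrentSkipListMap rejects null keys and values, and the keys must be mutually comparable unless a Comparator is supplied.

import java.util.NavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;
public class SafeSortedMapSketch {
    // Stand-in for the Entry type assumed by the snippet above.
    record Pair<K, V>(K key, V value) {}
    @SafeVarargs
    static <K, V> NavigableMap<K, V> safeSortedMap(Pair<K, V>... entries) {
        NavigableMap<K, V> map = new ConcurrentSkipListMap<>();
        for (Pair<K, V> entry : entries) {
            map.put(entry.key(), entry.value());
        }
        return map;
    }
    public static void main(String[] args) {
        NavigableMap<String, Integer> ports = safeSortedMap(new Pair<>("http", 80), new Pair<>("https", 443));
        System.out.println(ports.firstEntry()); // http=80 -- keys kept in natural order
    }
}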
public CommitWatcher(BufferPool bufferPool, XceiverClientSpi xceiverClient) {
this.bufferPool = bufferPool;
this.xceiverClient = xceiverClient;
commitIndex2flushedDataMap = new ConcurrentSkipListMap<>();
totalAckDataLength = 0;
futureMap = new ConcurrentHashMap<>();
}
/**
* Re-validates the {@code token} to ensure that it is consistent with the given view of {@code indexSegments}. If it
* is not, a {@link FindTokenType#Uninitialized} token is returned.
* @param token the {@link StoreFindToken} to revalidate.
* @param indexSegments the view of the index segments to revalidate against.
* @return {@code token} if it is consistent with {@code indexSegments}, a new {@link FindTokenType#Uninitialized}
* token otherwise.
*/
private StoreFindToken revalidateStoreFindToken(StoreFindToken token,
ConcurrentSkipListMap<Offset, IndexSegment> indexSegments) {
StoreFindToken revalidatedToken = token;
Offset offset = token.getOffset();
switch (token.getType()) {
case Uninitialized:
// nothing to do.
break;
case JournalBased:
// A journal based token whose previous index segment doesn't belong to the same log segment. This can happen
// after compaction, or when the blob store has added enough records that the offset in the token now points to
// the previous log segment.
Offset floorOffset = indexSegments.floorKey(offset);
if (floorOffset == null || !floorOffset.getName().equals(offset.getName())) {
revalidatedToken = new StoreFindToken();
logger.info("Revalidated token {} because it is invalid for the index segment map", token);
}
break;
case IndexBased:
// An index based token, but the offset is not in the segments, might be caused by the compaction
if (!indexSegments.containsKey(offset)) {
revalidatedToken = new StoreFindToken();
logger.info("Revalidated token {} because it is invalid for the index segment map", token);
}
break;
default:
throw new IllegalStateException("Unrecognized token type: " + token.getType());
}
return revalidatedToken;
}
private static void realMain(String[] args) throws Throwable {
boolean shortRun = args.length > 0 && args[0].equals("-shortrun");
Object[][] mapKeys = makeTestData(shortRun ? (TEST_SIZE / 2) : TEST_SIZE);
// loop through data sets
for (Object[] keys_desc : mapKeys) {
Map<Object, Object>[] maps = (Map<Object, Object>[]) new Map[]{
new HashMap<>(),
new Hashtable<>(),
new IdentityHashMap<>(),
new LinkedHashMap<>(),
new TreeMap<>(),
new WeakHashMap<>(),
new ConcurrentHashMap<>(),
new ConcurrentSkipListMap<>()
};
// for each map type.
for (Map<Object, Object> map : maps) {
String desc = (String) keys_desc[0];
Object[] keys = (Object[]) keys_desc[1];
try {
testMap(map, desc, keys);
} catch(Exception all) {
unexpected("Failed for " + map.getClass().getName() + " with " + desc, all);
}
}
}
}
public boolean setCommit(Comparable[] key, long version, long commit) {
NavigableMap<Long, VersionedValue> rowData = cache.getOrDefault(key, new ConcurrentSkipListMap<>());
VersionedValue value = rowData.get(version);
if (value == null) {
return false;
}
if (commit == INVALID_TX) {
rowData.remove(version);
} else {
rowData.put(version, new VersionedValue(version, commit, value.isDeleted(), value.getValue()));
}
garbageCollect(rowData);
cache.put(key, rowData);
return true;
}
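A per-row NavigableMap keyed by version, as used above, makes snapshot-style lookups and range cleanup cheap; a standalone sketch of those operations (illustrative only, not taken from the project above):

import java.util.concurrent.ConcurrentSkipListMap;
public class VersionIndexSketch {
    public static void main(String[] args) {
        ConcurrentSkipListMap<Long, String> versions = new ConcurrentSkipListMap<>();
        versions.put(10L, "value@10");
        versions.put(20L, "value@20");
        versions.put(35L, "value@35");
        // Newest version visible at read timestamp 30 is the one written at 20
        System.out.println(versions.floorEntry(30L)); // 20=value@20
        // Versions strictly below a low-water mark can be dropped through a headMap view
        versions.headMap(20L).clear();
        System.out.println(versions.keySet()); // [20, 35]
    }
}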
@Override
protected Map<Integer, Class<? extends ZclCommand>> initializeClientCommands() {
Map<Integer, Class<? extends ZclCommand>> commandMap = new ConcurrentSkipListMap<>();
commandMap.put(0x0000, DisplayMessageCommand.class);
commandMap.put(0x0001, CancelMessageCommand.class);
commandMap.put(0x0002, DisplayProtectedMessageCommand.class);
commandMap.put(0x0003, CancelAllMessagesCommand.class);
return commandMap;
}
public FederatedZKLogMetadataStore(
DistributedLogConfiguration conf,
URI namespace,
ZooKeeperClient zkc,
OrderedScheduler scheduler) throws IOException {
this.conf = conf;
this.namespace = namespace;
this.zkc = zkc;
this.scheduler = scheduler;
this.forceCheckLogExistence = conf.getFederatedCheckExistenceWhenCacheMiss();
this.subNamespaces = new ConcurrentSkipListMap<URI, SubNamespace>();
this.log2Locations = new ConcurrentHashMap<String, URI>();
this.zkSubnamespacesPath = namespace.getPath() + "/" + ZNODE_SUB_NAMESPACES;
this.maxLogsPerSubnamespace = conf.getFederatedMaxLogsPerSubnamespace();
// fetch the sub namespace
Set<URI> uris;
try {
uris = FutureUtils.result(fetchSubNamespaces(this));
} catch (Exception e) {
if (e instanceof IOException) {
throw (IOException) e;
} else {
throw new IOException(e);
}
}
for (URI uri : uris) {
SubNamespace subNs = new SubNamespace(uri);
if (null == subNamespaces.putIfAbsent(uri, subNs)) {
subNs.watch();
logger.info("Watched sub namespace {}", uri);
}
}
logger.info("Federated ZK LogMetadataStore is initialized for {}", namespace);
}
@Override
protected Map<Integer, ZclAttribute> initializeServerAttributes() {
Map<Integer, ZclAttribute> attributeMap = new ConcurrentSkipListMap<>();
attributeMap.put(ATTR_MAXDURATION, new ZclAttribute(this, ATTR_MAXDURATION, "Max Duration", ZclDataType.UNSIGNED_16_BIT_INTEGER, true, true, true, false));
return attributeMap;
}
/** Creates a new instance of UndoableObjectsList */
public UndoableObjects()
{
stack_level = 0;
objects = new ConcurrentSkipListMap<Storable, UndoableObjectNode>();
deleted_objects_stack = new Vector<Collection<UndoableObjectNode>>();
}
@Override
protected Map<Integer, ZclAttribute> initializeServerAttributes() {
Map<Integer, ZclAttribute> attributeMap = new ConcurrentSkipListMap<>();
attributeMap.put(ATTR_CHECKININTERVAL, new ZclAttribute(this, ATTR_CHECKININTERVAL, "Checkin Interval", ZclDataType.UNSIGNED_32_BIT_INTEGER, true, true, true, true));
attributeMap.put(ATTR_LONGPOLLINTERVAL, new ZclAttribute(this, ATTR_LONGPOLLINTERVAL, "Long Poll Interval", ZclDataType.UNSIGNED_32_BIT_INTEGER, true, true, false, true));
attributeMap.put(ATTR_SHORTPOLLINTERVAL, new ZclAttribute(this, ATTR_SHORTPOLLINTERVAL, "Short Poll Interval", ZclDataType.UNSIGNED_16_BIT_INTEGER, true, true, false, true));
attributeMap.put(ATTR_FASTPOLLTIMEOUT, new ZclAttribute(this, ATTR_FASTPOLLTIMEOUT, "Fast Poll Timeout", ZclDataType.UNSIGNED_16_BIT_INTEGER, true, true, false, true));
attributeMap.put(ATTR_CHECKININTERVALMIN, new ZclAttribute(this, ATTR_CHECKININTERVALMIN, "Checkin Interval Min", ZclDataType.UNSIGNED_32_BIT_INTEGER, true, true, false, false));
attributeMap.put(ATTR_LONGPOLLINTERVALMIN, new ZclAttribute(this, ATTR_LONGPOLLINTERVALMIN, "Long Poll Interval Min", ZclDataType.UNSIGNED_32_BIT_INTEGER, true, true, false, false));
attributeMap.put(ATTR_FASTPOLLTIMEOUTMIN, new ZclAttribute(this, ATTR_FASTPOLLTIMEOUTMIN, "Fast Poll Timeout Min", ZclDataType.UNSIGNED_32_BIT_INTEGER, true, true, false, false));
return attributeMap;
}