Listed below are example usages of com.datastax.driver.core.Session; follow the links to view the full source of each example on GitHub.
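Before the individual examples, here is a minimal sketch of obtaining and using a Session with the DataStax Java driver 3.x; the contact point and query are placeholders chosen for illustration, not taken from the examples below.

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;

public class SessionSketch {
    public static void main(String[] args) {
        // Cluster and Session are both Closeable in driver 3.x, so try-with-resources
        // releases the connection pool and background threads when done.
        try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
             Session session = cluster.connect()) {
            // Execute a plain CQL statement and iterate the resulting rows.
            ResultSet rs = session.execute("SELECT release_version FROM system.local");
            for (Row row : rs) {
                System.out.println(row.getString("release_version"));
            }
        }
    }
}

The examples that follow show this same Session type used in tests, DAO constructors, and query helpers.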
@Test
public void testGetBigInteger() throws Exception {
testInSession(new Callback() {
@Override
public void call(Session session) throws Exception {
final BigIntegerObject nObject = DatastaxMapperFactory.newInstance().mapTo(BigIntegerObject.class).iterator(session.execute("select * from test_number")).next();
assertEquals(1, nObject.bi.longValue());
assertEquals(2, nObject.i.longValue());
assertEquals(3, nObject.vi.longValue());
assertEquals(3, nObject.dec.longValue());
assertEquals(3, nObject.f.longValue());
assertEquals(3, nObject.d.longValue());
}
});
}
static Session createSession() throws Exception {
Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1")
// long read timeout is sometimes needed on slow travis ci machines
.withSocketOptions(new SocketOptions().setReadTimeoutMillis(30000))
.withQueryOptions(getQueryOptions())
.build();
Session session = cluster.connect();
session.execute("CREATE KEYSPACE IF NOT EXISTS test WITH REPLICATION ="
+ " { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }");
session.execute("CREATE TABLE IF NOT EXISTS test.users"
+ " (id int PRIMARY KEY, fname text, lname text)");
try {
session.execute("TRUNCATE test.users");
} catch (NoHostAvailableException e) {
// sometimes slow, so give it a second chance
session.execute("TRUNCATE test.users");
}
for (int i = 0; i < 10; i++) {
session.execute("INSERT INTO test.users (id, fname, lname) VALUES (" + i + ", 'f" + i
+ "', 'l" + i + "')");
}
return session;
}
@Test(expected = IllegalStateException.class /* the datastax driver complains that page fetching will cause a deadlock */ )
public void paginatedResultInCassandraThreadsThrowsException() {
int numberOfRecords = 500;
int numberOfPages = 10;
Session session = cassandraCQLUnit.getSession();
PreparedStatement insertStmt = session.prepare(insertInto("app_scale_jobs").values(
Arrays.asList("job_id", "ref_id"),
Arrays.asList(bindMarker(), bindMarker())
));
for (int i = 0; i < numberOfRecords; i++) {
ResultSet resultSet = session.execute(insertStmt.bind("job-" + i, UUID.randomUUID()));
assertThat(resultSet.wasApplied()).isTrue();
}
PreparedStatement loadStmt = session.prepare(select("job_id", "ref_id").from("app_scale_jobs"));
Observable<ResultSet> results = new CassStoreHelper(session, Schedulers.immediate()).execute(loadStmt.bind()
// force pagination, and pages to be fetched on demand as the ResultSet is iterated on
.setFetchSize(numberOfRecords / numberOfPages));
results.doOnNext(rows -> rows.forEach(row ->
assertThat(row.getString(0)).startsWith("job-"))
).toCompletable().await(1, TimeUnit.MINUTES);
}
@Test
public void initializeCqlTwice() throws TTransportException, IOException, InterruptedException {
final InetSocketAddress contactPoint = CassandraTestHelper.getCassandraContactPoint();
Cluster cluster = Cluster.builder()
.addContactPointsWithPorts(Collections.singletonList(contactPoint))
.build();
Session session = cluster.connect();
session.execute("DROP KEYSPACE IF EXISTS keyspaceTestCassandraSinkIT");
Assert.assertNull(session.getCluster().getMetadata().getKeyspace("keyspaceTestCassandraSinkIT"));
_do();
Assert.assertNotNull(session.getCluster().getMetadata().getKeyspace("keyspaceTestCassandraSinkIT"));
Assert.assertNotNull(session.getCluster().getMetadata().getKeyspace("keyspaceTestCassandraSinkIT")
.getTable("tableTestCassandraSinkIT"));
_do();
Assert.assertNotNull(session.getCluster().getMetadata().getKeyspace("keyspaceTestCassandraSinkIT"));
Assert.assertNotNull(session.getCluster().getMetadata().getKeyspace("keyspaceTestCassandraSinkIT")
.getTable("tableTestCassandraSinkIT"));
session.execute("DROP KEYSPACE IF EXISTS keyspaceTestCassandraSinkIT");
session.close();
cluster.close();
}
@Test
public void testGetFloat() throws Exception {
testInSession(new Callback() {
@Override
public void call(Session session) throws Exception {
final FloatObject nObject = DatastaxMapperFactory.newInstance().mapTo(FloatObject.class).iterator(session.execute("select * from test_number")).next();
assertEquals(1.0, nObject.bi, 0.001);
assertEquals(2.0, nObject.i, 0.001);
assertEquals(3.0, nObject.vi, 0.001);
assertEquals(3.5, nObject.dec, 0.001);
assertEquals(3.7, nObject.f, 0.001);
assertEquals(3.9, nObject.d, 0.001);
}
});
}
@Inject
public CassandraMessageDAO(Session session, CassandraTypesProvider typesProvider, BlobStore blobStore,
BlobId.Factory blobIdFactory, CassandraConfiguration cassandraConfiguration,
CassandraConsistenciesConfiguration consistenciesConfiguration,
CassandraMessageId.Factory messageIdFactory) {
this.cassandraAsyncExecutor = new CassandraAsyncExecutor(session);
this.consistencyLevel = consistenciesConfiguration.getRegular();
this.typesProvider = typesProvider;
this.blobStore = blobStore;
this.blobIdFactory = blobIdFactory;
this.configuration = cassandraConfiguration;
this.messageIdFactory = messageIdFactory;
this.insert = prepareInsert(session);
this.delete = prepareDelete(session);
this.selectMetadata = prepareSelect(session, METADATA);
this.selectHeaders = prepareSelect(session, HEADERS);
this.selectFields = prepareSelect(session, FIELDS);
this.selectBody = prepareSelect(session, BODY);
this.selectAllMessagesWithAttachment = prepareSelectAllMessagesWithAttachment(session);
this.cidParser = Cid.parser().relaxed();
}
@Test
public void testMapTupleToTupleValue() throws Exception {
testInSession(new Callback() {
@Override
public void call(Session session) throws Exception {
final DatastaxMapper<DbObjectsWithTupleValue> mapper = DatastaxMapperFactory.newInstance().mapTo(DbObjectsWithTupleValue.class);
ResultSet rs = session.execute("select id, t from dbobjects_tuple");
final Iterator<DbObjectsWithTupleValue> iterator = mapper.iterator(rs);
DbObjectsWithTupleValue next = iterator.next();
assertEquals(1, next.getId());
assertEquals("t1", next.getT().getString(0));
assertEquals(12L, next.getT().getLong(1));
assertEquals(13, next.getT().getInt(2));
assertFalse(iterator.hasNext());
}
});
}
private PreparedStatement prepareConditionalInsert(Session session) {
return session.prepare(
insertInto(CassandraACLTable.TABLE_NAME)
.value(CassandraACLTable.ID, bindMarker(CassandraACLTable.ID))
.value(CassandraACLTable.ACL, bindMarker(CassandraACLTable.ACL))
.value(CassandraACLTable.VERSION, INITIAL_VALUE)
.ifNotExists());
}
@Inject
MetadataDAO(Session session, MessageId.Factory messageIdFactory, MetadataSerializer metadataSerializer) {
this.cassandraAsyncExecutor = new CassandraAsyncExecutor(session);
this.addStatement = prepareAdd(session);
this.removeStatement = prepareRemove(session);
this.removeAllStatement = prepareRemoveAll(session);
this.readStatement = prepareRead(session, PAYLOAD);
this.readMessageIdStatement = prepareRead(session, MESSAGE_ID);
this.messageIdFactory = messageIdFactory;
this.metadataSerializer = metadataSerializer;
}
@Override
public Session getCassandraSession() {
if (cassandraSession != null) {
return cassandraSession;
} else {
throw new ProcessException("Unable to get the Cassandra session.");
}
}
public static Observable<Pair<Object, Object>> readTwoColumnTable(Session sourceSession, String table) {
Pair<String, String> columnNames = resolveColumnNamesInTwoColumnTable(sourceSession, table);
String primaryKey = columnNames.getLeft();
String valueColumn = columnNames.getRight();
PreparedStatement queryAllStatement = sourceSession.prepare(
String.format("SELECT * FROM %s WHERE token(%s) > :min AND token(%s) <= :max", table, primaryKey, primaryKey)
);
AsyncCassandraExecutor executor = new AsyncCassandraExecutor(sourceSession, PAGE_SIZE, SPLIT);
return executor.rawRangeQuery2(primaryKey, valueColumn, queryAllStatement);
}
static void executeAndUpdateTimer(Session session, Statement stmt, Timer timer) {
long startTime = System.nanoTime();
try {
session.execute(stmt);
} finally {
// Record the elapsed wall-clock time even if the execution throws.
if (timer != null) {
timer.update(System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
}
}
}
public MergeTokenRangesFunction(Comparable maxValue,
Comparable minValue,
Session session,
IPartitioner partitioner,
Iterable<Comparable> allRanges) {
this.maxValue = maxValue;
this.minValue = minValue;
this.session = session;
this.partitioner = partitioner;
this.allRanges = allRanges;
}
@Inject
UserPerBucketDAO(Session session) {
cassandraAsyncExecutor = new CassandraAsyncExecutor(session);
addStatement = prepareAddUser(session);
removeStatement = prepareRemoveBucket(session);
listStatement = prepareListUser(session);
listBucketsStatement = prepareListBuckets(session);
}
private static CassandraMailboxManager provideMailboxManager(Session session,
PreDeletionHooks preDeletionHooks,
CassandraMailboxSessionMapperFactory mapperFactory,
MessageId.Factory messageIdFactory) {
MailboxACLResolver aclResolver = new UnionMailboxACLResolver();
GroupMembershipResolver groupMembershipResolver = new SimpleGroupMembershipResolver();
MessageParser messageParser = new MessageParser();
InVMEventBus eventBus = new InVMEventBus(new InVmEventDelivery(new RecordingMetricFactory()), EventBusTestFixture.RETRY_BACKOFF_CONFIGURATION, new MemoryEventDeadLetters());
StoreRightManager storeRightManager = new StoreRightManager(mapperFactory, aclResolver, groupMembershipResolver, eventBus);
Authenticator noAuthenticator = null;
Authorizator noAuthorizator = null;
StoreMailboxAnnotationManager annotationManager = new StoreMailboxAnnotationManager(mapperFactory, storeRightManager,
LIMIT_ANNOTATIONS, LIMIT_ANNOTATION_SIZE);
SessionProviderImpl sessionProvider = new SessionProviderImpl(noAuthenticator, noAuthorizator);
CassandraPerUserMaxQuotaManager maxQuotaManager = new CassandraPerUserMaxQuotaManager(new CassandraPerUserMaxQuotaDao(session),
new CassandraPerDomainMaxQuotaDao(session),
new CassandraGlobalMaxQuotaDao(session));
CassandraCurrentQuotaManager currentQuotaUpdater = new CassandraCurrentQuotaManager(session);
StoreQuotaManager storeQuotaManager = new StoreQuotaManager(currentQuotaUpdater, maxQuotaManager);
QuotaRootResolver quotaRootResolver = new DefaultUserQuotaRootResolver(sessionProvider, mapperFactory);
ListeningCurrentQuotaUpdater quotaUpdater = new ListeningCurrentQuotaUpdater(currentQuotaUpdater, quotaRootResolver, eventBus, storeQuotaManager);
QuotaComponents quotaComponents = new QuotaComponents(maxQuotaManager, storeQuotaManager, quotaRootResolver);
AttachmentContentLoader attachmentContentLoader = null;
MessageSearchIndex index = new SimpleMessageSearchIndex(mapperFactory, mapperFactory, new DefaultTextExtractor(), attachmentContentLoader);
CassandraMailboxManager manager = new CassandraMailboxManager(mapperFactory, sessionProvider, new NoMailboxPathLocker(),
messageParser, messageIdFactory, eventBus, annotationManager, storeRightManager,
quotaComponents, index, MailboxManagerConfiguration.DEFAULT, preDeletionHooks);
eventBus.register(quotaUpdater);
eventBus.register(new MailboxAnnotationListener(mapperFactory, sessionProvider));
eventBus.register(mapperFactory.deleteMessageListener());
return manager;
}
public static BoundStatement getBoundStatementNamed(
String query, String contactPoint, Map<String, String> values) {
try (com.datastax.driver.core.Cluster cluster =
defaultBuilder().addContactPoint(contactPoint).build()) {
Session session = cluster.connect();
com.datastax.driver.core.PreparedStatement prepared = session.prepare(query);
BoundStatement bound = prepared.bind();
values.forEach((k, v) -> bound.setString(k, v));
return bound;
}
}
public static ResultSet makeNativeBoundQueryWithNameParams(
String query, String contactPoint, Map<String, Long> values) {
com.datastax.driver.core.Cluster cluster =
defaultBuilder().addContactPoint(contactPoint).build();
Session session = cluster.connect();
com.datastax.driver.core.PreparedStatement prepared = session.prepare(query);
BoundStatement bound = prepared.bind();
values.forEach((k, v) -> bound.setLong(k, v));
return executeQueryWithFreshSession(bound, contactPoint, session, cluster);
}
static ResultSetFuture executeAsyncAndUpdateTimer(Session session, Statement stmt, Timer timer) {
long startTime = System.nanoTime();
ResultSetFuture rs = session.executeAsync(stmt);
if (timer != null) {
// Update the timer once the asynchronous execution completes.
rs.addListener(() -> timer.update(System.nanoTime() - startTime, TimeUnit.NANOSECONDS), MoreExecutors.directExecutor());
}
return rs;
}
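For context, a hedged usage sketch for the two timer helpers above. It assumes the Timer is a Dropwizard Metrics com.codahale.metrics.Timer (its update(long, TimeUnit) signature matches, but the import is not shown in the snippets), and CassandraTimers is a hypothetical name for the class holding the helpers, since the original enclosing class is not shown.

import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;

public class TimedQueries {
    static void timedRead(Session session) {
        MetricRegistry registry = new MetricRegistry();
        Timer readTimer = registry.timer("cassandra.reads");
        Statement stmt = new SimpleStatement("SELECT release_version FROM system.local");
        // CassandraTimers is a hypothetical holder class for the helpers shown above.
        CassandraTimers.executeAndUpdateTimer(session, stmt, readTimer);
        // The async variant returns immediately and records the timing from a listener.
        CassandraTimers.executeAsyncAndUpdateTimer(session, stmt, readTimer);
    }
}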
private PreparedStatement prepareAdd(Session session) {
return session.prepare(insertInto(TABLE)
.value(OWNER, bindMarker(OWNER))
.value(MESSAGE_ID, bindMarker(MESSAGE_ID))
.value(BUCKET_NAME, bindMarker(BUCKET_NAME))
.value(BLOB_ID, bindMarker(BLOB_ID)));
}
public CassandraBaseDAO(Session session, ObjectMapper objectMapper, CassandraConfiguration config) {
this.session = session;
this.objectMapper = objectMapper;
this.config = config;
init();
}
public Response getRecords(
String keySpace, String table, Map<String, Object> filters, List<String> fields) {
Response response = new Response();
Session session = connectionManager.getSession(keySpace);
try {
Select select;
if (CollectionUtils.isNotEmpty(fields)) {
select = QueryBuilder.select(fields.toArray(new String[0])).from(keySpace, table);
} else {
select = QueryBuilder.select().all().from(keySpace, table);
}
if (MapUtils.isNotEmpty(filters)) {
Select.Where where = select.where();
for (Map.Entry<String, Object> filter : filters.entrySet()) {
Object value = filter.getValue();
if (value instanceof List) {
where = where.and(QueryBuilder.in(filter.getKey(), (List) value));
} else {
where = where.and(QueryBuilder.eq(filter.getKey(), value));
}
}
}
ResultSet results = session.execute(select);
response = CassandraUtil.createResponse(results);
} catch (Exception e) {
ProjectLogger.log(Constants.EXCEPTION_MSG_FETCH + table + " : " + e.getMessage(), e);
throw new ProjectCommonException(
ResponseCode.SERVER_ERROR.getErrorCode(),
ResponseCode.SERVER_ERROR.getErrorMessage(),
ResponseCode.SERVER_ERROR.getResponseCode());
}
return response;
}
@SuppressWarnings({"rawtypes", "unchecked"})
public State makeState(Map configuration, IMetricsContext metrics, int partitionIndex, int numPartitions) {
if (clientFactory == null) {
clientFactory = new MapConfiguredCqlClientFactory(configuration);
}
Session session;
if(options.keyspace != null) {
session = clientFactory.getSession(options.keyspace);
} else {
session = clientFactory.getSession();
}
CassandraCqlMapState state = new CassandraCqlMapState(session, mapper, options, configuration);
state.registerMetrics(configuration, metrics, options.mapStateMetricName);
CachedMap cachedMap = new CachedMap(state, options.localCacheSize);
MapState mapState;
if (stateType == StateType.NON_TRANSACTIONAL) {
mapState = NonTransactionalMap.build(cachedMap);
} else if (stateType == StateType.OPAQUE) {
mapState = OpaqueMap.build(cachedMap);
} else if (stateType == StateType.TRANSACTIONAL) {
mapState = TransactionalMap.build(cachedMap);
} else {
throw new RuntimeException("Unknown state type: " + stateType);
}
return new SnapshottableMap(mapState, new Values(options.globalKey));
}
public static void init(Cluster cluster) {
try (Session systemSession = cluster.connect()) {
String createKeyspace = String.format("CREATE KEYSPACE IF NOT EXISTS %s WITH replication = " +
"{'class':'SimpleStrategy','replication_factor':'1'};", TEST_KEYSPACE);
systemSession.execute(createKeyspace);
String createTable = String.format("CREATE TABLE %s.%s (id text, value text, PRIMARY KEY(id))", TEST_KEYSPACE, TEST_TABLE);
systemSession.execute(createTable);
}
}
@Override
protected void configure() {
bind(CassandraConfiguration.class).to(SystemPropertiesCassandraConfiguration.class);
bind(Cluster.class).toProvider(CassandraClusterProvider.class).asEagerSingleton();
bind(Session.class).toProvider(CassandraSessionProvider.class);
bind(MetadataDAO.class).to(CassandraMetadataDAO.class);
bind(ExecutionDAO.class).to(CassandraExecutionDAO.class);
bind(EventHandlerDAO.class).to(CassandraEventHandlerDAO.class);
}
protected static PreparedStatement listByPlaceStatement(Session session) {
CassandraQueryBuilder queryBuilder =
CassandraQueryBuilder
.select(RuleEnvironmentTable.NAME)
.addWhereColumnEquals(RuleEnvironmentTable.Column.PLACE_ID.columnName());
return RuleEnvironmentTable.addAllColumns(queryBuilder).prepare(session);
}
@Test
void initializeShouldReturnAlreadyDoneWhenTypeExists() {
KeyspaceMetadata keyspace = mock(KeyspaceMetadata.class);
when(keyspace.getUserType(NAME)).thenReturn(mock(UserType.class));
Session session = mock(Session.class);
assertThat(TYPE.initialize(keyspace, session))
.isEqualByComparingTo(ALREADY_DONE);
verify(keyspace).getUserType(NAME);
verify(session, never()).execute(STATEMENT);
}
@Override
public Mapper apply(Session session) {
if (mappingManager == null) {
this.mappingManager = new MappingManager(session);
}
return new DefaultObjectMapper<T>(mappingManager.mapper(entity));
}
@Override
public PreparedStatement prepare(Session session, String query, ConsistencyLevel consistency, RetryPolicy retryPolicy) {
PreparedStatement ps = session.prepare(query);
ps.setConsistencyLevel(consistency);
if(retryPolicy != null) {
ps.setRetryPolicy(retryPolicy);
}
return ps;
}
private Response getRecordByIdentifier(
String keyspaceName, String tableName, Object key, List<String> fields) {
long startTime = System.currentTimeMillis();
ProjectLogger.log(
"Cassandra Service getRecordBy key method started at ==" + startTime, LoggerEnum.INFO);
Response response = new Response();
try {
Session session = connectionManager.getSession(keyspaceName);
Builder selectBuilder;
if (CollectionUtils.isNotEmpty(fields)) {
selectBuilder = QueryBuilder.select(fields.toArray(new String[fields.size()]));
} else {
selectBuilder = QueryBuilder.select().all();
}
Select selectQuery = selectBuilder.from(keyspaceName, tableName);
Where selectWhere = selectQuery.where();
if (key instanceof String) {
selectWhere.and(eq(Constants.IDENTIFIER, key));
} else if (key instanceof Map) {
Map<String, Object> compositeKey = (Map<String, Object>) key;
compositeKey.forEach((columnName, columnValue) -> CassandraUtil.createQuery(columnName, columnValue, selectWhere));
}
ResultSet results = session.execute(selectWhere);
response = CassandraUtil.createResponse(results);
} catch (Exception e) {
ProjectLogger.log(Constants.EXCEPTION_MSG_FETCH + tableName + " : " + e.getMessage(), e);
throw new ProjectCommonException(
ResponseCode.SERVER_ERROR.getErrorCode(),
ResponseCode.SERVER_ERROR.getErrorMessage(),
ResponseCode.SERVER_ERROR.getResponseCode());
}
logQueryElapseTime("getRecordByIdentifier", startTime);
return response;
}
private PreparedStatement prepareInsert(Session session) {
return session.prepare(
insertInto(TABLE_NAME)
.value(ATTACHMENT_ID_AS_UUID, bindMarker(ATTACHMENT_ID_AS_UUID))
.value(ATTACHMENT_ID, bindMarker(ATTACHMENT_ID))
.value(MESSAGE_ID, bindMarker(MESSAGE_ID)));
}