Code examples for the org.hibernate.jdbc.Expectation class

The examples below show how the org.hibernate.jdbc.Expectation API is used in practice and the typical coding patterns around it; you can also click through to GitHub to view the original source of each project.
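Before the individual project examples, here is a minimal, self-contained sketch of the typical Expectation call pattern (assuming Hibernate ORM 5.x; the ExpectationSketch class, its verify method, and its arguments are illustrative and not part of any project listed below):

import java.sql.PreparedStatement;
import java.sql.SQLException;

import org.hibernate.HibernateException;
import org.hibernate.engine.spi.ExecuteUpdateResultCheckStyle;
import org.hibernate.jdbc.Expectation;
import org.hibernate.jdbc.Expectations;

public class ExpectationSketch {

	// Checks that a JDBC update affected the expected number of rows.
	// 'statement' and 'rowCount' are hypothetical inputs supplied by the caller.
	static void verify(PreparedStatement statement, int rowCount)
			throws SQLException, HibernateException {
		// Pick the Expectation matching a result-check style; COUNT performs a row-count check.
		Expectation expectation =
				Expectations.appropriateExpectation( ExecuteUpdateResultCheckStyle.COUNT );

		// prepare() lets callable-style expectations register OUT parameters; it returns the
		// number of bind positions consumed, which callers add to their first parameter index.
		int firstBindIndex = 1 + expectation.prepare( statement );
		System.out.println( "first bindable parameter index: " + firstBindIndex );

		// Throws an exception (e.g. StaleStateException) if rowCount does not satisfy the
		// expectation; -1 means the statement was not executed as part of a JDBC batch.
		expectation.verifyOutcome( rowCount, statement, -1 );
	}
}

The persister examples below follow the same pattern: obtain an Expectation from the configured result-check style, let it prepare() the statement, execute the update, then verify the affected-row count with verifyOutcome() or check().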

@Override
public boolean check(int rows, Serializable id, int tableNumber,
					 Expectation expectation, PreparedStatement statement) throws HibernateException {
	return super.check(rows, id, tableNumber, expectation, statement);
}
 
default CompletionStage<?> insertReactive(
			Serializable id,
			Object[] fields,
			boolean[] notNull,
			int j,
			String sql,
			Object object,
			SharedSessionContractImplementor session) throws HibernateException {

		if ( delegate().isInverseTable( j ) ) {
			return CompletionStages.nullFuture();
		}

		//note: it is conceptually possible that a UserType could map null to
		//	  a non-null value, so the following is arguable:
		if ( delegate().isNullableTable( j ) && delegate().isAllNull( fields, j ) ) {
			return CompletionStages.nullFuture();
		}

		if ( log.isTraceEnabled() ) {
			log.tracev( "Inserting entity: {0}", infoString(delegate(), id, delegate().getFactory() ) );
			if ( j == 0 && delegate().isVersioned() ) {
				log.tracev( "Version: {0}", Versioning.getVersion( fields, delegate()) );
			}
		}

		// TODO : shouldn't inserts be Expectations.NONE?
		final Expectation expectation = appropriateExpectation( delegate().getInsertResultCheckStyles()[j] );
//		final int jdbcBatchSizeToUse = session.getConfiguredJdbcBatchSize();
//		final boolean useBatch = expectation.canBeBatched() &&
//				jdbcBatchSizeToUse > 1 &&
//				delegate.getIdentifierGenerator().supportsJdbcBatchInserts();

//		if ( useBatch && insertBatchKey == null ) {
//			insertBatchKey = new BasicBatchKey(
//					delegate.getEntityName() + "#INSERT",
//					expectation
//			);
//		}
//		final boolean callable = delegate.isInsertCallable( j );

		Object[] params = PreparedStatementAdaptor.bind( insert -> {
			boolean[][] insertable = delegate().getPropertyColumnInsertable();
			int index = delegate().dehydrate( null, fields, notNull, insertable, j, insert, session, false );
			delegate().getIdentifierType().nullSafeSet( insert, id, index, session );
		} );

		return getReactiveConnection( session )
				.update( sql, params )
				.thenAccept( count -> {
					try {
						expectation.verifyOutcome(count, new PreparedStatementAdaptor(), -1);
					}
					catch (SQLException e) {
						//can't actually occur!
						throw new JDBCException( "error while verifying result count", e );
					}
				});
	}
 
default CompletionStage<?> deleteReactive(
			Serializable id,
			Object version,
			int j,
			Object object,
			String sql,
			SharedSessionContractImplementor session,
			Object[] loadedState) throws HibernateException {

		if ( delegate().isInverseTable( j ) ) {
			return CompletionStages.nullFuture();
		}
		final boolean useVersion = j == 0 && delegate().isVersioned();
//		final boolean callable = delegate.isDeleteCallable( j );
		final Expectation expectation = appropriateExpectation( delegate().getDeleteResultCheckStyles()[j] );
//		final boolean useBatch = j == 0 && delegate.isBatchable() && expectation.canBeBatched();
//		if ( useBatch && deleteBatchKey == null ) {
//			deleteBatchKey = new BasicBatchKey(
//					delegate.getEntityName() + "#DELETE",
//					expectation
//			);
//		}

		if ( log.isTraceEnabled() ) {
			log.tracev( "Deleting entity: {0}", infoString(delegate(), id, delegate().getFactory() ) );
			if ( useVersion ) {
				log.tracev( "Version: {0}", version );
			}
		}

		if ( delegate().isTableCascadeDeleteEnabled( j ) ) {
			if ( log.isTraceEnabled() ) {
				log.tracev( "Delete handled by foreign key constraint: {0}", delegate().getTableName( j ) );
			}
			//EARLY EXIT!
			return CompletionStages.nullFuture();
		}

		//Render the SQL query
		Object[] params = PreparedStatementAdaptor.bind( delete -> {
			int index = 1;

			index += expectation.prepare( delete );

			// Do the key. The key is immutable so we can use the _current_ object state - not necessarily
			// the state at the time the delete was issued
			delegate().getIdentifierType().nullSafeSet( delete, id, index, session );
			index += delegate().getIdentifierColumnSpan();

			// We should use the _current_ object state (ie. after any updates that occurred during flush)
			if ( useVersion ) {
				delegate().getVersionType().nullSafeSet( delete, version, index, session );
			}
			else if ( isAllOrDirtyOptimisticLocking() && loadedState != null ) {
				boolean[] versionability = delegate().getPropertyVersionability();
				Type[] types = delegate().getPropertyTypes();
				for (int i = 0; i < delegate().getEntityMetamodel().getPropertySpan(); i++ ) {
					if ( delegate().isPropertyOfTable( i, j ) && versionability[i] ) {
						// this property belongs to the table and it is not specifically
						// excluded from optimistic locking by optimistic-lock="false"
						boolean[] settable = types[i].toColumnNullness( loadedState[i], delegate().getFactory() );
						types[i].nullSafeSet( delete, loadedState[i], index, settable, session );
						index += ArrayHelper.countTrue( settable );
					}
				}
			}
		} );

		return getReactiveConnection(session)
				.update( sql, params )
				.thenAccept( count -> check( count, id, j, expectation, new PreparedStatementAdaptor() ) );
	}
 
default CompletionStage<Boolean> updateReactive(
			final Serializable id,
			final Object[] fields,
			final Object[] oldFields,
			final Object rowId,
			final boolean[] includeProperty,
			final int j,
			final Object oldVersion,
			final Object object,
			final String sql,
			final SharedSessionContractImplementor session) throws HibernateException {

		final Expectation expectation = appropriateExpectation( delegate().getUpdateResultCheckStyles()[j] );
//		final int jdbcBatchSizeToUse = session.getConfiguredJdbcBatchSize();
//		final boolean useBatch = expectation.canBeBatched() && isBatchable() && jdbcBatchSizeToUse > 1;
//		if ( useBatch && updateBatchKey == null ) {
//			updateBatchKey = new BasicBatchKey(
//					delegate.getEntityName() + "#UPDATE",
//					expectation
//			);
//		}
//		final boolean callable = delegate.isUpdateCallable( j );
		final boolean useVersion = j == 0 && delegate().isVersioned();

		if ( log.isTraceEnabled() ) {
			log.tracev( "Updating entity: {0}", infoString(delegate(), id, delegate().getFactory() ) );
			if ( useVersion ) {
				log.tracev( "Existing version: {0} -> New version:{1}", oldVersion, fields[delegate().getVersionProperty()] );
			}
		}

//			if ( useBatch ) {
//				update = session
//						.getJdbcCoordinator()
//						.getBatch( updateBatchKey )
//						.getBatchStatement( sql, callable );
//			}

		Object[] params = PreparedStatementAdaptor.bind( update -> {
			int index = 1;
			index += expectation.prepare( update );

			//Now write the values of fields onto the prepared statement
			index = delegate().dehydrate(
					id,
					fields,
					rowId,
					includeProperty,
					delegate().getPropertyColumnUpdateable(),
					j,
					update,
					session,
					index,
					true
			);

			// Write any appropriate versioning conditional parameters
			if ( useVersion && delegate().getEntityMetamodel().getOptimisticLockStyle() == OptimisticLockStyle.VERSION ) {
				if ( delegate().checkVersion( includeProperty ) ) {
					delegate().getVersionType().nullSafeSet( update, oldVersion, index, session );
				}
			}
			else if ( isAllOrDirtyOptimisticLocking() && oldFields != null ) {
				boolean[] versionability = delegate().getPropertyVersionability(); //TODO: is this really necessary????
				boolean[] includeOldField = delegate().getEntityMetamodel().getOptimisticLockStyle() == OptimisticLockStyle.ALL
						? delegate().getPropertyUpdateability()
						: includeProperty;
				Type[] types = delegate().getPropertyTypes();
				for (int i = 0; i < delegate().getEntityMetamodel().getPropertySpan(); i++ ) {
					boolean include = includeOldField[i] &&
							delegate().isPropertyOfTable( i, j ) &&
							versionability[i]; //TODO: is this really necessary????
					if ( include ) {
						boolean[] settable = types[i].toColumnNullness( oldFields[i], delegate().getFactory() );
						types[i].nullSafeSet(
								update,
								oldFields[i],
								index,
								settable,
								session
						);
						index += ArrayHelper.countTrue( settable );
					}
				}
			}
		} );

//				if ( useBatch ) {
//					session.getJdbcCoordinator().getBatch( updateBatchKey ).addToBatch();
//					return true;
//				}

		return getReactiveConnection(session)
				.update( sql, params )
				.thenApply( count -> check( count, id, j, expectation, new PreparedStatementAdaptor() ) );
	}
 
boolean check(
		int rows,
		Serializable id,
		int tableNumber,
		Expectation expectation,
		PreparedStatement statement) throws HibernateException;
 
Example 8  Project: lams  File: BasicCollectionPersister.java
@Override
protected int doUpdateRows(Serializable id, PersistentCollection collection, SharedSessionContractImplementor session)
		throws HibernateException {
	if ( ArrayHelper.isAllFalse( elementColumnIsSettable ) ) {
		return 0;
	}

	try {
		final Expectation expectation = Expectations.appropriateExpectation( getUpdateCheckStyle() );
		final boolean callable = isUpdateCallable();
		final int jdbcBatchSizeToUse = session.getConfiguredJdbcBatchSize();
		boolean useBatch = expectation.canBeBatched() && jdbcBatchSizeToUse > 1;
		final Iterator entries = collection.entries( this );

		final List elements = new ArrayList();
		while ( entries.hasNext() ) {
			elements.add( entries.next() );
		}

		final String sql = getSQLUpdateRowString();
		int count = 0;
		if ( collection.isElementRemoved() ) {
			// the update should be done starting from the end to the list
			for ( int i = elements.size() - 1; i >= 0; i-- ) {
				count = doUpdateRow(
						id,
						collection,
						session,
						expectation,
						callable,
						useBatch,
						elements,
						sql,
						count,
						i
				);
			}
		}
		else {
			for ( int i = 0; i < elements.size(); i++ ) {
				count = doUpdateRow(
						id,
						collection,
						session,
						expectation,
						callable,
						useBatch,
						elements,
						sql,
						count,
						i
				);
			}
		}
		return count;
	}
	catch (SQLException sqle) {
		throw session.getJdbcServices().getSqlExceptionHelper().convert(
				sqle,
				"could not update collection rows: " + MessageHelper.collectionInfoString(
						this,
						collection,
						id,
						session
				),
				getSQLUpdateRowString()
		);
	}
}
 
Example 9  Project: lams  File: BasicCollectionPersister.java
private int doUpdateRow(
		Serializable id,
		PersistentCollection collection,
		SharedSessionContractImplementor session,
		Expectation expectation, boolean callable, boolean useBatch, List elements, String sql, int count, int i)
		throws SQLException {
	PreparedStatement st;
	Object entry = elements.get( i );
	if ( collection.needsUpdating( entry, i, elementType ) ) {
		int offset = 1;

		if ( useBatch ) {
			if ( updateBatchKey == null ) {
				updateBatchKey = new BasicBatchKey(
						getRole() + "#UPDATE",
						expectation
				);
			}
			st = session
					.getJdbcCoordinator()
					.getBatch( updateBatchKey )
					.getBatchStatement( sql, callable );
		}
		else {
			st = session
					.getJdbcCoordinator()
					.getStatementPreparer()
					.prepareStatement( sql, callable );
		}

		try {
			offset += expectation.prepare( st );
			int loc = writeElement( st, collection.getElement( entry ), offset, session );
			if ( hasIdentifier ) {
				writeIdentifier( st, collection.getIdentifier( entry, i ), loc, session );
			}
			else {
				loc = writeKey( st, id, loc, session );
				if ( hasIndex && !indexContainsFormula ) {
					writeIndexToWhere( st, collection.getIndex( entry, i, this ), loc, session );
				}
				else {
					writeElementToWhere( st, collection.getSnapshotElement( entry, i ), loc, session );
				}
			}

			if ( useBatch ) {
				session.getJdbcCoordinator()
						.getBatch( updateBatchKey )
						.addToBatch();
			}
			else {
				expectation.verifyOutcome(
						session.getJdbcCoordinator().getResultSetReturn().executeUpdate(
								st
						), st, -1
				);
			}
		}
		catch (SQLException sqle) {
			if ( useBatch ) {
				session.getJdbcCoordinator().abortBatch();
			}
			throw sqle;
		}
		finally {
			if ( !useBatch ) {
				session.getJdbcCoordinator().getLogicalConnection().getResourceRegistry().release( st );
				session.getJdbcCoordinator().afterStatementExecution();
			}
		}
		count++;
	}
	return count;
}
 
Example 10  Project: lams  File: BasicBatchKey.java
@Override
public Expectation getExpectation() {
	return expectation;
}
 
Example 11  Project: cacheonix-core  File: InsertOrderingTest.java
public void addToBatch(Expectation expectation) throws SQLException, HibernateException {
	Counter counter = ( Counter ) batchSizes.get( currentBatch );
	counter.count++;
	System.out.println( "Adding to batch [" + batchSQL + "]" );
	super.addToBatch( expectation );
}
 
Example 12  Project: lams  File: BatchKey.java
/**
 * Get the expectation pertaining to the outcome of the {@link Batch} associated with this key.
 *
 * @return The expectations
 */
Expectation getExpectation();
 
Example 13  Project: lams  File: BasicBatchKey.java
/**
 * Constructs a BasicBatchKey
 *
 * @param comparison A string used to compare batch keys.
 * @param expectation The expectation for the batch
 */
public BasicBatchKey(String comparison, Expectation expectation) {
	this.comparison = comparison;
	this.statementCount = 1;
	this.expectation = expectation;
}
 
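A BasicBatchKey pairs a comparison string with the Expectation used to verify each statement in the batch, as seen in the commented-out fragments of the reactive examples above. A minimal construction sketch (assuming Hibernate ORM 5.x; BatchKeySketch, insertKeyFor, and the entity-name parameter are illustrative, not part of the library):

import org.hibernate.engine.jdbc.batch.internal.BasicBatchKey;
import org.hibernate.engine.spi.ExecuteUpdateResultCheckStyle;
import org.hibernate.jdbc.Expectation;
import org.hibernate.jdbc.Expectations;

class BatchKeySketch {

	// Builds the batch key a persister might use for INSERT statements of one entity.
	static BasicBatchKey insertKeyFor(String entityName) {
		Expectation expectation =
				Expectations.appropriateExpectation( ExecuteUpdateResultCheckStyle.COUNT );
		// Convention used by the persisters above: "<entity name>#<operation>".
		BasicBatchKey key = new BasicBatchKey( entityName + "#INSERT", expectation );
		// The batching machinery later retrieves the same Expectation via getExpectation().
		return key;
	}
}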