Source code examples for the class org.hibernate.event.spi.EventSource

The examples below show how the org.hibernate.event.spi.EventSource API is used in practice; you can also follow the project links to view the full source code on GitHub.
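
Before the individual examples, here is a minimal sketch of the pattern they all share: each org.hibernate.event.spi.*Event exposes the originating session as an EventSource via getSession(), and the EventSource in turn gives access to the ActionQueue, the PersistenceContext and the SessionFactory. The listener class FlushLoggingListener below is hypothetical and not taken from any of the projects listed; it only uses EventSource calls that also appear in the examples that follow.

import org.hibernate.HibernateException;
import org.hibernate.engine.spi.PersistenceContext;
import org.hibernate.event.spi.EventSource;
import org.hibernate.event.spi.FlushEvent;
import org.hibernate.event.spi.FlushEventListener;

// Hypothetical example listener, for illustration only.
public class FlushLoggingListener implements FlushEventListener {

	@Override
	public void onFlush(FlushEvent event) throws HibernateException {
		// Every event carries its originating session as an EventSource.
		final EventSource source = event.getSession();
		final PersistenceContext persistenceContext = source.getPersistenceContext();

		// The EventSource exposes the session's persistence context and action queue.
		System.out.println( "Flush requested: "
				+ persistenceContext.getNumberOfManagedEntities() + " managed entities, "
				+ source.getActionQueue().numberOfInsertions() + " insertions currently queued" );
	}
}

A sketch of how such a listener might be registered is shown after the last example on this page.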

Example 1  Project: lams   File: AbstractFlushingEventListener.java
@SuppressWarnings( value = {"unchecked"} )
private void logFlushResults(FlushEvent event) {
	if ( !LOG.isDebugEnabled() ) {
		return;
	}
	final EventSource session = event.getSession();
	final PersistenceContext persistenceContext = session.getPersistenceContext();
	LOG.debugf(
			"Flushed: %s insertions, %s updates, %s deletions to %s objects",
			session.getActionQueue().numberOfInsertions(),
			session.getActionQueue().numberOfUpdates(),
			session.getActionQueue().numberOfDeletions(),
			persistenceContext.getNumberOfManagedEntities()
	);
	LOG.debugf(
			"Flushed: %s (re)creations, %s updates, %s removals to %s collections",
			session.getActionQueue().numberOfCollectionCreations(),
			session.getActionQueue().numberOfCollectionUpdates(),
			session.getActionQueue().numberOfCollectionRemovals(),
			persistenceContext.getCollectionEntries().size()
	);
	new EntityPrinter( session.getFactory() ).toString(
			persistenceContext.getEntitiesByKey().entrySet()
	);
}
 
Example 2  Project: lams   File: Cascade.java
/**
 * Cascade an action to a to-one association or any type
 */
private static void cascadeToOne(
		final CascadingAction action,
		final EventSource eventSource,
		final Object parent,
		final Object child,
		final Type type,
		final CascadeStyle style,
		final Object anything,
		final boolean isCascadeDeleteEnabled) {
	final String entityName = type.isEntityType()
			? ( (EntityType) type ).getAssociatedEntityName()
			: null;
	if ( style.reallyDoCascade( action ) ) {
		//not really necessary, but good for consistency...
		eventSource.getPersistenceContext().addChildParent( child, parent );
		try {
			action.cascade( eventSource, child, entityName, anything, isCascadeDeleteEnabled );
		}
		finally {
			eventSource.getPersistenceContext().removeChildParent( child );
		}
	}
}
 
Example 3  Project: lams   File: DefaultPersistEventListener.java
@SuppressWarnings({"unchecked"})
private void entityIsDeleted(PersistEvent event, Map createCache) {
	final EventSource source = event.getSession();

	final Object entity = source.getPersistenceContext().unproxy( event.getObject() );
	final EntityPersister persister = source.getEntityPersister( event.getEntityName(), entity );

	LOG.tracef(
			"un-scheduling entity deletion [%s]",
			MessageHelper.infoString(
					persister,
					persister.getIdentifier( entity, source ),
					source.getFactory()
			)
	);

	if ( createCache.put( entity, entity ) == null ) {
		justCascade( createCache, source, entity, persister );
	}
}
 
private CompletionStage<Object> doOnLoad(
		final EntityPersister persister,
		final LoadEvent event,
		final LoadEventListener.LoadType loadType) {

	final EventSource session = event.getSession();
	final EntityKey keyToLoad = session.generateEntityKey( event.getEntityId(), persister );
	if ( loadType.isNakedEntityReturned() ) {
		//do not return a proxy!
		//(this option indicates we are initializing a proxy)
		return load( event, persister, keyToLoad, loadType );
	}
	//return a proxy if appropriate
	else if ( event.getLockMode() == LockMode.NONE ) {
		return proxyOrLoad( event, persister, keyToLoad, loadType );
	}
	else {
		return lockAndLoad( event, persister, keyToLoad, loadType, session );
	}
}
 
/**
 * Performs the load of an entity.
 *
 * @param event The initiating load request event
 * @param persister The persister corresponding to the entity to be loaded
 * @param keyToLoad The key of the entity to be loaded
 * @param options The defined load options
 *
 * @return The loaded entity.
 */
private CompletionStage<Object> load( LoadEvent event, EntityPersister persister, EntityKey keyToLoad, LoadType options) {
	final EventSource session = event.getSession();
	if ( event.getInstanceToLoad() != null ) {
		if ( session.getPersistenceContextInternal().getEntry( event.getInstanceToLoad() ) != null ) {
			throw new PersistentObjectException(
					"attempted to load into an instance that was already associated with the session: " +
							MessageHelper.infoString( persister, event.getEntityId(), session.getFactory() ) );
		}
		persister.setIdentifier( event.getInstanceToLoad(), event.getEntityId(), session );
	}

	return doLoad( event, persister, keyToLoad, options )
			.thenApply( optional -> {
				boolean isOptionalInstance = event.getInstanceToLoad() != null;
				if ( optional==null && ( !options.isAllowNulls() || isOptionalInstance ) ) {
					throwEntityNotFound( session, event.getEntityClassName(), event.getEntityId() );
				}
				else if ( isOptionalInstance && optional != event.getInstanceToLoad() ) {
					throw new NonUniqueObjectException( event.getEntityId(), event.getEntityClassName() );
				}
				return optional;
			} );
}
 
Example 6  Project: lams   File: AbstractSaveEventListener.java
/**
 * Prepares the save call using the given requested id.
 *
 * @param entity The entity to be saved.
 * @param requestedId The id to which to associate the entity.
 * @param entityName The name of the entity being saved.
 * @param anything Generally cascade-specific information.
 * @param source The session which is the source of this save event.
 *
 * @return The id used to save the entity.
 */
protected Serializable saveWithRequestedId(
		Object entity,
		Serializable requestedId,
		String entityName,
		Object anything,
		EventSource source) {
	callbackRegistry.preCreate( entity );

	return performSave(
			entity,
			requestedId,
			source.getEntityPersister( entityName, entity ),
			false,
			anything,
			source,
			true
	);
}
 
Example 7  Project: lams   File: IteratorImpl.java
public IteratorImpl(
		ResultSet rs,
		PreparedStatement ps,
		EventSource sess,
		boolean readOnly,
		Type[] types,
		String[][] columnNames,
		HolderInstantiator holderInstantiator) throws HibernateException, SQLException {
	this.rs = rs;
	this.ps = ps;
	this.session = sess;
	this.readOnly = readOnly;
	this.types = types;
	this.names = columnNames;
	this.holderInstantiator = holderInstantiator;

	single = types.length == 1;

	postNext();
}
 
private void cascadeOnLock(LockEvent event, EntityPersister persister, Object entity) {
	EventSource source = event.getSession();
	final PersistenceContext persistenceContext = source.getPersistenceContextInternal();
	persistenceContext.incrementCascadeLevel();
	try {
		new Cascade(
				CascadingActions.LOCK,
				CascadePoint.AFTER_LOCK,
				persister,
				entity,
				event.getLockOptions(),
				source
		).cascade();
	}
	finally {
		persistenceContext.decrementCascadeLevel();
	}
}
 
private boolean wrapCollections(
		EventSource session,
		EntityPersister persister,
		Type[] types,
		Object[] values
) {
	if ( persister.hasCollections() ) {

		// wrap up any new collections directly referenced by the object
		// or its components

		// NOTE: we need to do the wrap here even if it's not "dirty",
		// because collections need wrapping but changes to _them_
		// don't dirty the container. Also, for versioned data, we
		// need to wrap before calling searchForDirtyCollections

		WrapVisitor visitor = new WrapVisitor( session );
		// substitutes into values by side-effect
		visitor.processEntityPropertyValues( values, types );
		return visitor.isSubstitutionRequired();
	}
	else {
		return false;
	}
}
 
protected CompletionStage<Void> performExecutions(EventSource session) {
	LOG.trace( "Executing flush" );

	// IMPL NOTE : here we alter the flushing flag of the persistence context to allow
	//		during-flush callbacks more leniency in regards to initializing proxies and
	//		lazy collections during their processing.
	// For more information, see HHH-2763
	return CompletionStages.nullFuture()
			.thenCompose(v -> {
				session.getJdbcCoordinator().flushBeginning();
				session.getPersistenceContext().setFlushing( true );
				// we need to lock the collection caches before executing entity inserts/updates in order to
				// account for bi-directional associations
				actionQueue( session ).prepareActions();
				return actionQueue( session ).executeActions();
			} )
			.whenComplete( (v, x) -> {
				session.getPersistenceContext().setFlushing( false );
				session.getJdbcCoordinator().flushEnding();
			} );
}
 
Example 11  Project: lams   File: DefaultMergeEventListener.java
private void saveTransientEntity(
		Object entity,
		String entityName,
		Serializable requestedId,
		EventSource source,
		Map copyCache) {
	//this bit is only *really* absolutely necessary for handling
	//requestedId, but is also good if we merge multiple object
	//graphs, since it helps ensure uniqueness
	if ( requestedId == null ) {
		saveWithGeneratedId( entity, entityName, copyCache, source, false );
	}
	else {
		saveWithRequestedId( entity, requestedId, entityName, copyCache, source );
	}
}
 
Example 12  Project: lams   File: DefaultDeleteEventListener.java
/**
 * We encountered a delete request on a transient instance.
 * <p/>
 * This is a deviation from historical Hibernate (pre-3.2) behavior to
 * align with the JPA spec, which states that transient entities can be
 * passed to the remove operation, in which case cascades still need to be
 * performed.
 *
 * @param session The session which is the source of the event
 * @param entity The entity being deleted
 * @param cascadeDeleteEnabled Is cascading of deletes enabled
 * @param persister The entity persister
 * @param transientEntities A cache of already visited transient entities
 * (to avoid infinite recursion).
 */
protected void deleteTransientEntity(
		EventSource session,
		Object entity,
		boolean cascadeDeleteEnabled,
		EntityPersister persister,
		Set transientEntities) {
	LOG.handlingTransientEntity();
	if ( transientEntities.contains( entity ) ) {
		LOG.trace( "Already handled transient entity; skipping" );
		return;
	}
	transientEntities.add( entity );
	cascadeBeforeDelete( session, persister, entity, null, transientEntities );
	cascadeAfterDelete( session, persister, entity, transientEntities );
}
 
Example 13  Project: lams   File: AbstractFlushingEventListener.java
/**
 * Execute all SQL (and second-level cache updates) in a special order so that foreign-key constraints cannot
 * be violated: <ol>
 * <li> Inserts, in the order they were performed
 * <li> Updates
 * <li> Deletion of collection elements
 * <li> Insertion of collection elements
 * <li> Deletes, in the order they were performed
 * </ol>
 *
 * @param session The session being flushed
 */
protected void performExecutions(EventSource session) {
	LOG.trace( "Executing flush" );

	// IMPL NOTE : here we alter the flushing flag of the persistence context to allow
	//		during-flush callbacks more leniency in regards to initializing proxies and
	//		lazy collections during their processing.
	// For more information, see HHH-2763
	try {
		session.getJdbcCoordinator().flushBeginning();
		session.getPersistenceContext().setFlushing( true );
		// we need to lock the collection caches before executing entity inserts/updates in order to
		// account for bi-directional associations
		session.getActionQueue().prepareActions();
		session.getActionQueue().executeActions();
	}
	finally {
		session.getPersistenceContext().setFlushing( false );
		session.getJdbcCoordinator().flushEnding();
	}
}
 
Example 14  Project: lams   File: Cascade.java
/**
 * Cascade an action to a collection
 */
private static void cascadeCollection(
		final CascadingAction action,
		final CascadePoint cascadePoint,
		final EventSource eventSource,
		final int componentPathStackDepth,
		final Object parent,
		final Object child,
		final CascadeStyle style,
		final Object anything,
		final CollectionType type) {
	final CollectionPersister persister = eventSource.getFactory().getCollectionPersister( type.getRole() );
	final Type elemType = persister.getElementType();

	CascadePoint elementsCascadePoint = cascadePoint;
	if ( cascadePoint == CascadePoint.AFTER_INSERT_BEFORE_DELETE ) {
		elementsCascadePoint = CascadePoint.AFTER_INSERT_BEFORE_DELETE_VIA_COLLECTION;
	}

	//cascade to current collection elements
	if ( elemType.isEntityType() || elemType.isAnyType() || elemType.isComponentType() ) {
		cascadeCollectionElements(
			action,
			elementsCascadePoint,
			eventSource,
			componentPathStackDepth,
			parent,
			child,
			type,
			style,
			elemType,
			anything,
			persister.isCascadeDeleteEnabled()
		);
	}
}
 
Example 15  Project: lams   File: AbstractSaveEventListener.java
protected boolean invokeSaveLifecycle(Object entity, EntityPersister persister, EventSource source) {
	// Sub-insertions should occur before containing insertion so
	// Try to do the callback now
	if ( persister.implementsLifecycle() ) {
		LOG.debug( "Calling onSave()" );
		if ( ((Lifecycle) entity).onSave( source ) ) {
			LOG.debug( "Insertion vetoed by onSave()" );
			return true;
		}
	}
	return false;
}
 
Example 16  Project: lams   File: OnReplicateVisitor.java
@Override
public Object processCollection(Object collection, CollectionType type) throws HibernateException {
	if ( collection == CollectionType.UNFETCHED_COLLECTION ) {
		return null;
	}

	final EventSource session = getSession();
	final CollectionPersister persister = session.getFactory().getMetamodel().collectionPersister( type.getRole() );

	if ( isUpdate ) {
		removeCollection( persister, extractCollectionKeyFromOwner( persister ), session );
	}
	if ( collection != null && collection instanceof PersistentCollection ) {
		final PersistentCollection wrapper = (PersistentCollection) collection;
		wrapper.setCurrentSession( (SessionImplementor) session );
		if ( wrapper.wasInitialized() ) {
			session.getPersistenceContext().addNewCollection( persister, wrapper );
		}
		else {
			reattachCollection( wrapper, type );
		}
	}
	else {
		// otherwise a null or brand new collection
		// this will also (inefficiently) handle arrays, which
		// have no snapshot, so we can't do any better
		//processArrayOrNewCollection(collection, type);
	}

	return null;

}
 
Example 17  Project: hibernate-reactive   File: CascadingActions.java
@Override
public CompletionStage<?> cascade(
		EventSource session,
		Object child,
		String entityName,
		IdentitySet context,
		boolean isCascadeDeleteEnabled) {
	LOG.tracev( "Cascading to delete: {0}", entityName );
	return session.unwrap(ReactiveSession.class).reactiveFetch( child, true )
			.thenCompose( c -> session.unwrap(ReactiveSession.class)
					.reactiveRemove( c, isCascadeDeleteEnabled, context ) );
}
 
Example 18  Project: lams   File: AbstractFlushingEventListener.java
/**
 * Coordinates the processing necessary to get things ready for executions
 * as db calls by prepping the session caches and moving the appropriate
 * entities and collections to their respective execution queues.
 *
 * @param event The flush event.
 * @throws HibernateException Error flushing caches to execution queues.
 */
protected void flushEverythingToExecutions(FlushEvent event) throws HibernateException {

	LOG.trace( "Flushing session" );

	EventSource session = event.getSession();

	final PersistenceContext persistenceContext = session.getPersistenceContext();
	session.getInterceptor().preFlush( new LazyIterator( persistenceContext.getEntitiesByKey() ) );

	prepareEntityFlushes( session, persistenceContext );
	// we could move this inside if we wanted to
	// tolerate collection initializations during
	// collection dirty checking:
	prepareCollectionFlushes( persistenceContext );
	// now, any collections that are initialized
	// inside this block do not get updated - they
	// are ignored until the next flush

	persistenceContext.setFlushing( true );
	try {
		int entityCount = flushEntities( event, persistenceContext );
		int collectionCount = flushCollections( session, persistenceContext );

		event.setNumberOfEntitiesProcessed( entityCount );
		event.setNumberOfCollectionsProcessed( collectionCount );
	}
	finally {
		persistenceContext.setFlushing(false);
	}

	//some statistics
	logFlushResults( event );
}
 
Example 19  Project: hibernate-reactive   File: CascadingActions.java
@Override
public CompletionStage<?> cascade(
		EventSource session,
		Object child,
		String entityName,
		MergeContext context,
		boolean isCascadeDeleteEnabled)
		throws HibernateException {
	LOG.tracev("Cascading to refresh: {0}", entityName);
	return session.unwrap(ReactiveSession.class).reactiveMerge( child, context );
}
 
Example 20  Project: lams   File: DefaultFlushEventListener.java
/** Handle the given flush event.
 *
 * @param event The flush event to be handled.
 * @throws HibernateException
 */
public void onFlush(FlushEvent event) throws HibernateException {
	final EventSource source = event.getSession();
	final PersistenceContext persistenceContext = source.getPersistenceContext();

	if ( persistenceContext.getNumberOfManagedEntities() > 0 ||
			persistenceContext.getCollectionEntries().size() > 0 ) {

		try {
			source.getEventListenerManager().flushStart();

			flushEverythingToExecutions( event );
			performExecutions( source );
			postFlush( source );
		}
		finally {
			source.getEventListenerManager().flushEnd(
					event.getNumberOfEntitiesProcessed(),
					event.getNumberOfCollectionsProcessed()
			);
		}

		postPostFlush( source );

		if ( source.getFactory().getStatistics().isStatisticsEnabled() ) {
			source.getFactory().getStatistics().flush();
		}
	}
}
 
private CompletionStage<Void> cascadeRefresh(
		EventSource source,
		EntityPersister persister,
		Object object,
		IdentitySet refreshedAlready) {
	return new Cascade<>(
			CascadingActions.REFRESH,
			CascadePoint.BEFORE_REFRESH,
			persister,
			object,
			refreshedAlready,
			source
	).cascade();
}
 
protected CompletionStage<Void> entityIsPersistent(PersistEvent event, IdentitySet createCache) {
	LOG.trace( "Ignoring persistent instance" );
	final EventSource source = event.getSession();

	//TODO: check that entry.getIdentifier().equals(requestedId)

	final Object entity = source.getPersistenceContextInternal().unproxy( event.getObject() );
	final EntityPersister persister = source.getEntityPersister( event.getEntityName(), entity );

	if ( createCache.add( entity ) ) {
		return justCascade( createCache, source, entity, persister );
	}
	return CompletionStages.nullFuture();
}
 
Example 23  Project: lams   File: DefaultReplicateEventListener.java
private void performReplication(
		Object entity,
		Serializable id,
		Object version,
		EntityPersister persister,
		ReplicationMode replicationMode,
		EventSource source) throws HibernateException {

	if ( LOG.isTraceEnabled() ) {
		LOG.tracev( "Replicating changes to {0}", MessageHelper.infoString( persister, id, source.getFactory() ) );
	}

	new OnReplicateVisitor( source, id, entity, true ).process( entity, persister );

	source.getPersistenceContext().addEntity(
			entity,
			( persister.isMutable() ? Status.MANAGED : Status.READ_ONLY ),
			null,
			source.generateEntityKey( id, persister ),
			version,
			LockMode.NONE,
			true,
			persister,
			true
	);

	cascadeAfterReplicate( entity, persister, replicationMode, source );
}
 
private CompletionStage<Void> loadByDerivedIdentitySimplePkValue(LoadEvent event, LoadEventListener.LoadType options,
		EntityPersister dependentPersister, EmbeddedComponentType dependentIdType, EntityPersister parentPersister) {
	EventSource session = event.getSession();
	final EntityKey parentEntityKey = session.generateEntityKey( event.getEntityId(), parentPersister );
	return doLoad( event, parentPersister, parentEntityKey, options )
			.thenApply( checkEntityFound( session, parentEntityKey.getEntityName(), parentEntityKey ) )
			.thenApply( parent -> {
				final Serializable dependent = (Serializable) dependentIdType.instantiate( parent, session );
				dependentIdType.setPropertyValues( dependent, new Object[] {parent}, dependentPersister.getEntityMode() );
				event.setEntityId( dependent );
				return session.generateEntityKey( dependent, dependentPersister );
			} )
			.thenCompose( dependentEntityKey -> doLoad( event, dependentPersister, dependentEntityKey, options ) )
			.thenAccept( event::setResult );
}
 
private void cacheNaturalId(LoadEvent event, EntityPersister persister, EventSource session, Object entity) {
	if ( entity != null && persister.hasNaturalIdentifier() ) {
		final PersistenceContext persistenceContext = session.getPersistenceContextInternal();
		final PersistenceContext.NaturalIdHelper naturalIdHelper = persistenceContext.getNaturalIdHelper();
		naturalIdHelper.cacheNaturalIdCrossReferenceFromLoad(
				persister,
				event.getEntityId(),
				naturalIdHelper.extractNaturalIdValues(
						entity,
						persister
				)
		);
	}
}
 
private void disallowDeletionOfDetached(DeleteEvent event) {
	EventSource source = event.getSession();
	String entityName = event.getEntityName();
	EntityPersister persister = source.getEntityPersister( entityName, event.getObject() );
	Serializable id = persister.getIdentifier( event.getObject(), source );
	entityName = entityName == null ? source.guessEntityName( event.getObject() ) : entityName;
	throw new IllegalArgumentException( "Removing a detached instance " + entityName + "#" + id );
}
 
private Object[] createDeletedState(EntityPersister persister, Object[] currentState, EventSource session) {
		Type[] propTypes = persister.getPropertyTypes();
		final Object[] deletedState = new Object[propTypes.length];
//      TypeFactory.deepCopy( currentState, propTypes, persister.getPropertyUpdateability(), deletedState, session );
		boolean[] copyability = new boolean[propTypes.length];
		java.util.Arrays.fill( copyability, true );
		TypeHelper.deepCopy( currentState, propTypes, copyability, deletedState, session );
		return deletedState;
	}
 
Example 28  Project: lams   File: DefaultEvictEventListener.java
protected void doEvict(
		final Object object,
		final EntityKey key,
		final EntityPersister persister,
		final EventSource session)
		throws HibernateException {

	if ( LOG.isTraceEnabled() ) {
		LOG.tracev( "Evicting {0}", MessageHelper.infoString( persister ) );
	}

	if ( persister.hasNaturalIdentifier() ) {
		session.getPersistenceContext().getNaturalIdHelper().handleEviction(
				object,
				persister,
				key.getIdentifier()
		);
	}

	// remove all collections for the entity from the session-level cache
	if ( persister.hasCollections() ) {
		new EvictVisitor( session, object ).process( object, persister );
	}

	// remove any snapshot, not really for memory management purposes, but
	// rather because it might now be stale, and there is no longer any
	// EntityEntry to take precedence
	// This is now handled by removeEntity()
	//session.getPersistenceContext().removeDatabaseSnapshot(key);
	
	session.getPersistenceContext().removeEntity( key );
	session.getPersistenceContext().removeEntry( object );

	Cascade.cascade( CascadingActions.EVICT, CascadePoint.AFTER_EVICT, session, persister, object );
}
 
/**
 * Flushes a single entity's state to the database, by scheduling
 * an update action, if necessary
 */
public void onFlushEntity(FlushEntityEvent event) throws HibernateException {
	final Object entity = event.getEntity();
	final EntityEntry entry = event.getEntityEntry();
	final EventSource session = event.getSession();
	final EntityPersister persister = entry.getPersister();
	final Status status = entry.getStatus();
	final Type[] types = persister.getPropertyTypes();

	final boolean mightBeDirty = entry.requiresDirtyCheck( entity );

	final Object[] values = getValues( entity, entry, mightBeDirty, session );

	event.setPropertyValues( values );

	//TODO: avoid this for non-new instances where mightBeDirty==false
	boolean substitute = wrapCollections( session, persister, types, values );

	if ( isUpdateNecessary( event, mightBeDirty ) ) {
		substitute = scheduleUpdate( event ) || substitute;
	}

	if ( status != Status.DELETED ) {
		// now update the object .. has to be outside the main if block above (because of collections)
		if ( substitute ) {
			persister.setPropertyValues( entity, values );
		}

		// Search for collections by reachability, updating their role.
		// We don't want to touch collections reachable from a deleted object
		if ( persister.hasCollections() ) {
			new FlushVisitor( session, entity ).processEntityPropertyValues( values, types );
		}
	}
}
 
/**
	 * Coordinates the processing necessary to get things ready for executions
	 * as db calls by prepping the session caches and moving the appropriate
	 * entities and collections to their respective execution queues.
	 *
	 * @param event The flush event.
	 * @throws HibernateException Error flushing caches to execution queues.
	 */
	protected CompletionStage<Void> flushEverythingToExecutions(FlushEvent event) throws HibernateException {

		LOG.trace( "Flushing session" );

		EventSource session = event.getSession();

		final PersistenceContext persistenceContext = session.getPersistenceContextInternal();
		session.getInterceptor().preFlush( persistenceContext.managedEntitiesIterator() );

		CompletionStage<Void> cascades = prepareEntityFlushes(session, persistenceContext);
		// we could move this inside if we wanted to
		// tolerate collection initializations during
		// collection dirty checking:
		prepareCollectionFlushes( persistenceContext );
		// now, any collections that are initialized
		// inside this block do not get updated - they
		// are ignored until the next flush

		return cascades.thenAccept( v -> {
			persistenceContext.setFlushing(true);
			try {
				int entityCount = flushEntities(event, persistenceContext);
				int collectionCount = flushCollections(session, persistenceContext);

				event.setNumberOfEntitiesProcessed(entityCount);
				event.setNumberOfCollectionsProcessed(collectionCount);
			}
			finally {
				persistenceContext.setFlushing(false);
			}
		});

		//some statistics
//		logFlushResults( event );
	}
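
Finally, a hedged sketch, not taken from any of the projects above, of how a listener that receives an EventSource might be wired into Hibernate using the EventListenerRegistry service. The FlushLoggingListener class is the hypothetical example from the top of this page, and the exact bootstrap API may differ between Hibernate versions.

import org.hibernate.SessionFactory;
import org.hibernate.engine.spi.SessionFactoryImplementor;
import org.hibernate.event.service.spi.EventListenerRegistry;
import org.hibernate.event.spi.EventType;

public class ListenerRegistration {

	// Appends the hypothetical FlushLoggingListener to the FLUSH event group,
	// so it runs in addition to Hibernate's default flush listener.
	public static void register(SessionFactory sessionFactory) {
		EventListenerRegistry registry = sessionFactory
				.unwrap( SessionFactoryImplementor.class )
				.getServiceRegistry()
				.getService( EventListenerRegistry.class );
		registry.appendListeners( EventType.FLUSH, new FlushLoggingListener() );
	}
}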
 