java.sql.PreparedStatement#executeBatch() Source Code Examples

Listed below are code examples of java.sql.PreparedStatement#executeBatch(). You can follow the links to view the full source on GitHub, or leave a comment on the right.
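All the examples below share the same core pattern: bind one row's parameters, queue the row with addBatch(), and send everything to the server in a single executeBatch() round trip. Here is a minimal self-contained sketch of that pattern; the in-memory H2 URL, table, and column names are hypothetical placeholders rather than code from any of the projects below.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class BatchInsertSketch {
    public static void main(String[] args) throws SQLException {
        // Hypothetical in-memory database; any JDBC URL behaves the same way.
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo")) {
            conn.createStatement().execute(
                    "CREATE TABLE example_table (id INT, name VARCHAR(50))");
            conn.setAutoCommit(false); // group the whole batch into one transaction
            try (PreparedStatement ps = conn.prepareStatement(
                    "INSERT INTO example_table (id, name) VALUES (?, ?)")) {
                for (int i = 0; i < 100; i++) {
                    ps.setInt(1, i);              // bind this row's parameters
                    ps.setString(2, "name-" + i);
                    ps.addBatch();                // queue the row instead of executing it now
                }
                ps.executeBatch();                // send all queued rows in one round trip
                conn.commit();
            }
        }
    }
}

Example 1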

@Test
public void testMorePersistedSavepointsInBatch() throws Exception {
    ResultSet rs = conn1.query("call SYSCS_UTIL.SYSCS_GET_CURRENT_TRANSACTION()");
    rs.next();
    PreparedStatement ps = conn1.prepareStatement(String.format("insert into %s.e (i) values (?)", CLASS_NAME));
    long txnId = rs.getLong(1);
    long iterations = (SIConstants.TRASANCTION_INCREMENT - 2) * 4;
    for (int i = 0; i < iterations; ++i) {
        ps.setInt(1, i);
        ps.addBatch();
    }
    ps.executeBatch();
    conn1.commit();
    long count = conn1.count("select * from e");
    assertEquals("Incorrect count after savepoint release!",iterations,count);

    rs = conn1.query("call SYSCS_UTIL.SYSCS_GET_CURRENT_TRANSACTION()");
    rs.next();
    long txnIdLater = rs.getLong(1);

    // The difference should be 0x1000: the batch created up to 14 persisted txns, that's 28 timestamps (begin + commit),
    // the user transaction committed (1 ts) and then the new user transaction started (1 ts)
    Assert.assertTrue("Created more persisted transactions than expected, difference = " + (txnIdLater - txnId), txnIdLater <= txnId + 0x1E00);
}
 
Example 2  Project: clickhouse-jdbc  File: ClickhouseLZ4StreamTest.java
@Test
public void testBigBatchCompressedInsert() throws SQLException {
    connection.createStatement().execute("DROP TABLE IF EXISTS test.big_batch_insert");
    connection.createStatement().execute(
            "CREATE TABLE IF NOT EXISTS test.big_batch_insert (i Int32, s String) ENGINE = TinyLog"
    );

    PreparedStatement statement = connection.prepareStatement("INSERT INTO test.big_batch_insert (s, i) VALUES (?, ?)");

    int cnt = 1000000;
    for (int i = 0; i < cnt; i++) {
        statement.setString(1, "string" + i);
        statement.setInt(2, i);
        statement.addBatch();
    }

    statement.executeBatch();

    ResultSet rs = connection.createStatement().executeQuery("SELECT count() as cnt from test.big_batch_insert");
    rs.next();
    Assert.assertEquals(cnt, rs.getInt("cnt"));
    Assert.assertFalse(rs.next());
}
 
Example 3  Project: carbon-device-mgt  File: AbstractFeatureDAO.java
@Override
public List<ProfileFeature> updateProfileFeatures(List<ProfileFeature> features, int profileId) throws
                                                                                                FeatureManagerDAOException {

    Connection conn;
    PreparedStatement stmt = null;
    int tenantId = PrivilegedCarbonContext.getThreadLocalCarbonContext().getTenantId();

    try {
        conn = this.getConnection();
        String query = "UPDATE DM_PROFILE_FEATURES SET CONTENT = ? WHERE PROFILE_ID = ? AND FEATURE_CODE = ? AND" +
                       " TENANT_ID = ?";

        stmt = conn.prepareStatement(query);
        for (ProfileFeature feature : features) {
            stmt.setBytes(1, PolicyManagerUtil.getBytes(feature.getContent()));
            stmt.setInt(2, profileId);
            stmt.setString(3, feature.getFeatureCode());
            stmt.setInt(4, tenantId);
            stmt.addBatch();
            // Deliberately omits flushing the batch once more than 1000 records are queued; a chunked variant is sketched after this example.
        }
        stmt.executeBatch();

    } catch (SQLException | IOException e) {
        throw new FeatureManagerDAOException("Error occurred while adding the feature list to the database.", e);
    } finally {
        PolicyManagementDAOUtil.cleanupResources(stmt, null);
    }
    return features;
}
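
The loop above deliberately queues every feature before a single executeBatch() call. For very large feature lists, a common variant flushes the batch in fixed-size chunks; the following is only a sketch of that variant (BATCH_SIZE and the queued counter are hypothetical, not carbon-device-mgt code), reusing the bindings from the loop above.

    final int BATCH_SIZE = 1000;  // hypothetical chunk size
    int queued = 0;
    for (ProfileFeature feature : features) {
        stmt.setBytes(1, PolicyManagerUtil.getBytes(feature.getContent()));
        stmt.setInt(2, profileId);
        stmt.setString(3, feature.getFeatureCode());
        stmt.setInt(4, tenantId);
        stmt.addBatch();
        if (++queued % BATCH_SIZE == 0) {
            stmt.executeBatch();  // flush a full chunk
            stmt.clearBatch();    // defensive; executeBatch already resets the batch
        }
    }
    stmt.executeBatch();          // flush the final partial chunk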
 
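Example 4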
@Test
public void testInsertQuery() {
    String url = ConnectionUtil.getEthUrl();
    String driverClass = "com.impetus.eth.jdbc.EthDriver";
    try {
        Class.forName(driverClass);
        Properties prop = new Properties();
        prop.put(DriverConstants.KEYSTORE_PATH,ConnectionUtil.getKeyStorePath());
        prop.put(DriverConstants.KEYSTORE_PASSWORD,ConnectionUtil.getKeyStorePassword());
        Connection conn = DriverManager.getConnection(url, prop);
        String query = "insert into transaction (toAddress, value, unit, async) values (?, ?, 'ether', false)";

        PreparedStatement stmt = conn.prepareStatement(query);
        stmt.setObject(2, 0.0001);
        stmt.setObject(1, "0x8144c67b144A408ABC989728e32965EDf37Adaa1");
        stmt.addBatch();
        stmt.addBatch();

        stmt.setObject(2, 0.0003);
        stmt.addBatch();

        stmt.executeBatch();

        Field f = stmt.getClass().getDeclaredField("batchList");
        f.setAccessible(true);

        // The bare java assert keyword is a no-op unless the JVM runs with -ea,
        // so an explicit JUnit assertion is used here instead.
        Assert.assertEquals(0, ((List<Object[]>) f.get(stmt)).size());

    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail("batch insert threw: " + e.getMessage());
    }
}
 
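Example 5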
private void executeStatement(PreparedStatement statement, AtomicInteger statementCount) throws SQLException {
    statement.addBatch();
    int count = statementCount.incrementAndGet();
    if(count % getBatchSize() == 0) {
        statement.executeBatch();
    }
}
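
This helper only sends full chunks, so rows queued since the last multiple of getBatchSize() stay pending until someone flushes them. A caller would typically do that once at the end; a hypothetical companion method (flushRemaining is not a name from the original source) might look like:

private void flushRemaining(PreparedStatement statement, AtomicInteger statementCount) throws SQLException {
    // send whatever was queued after the last full chunk
    if (statementCount.get() % getBatchSize() != 0) {
        statement.executeBatch();
    }
}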
 
Example 6  Project: cloudstack  File: UsageVmDiskDaoImpl.java
@Override
public void saveUsageVmDisks(List<UsageVmDiskVO> usageVmDisks) {
    TransactionLegacy txn = TransactionLegacy.currentTxn();
    try {
        txn.start();
        String sql = INSERT_USAGE_VM_DISK;
        PreparedStatement pstmt = null;
        pstmt = txn.prepareAutoCloseStatement(sql); // in reality I just want CLOUD_USAGE dataSource connection
        for (UsageVmDiskVO usageVmDisk : usageVmDisks) {
            pstmt.setLong(1, usageVmDisk.getAccountId());
            pstmt.setLong(2, usageVmDisk.getZoneId());
            pstmt.setLong(3, usageVmDisk.getVmId());
            pstmt.setLong(4, usageVmDisk.getVolumeId());
            pstmt.setLong(5, usageVmDisk.getIORead());
            pstmt.setLong(6, usageVmDisk.getIOWrite());
            pstmt.setLong(7, usageVmDisk.getAggIORead());
            pstmt.setLong(8, usageVmDisk.getAggIOWrite());
            pstmt.setLong(9, usageVmDisk.getBytesRead());
            pstmt.setLong(10, usageVmDisk.getBytesWrite());
            pstmt.setLong(11, usageVmDisk.getAggBytesRead());
            pstmt.setLong(12, usageVmDisk.getAggBytesWrite());
            pstmt.setLong(13, usageVmDisk.getEventTimeMillis());
            pstmt.addBatch();
        }
        pstmt.executeBatch();
        txn.commit();
    } catch (Exception ex) {
        txn.rollback();
        s_logger.error("error saving usage_vm_disk to cloud_usage db", ex);
        throw new CloudRuntimeException(ex.getMessage());
    }
}
 
Example 7  Project: cloudstack  File: UsageNetworkDaoImpl.java
@Override
public void saveUsageNetworks(List<UsageNetworkVO> usageNetworks) {
    TransactionLegacy txn = TransactionLegacy.currentTxn();
    try {
        txn.start();
        String sql = INSERT_USAGE_NETWORK;
        PreparedStatement pstmt = null;
        pstmt = txn.prepareAutoCloseStatement(sql); // in reality I just want CLOUD_USAGE dataSource connection
        for (UsageNetworkVO usageNetwork : usageNetworks) {
            pstmt.setLong(1, usageNetwork.getAccountId());
            pstmt.setLong(2, usageNetwork.getZoneId());
            pstmt.setLong(3, usageNetwork.getHostId());
            pstmt.setString(4, usageNetwork.getHostType());
            pstmt.setLong(5, usageNetwork.getNetworkId());
            pstmt.setLong(6, usageNetwork.getBytesSent());
            pstmt.setLong(7, usageNetwork.getBytesReceived());
            pstmt.setLong(8, usageNetwork.getAggBytesReceived());
            pstmt.setLong(9, usageNetwork.getAggBytesSent());
            pstmt.setLong(10, usageNetwork.getEventTimeMillis());
            pstmt.addBatch();
        }
        pstmt.executeBatch();
        txn.commit();
    } catch (Exception ex) {
        txn.rollback();
        s_logger.error("error saving usage_network to cloud_usage db", ex);
        throw new CloudRuntimeException(ex.getMessage());
    }
}
 
Example 8  Project: gemfirexd-oss  File: GfxdWanCommonTestBase.java
public static Runnable prepInsert() {
  CacheSerializableRunnable senderConf = new CacheSerializableRunnable(
      "Sender Configurator") {
    @Override
    public void run2() throws CacheException {
      try {
        Connection conn = TestUtil.jdbcConn;
        conn.setAutoCommit(false);
        conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
        PreparedStatement prep = conn.prepareStatement("insert into "
            + "EMP.PARTITIONED_TABLE (ID, DESCRIPTION, ADDRESS, COMPANY) values (?, ?, ?, ?)");
        prep.setInt(1,1);
        prep.setString(2, "First");
        prep.setString(3, "A714");
        prep.setString(4, "Pivotal");
        prep.addBatch();
        prep.setInt(1,2);
        prep.setString(2, "Second");
        prep.setString(3, "J 605");
        prep.setString(4, "Zimbra");
        prep.addBatch();
        prep.executeBatch();
        conn.commit();
      } catch (SQLException sqle) {
        throw GemFireXDRuntimeException.newRuntimeException(null, sqle);
      }
    }
  };
  return senderConf;
}
 
Example 9  Project: seata-samples  File: StorageService.java
/**
 * 0.8.0 release
 *
 * @throws SQLException
 */
@GlobalTransactional
public void batchUpdate() throws SQLException {
    Connection connection = null;
    PreparedStatement preparedStatement = null;
    try {
        connection = dataSource.getConnection();
        connection.setAutoCommit(false);
        String sql = "update storage_tbl set count = ?" +
            "    where id = ? and commodity_code = ?";
        preparedStatement = connection.prepareStatement(sql);
        preparedStatement.setInt(1, 100);
        preparedStatement.setLong(2, 1);
        preparedStatement.setString(3, "2001");
        preparedStatement.addBatch();
        preparedStatement.setInt(1, 200);
        preparedStatement.setLong(2, 2);
        preparedStatement.setString(3, "2002");
        preparedStatement.addBatch();
        preparedStatement.setInt(1, 300);
        preparedStatement.setLong(2, 3);
        preparedStatement.setString(3, "2003");
        preparedStatement.addBatch();
        preparedStatement.executeBatch();
        connection.commit();
        // Deliberate ArithmeticException so the @GlobalTransactional rollback path is exercised.
        System.out.println(1 / 0);
    } catch (Exception e) {
        throw e;
    } finally {
        // Close the statement before its connection, and guard against nulls
        // in case getConnection() or prepareStatement() threw.
        if (preparedStatement != null) {
            preparedStatement.close();
        }
        if (connection != null) {
            connection.close();
        }
    }
}
 
Example 10  Project: seata-samples  File: StorageService.java
/**
 * 0.8.0 release
 *
 * @throws SQLException
 */
@GlobalTransactional
public void batchDelete() throws SQLException {
    Connection connection = null;
    PreparedStatement preparedStatement = null;
    try {
        connection = dataSource.getConnection();
        connection.setAutoCommit(false);
        String sql = "delete from storage_tbl where  count = ? and commodity_code = ?";
        preparedStatement = connection.prepareStatement(sql);
        preparedStatement.setInt(1, 11);
        preparedStatement.setString(2, "2001");
        preparedStatement.addBatch();
        preparedStatement.setInt(1, 22);
        preparedStatement.setString(2, "2002");
        preparedStatement.addBatch();
        preparedStatement.setInt(1, 33);
        preparedStatement.setString(2, "2003");
        preparedStatement.addBatch();
        preparedStatement.executeBatch();
        connection.commit();
        // Deliberate ArithmeticException so the @GlobalTransactional rollback path is exercised.
        System.out.println(1 / 0);
    } catch (Exception e) {
        throw e;
    } finally {
        // Close the statement before its connection, and guard against nulls
        // in case getConnection() or prepareStatement() threw.
        if (preparedStatement != null) {
            preparedStatement.close();
        }
        if (connection != null) {
            connection.close();
        }
    }
}
 
Example 11  Project: ambiverse-nlu  File: DBUtil.java
public synchronized  static void executeBatch(PreparedStatement prepStmt) {
  try {
    prepStmt.executeBatch();
  } catch (SQLException sqle) {
    sqle.getNextException().printStackTrace();
    throw new RuntimeException(sqle);
  }
}
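
On failure, executeBatch() typically throws a BatchUpdateException whose getNextException() chain carries the per-command causes, but getNextException() can return null, in which case the method above would itself throw a NullPointerException. A more defensive sketch, assuming nothing beyond the standard JDBC API, walks the whole chain:

public synchronized static void executeBatchSafely(PreparedStatement prepStmt) {
    try {
        prepStmt.executeBatch();
    } catch (SQLException sqle) {
        // print every chained cause, tolerating an empty chain
        for (SQLException e = sqle; e != null; e = e.getNextException()) {
            e.printStackTrace();
        }
        throw new RuntimeException(sqle);
    }
}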
 
Example 12  Project: mykit-db-sync  File: MySQLSync.java
@Override
public void executeSQL(String sql, Connection conn) throws SQLException {
    // JDBC forbids calling the Statement.addBatch(String) overload on a
    // PreparedStatement, so a plain Statement is used to batch raw SQL strings.
    Statement pst = conn.createStatement();
    String[] sqlList = sql.split(";");
    for (int index = 0; index < sqlList.length; index++) {
        pst.addBatch(sqlList[index]);
    }
    pst.executeBatch();
    conn.commit();
    pst.close();
}
 
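Example 13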
@Override
protected void onEnd(PreparedStatement statement) throws SQLException {
    int[] updateCount = statement.executeBatch();
    statement.clearBatch();
}
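
The int[] returned by executeBatch() holds one entry per queued command: an update count or Statement.SUCCESS_NO_INFO (a failing command instead raises BatchUpdateException, whose getUpdateCounts() may also contain Statement.EXECUTE_FAILED). A sketch of tallying the counts rather than discarding updateCount (standard JDBC only, not code from the original project):

int[] updateCount = statement.executeBatch();
int totalRows = 0;
for (int count : updateCount) {
    // SUCCESS_NO_INFO means the command ran but the driver gave no row count
    if (count != Statement.SUCCESS_NO_INFO) {
        totalRows += count;
    }
}
System.out.println("batch updated " + totalRows + " rows across " + updateCount.length + " commands");
statement.clearBatch();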
 
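Example 14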
private void prepareTable(boolean dropIfExists)
        throws SQLException {
    Statement stmt = createStatement();
    ResultSet rs = getConnection().getMetaData().getTables(
            null, null, TAB, null);
    if (rs.next()) {
        assertFalse(rs.next());
        rs.close();
        if (dropIfExists) {
            println("table " + TAB + " already exists, dropping");
            stmt.executeUpdate("drop table " + TAB);
        } else {
            println("table " + TAB + " already exists, reusing");
            return;
        }
    } else {
        rs.close();
    }

    stmt.executeUpdate("create table " + TAB + " (val1 int, val2 int)");
    stmt.executeUpdate("create index " +
            "mtsel_idx on " + TAB + " (val1, val2)");

    setAutoCommit(false);
    PreparedStatement ps = prepareStatement(
            "insert into " + TAB + " values (?,?)");
    // Insert blocks of 10000 rows.
    int blockCount = 10;
    int blockSize = 10000;
    for (int i=0; i < blockCount; i++) {
        ps.setInt(1, i);
        for (int j=0; j < blockSize; j++) {
            ps.setInt(2, j);
            ps.addBatch();
        }
        ps.executeBatch();
        commit();
        println("inserted block " + (i+1) + "/" + blockCount);
    }
    setAutoCommit(true);
}
 
Example 15  Project: cloudstack  File: UsageDaoImpl.java
@Override
public void saveVmDiskStats(List<VmDiskStatisticsVO> vmDiskStats) {
    TransactionLegacy txn = TransactionLegacy.currentTxn();
    try {
        txn.start();
        String sql = INSERT_VM_DISK_STATS;
        PreparedStatement pstmt = null;
        pstmt = txn.prepareAutoCloseStatement(sql); // in reality I just want CLOUD_USAGE dataSource connection
        for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
            pstmt.setLong(1, vmDiskStat.getId());
            pstmt.setLong(2, vmDiskStat.getDataCenterId());
            pstmt.setLong(3, vmDiskStat.getAccountId());
            if (vmDiskStat.getVmId() != null) {
                pstmt.setLong(4, vmDiskStat.getVmId());
            } else {
                pstmt.setNull(4, Types.BIGINT);
            }
            if (vmDiskStat.getVolumeId() != null) {
                pstmt.setLong(5, vmDiskStat.getVolumeId());
            } else {
                pstmt.setNull(5, Types.BIGINT);
            }
            pstmt.setLong(6, vmDiskStat.getNetIORead());
            pstmt.setLong(7, vmDiskStat.getNetIOWrite());
            pstmt.setLong(8, vmDiskStat.getCurrentIORead());
            pstmt.setLong(9, vmDiskStat.getCurrentIOWrite());
            pstmt.setLong(10, vmDiskStat.getAggIORead());
            pstmt.setLong(11, vmDiskStat.getAggIOWrite());
            pstmt.setLong(12, vmDiskStat.getNetBytesRead());
            pstmt.setLong(13, vmDiskStat.getNetBytesWrite());
            pstmt.setLong(14, vmDiskStat.getCurrentBytesRead());
            pstmt.setLong(15, vmDiskStat.getCurrentBytesWrite());
            pstmt.setLong(16, vmDiskStat.getAggBytesRead());
            pstmt.setLong(17, vmDiskStat.getAggBytesWrite());
            pstmt.addBatch();
        }
        pstmt.executeBatch();
        txn.commit();
    } catch (Exception ex) {
        txn.rollback();
        s_logger.error("error saving vm disk stats to cloud_usage db", ex);
        throw new CloudRuntimeException(ex.getMessage());
    }

}
 
Example 16  Project: gemfirexd-oss  File: BOPurgeExecutorServiceImpl.java
private void insertLogData(Connection pCtx) {
  int currentBatchSize = 0;
  try {
    PreparedStatement prepareStatement = pCtx
        .prepareStatement(SELECT_LAST_UPDATED_BO_LOG_RECORD);
    PreparedStatement insertStmt = pCtx
        .prepareStatement(INSERT_INTO_LOG_TABLE_FOR_PURGED_RECORDS);
    ResultSet listOfAllPurgeReadyBOIds = getListOfAllPurgeReadyBOIdsFromTempTable(pCtx);
    if (null != listOfAllPurgeReadyBOIds) {
      while (listOfAllPurgeReadyBOIds.next()) {
        String boId = listOfAllPurgeReadyBOIds.getString("BO_TXN_ID");
        prepareStatement.setString(1, boId);
        ResultSet resultSet = prepareStatement.executeQuery();
        while (resultSet.next()) {
          insertStmt.setString(1, resultSet.getString("BO_TXN_ID"));
          insertStmt.setString(2, resultSet.getString("BACKOFFICE_CODE"));
          insertStmt.setString(3, resultSet.getString("CHANNEL_NAME"));
          insertStmt.setString(4, resultSet.getString("TXN_TYPE"));
          insertStmt.setInt(5, MARK_AS_TO_PURGE);
          insertStmt.setString(6, resultSet.getString("MATCH_STATUS"));
          insertStmt.setString(7, resultSet.getString("MATCH_CATEG_ID"));
          insertStmt.setString(8, resultSet.getString("HIT_STATUS"));
          insertStmt.setString(9, resultSet.getString("ACTUAL_VALUE_DATE"));
          insertStmt.setString(10, "SCREENING_TIME");
          insertStmt.addBatch();
          currentBatchSize++;
          if (currentBatchSize > maxBatchSize) {
            insertStmt.executeBatch();
            pCtx.commit();
            currentBatchSize = 0;
          }
          break;
        }
      }
      insertStmt.executeBatch();
      pCtx.commit();
    }
  } catch (Exception e) {
    Log.getLogWriter().error("Component: UseCase1-SECURITAS:appstat|Event Severity: Fatal|Event Class: MatchingEngine|Description: Issue while inserting raw data.-insertLogData Summary:  " + TestHelper.getStackTrace(e));
  }
}
 
Example 17  Project: Plan  File: ExecBatchStatement.java
@Override
protected boolean callExecute(PreparedStatement statement) throws SQLException {
    return statement.executeBatch().length > 0;
}
 
Example 18  Project: xyz-hub  File: DatabaseTransactionalWriter.java
public static FeatureCollection insertFeatures(String schema, String table, String streamId, FeatureCollection collection,
                                               List<Feature> inserts, Connection connection)
        throws SQLException, JsonProcessingException {

    boolean batchInsert = false;
    boolean batchInsertWithoutGeometry = false;

    final PreparedStatement insertStmt = createInsertStatement(connection,schema,table);
    final PreparedStatement insertWithoutGeometryStmt = createInsertWithoutGeometryStatement(connection,schema,table);

    insertStmt.setQueryTimeout(TIMEOUT);
    insertWithoutGeometryStmt.setQueryTimeout(TIMEOUT);

    for (int i = 0; i < inserts.size(); i++) {
        final Feature feature = inserts.get(i);

        final PGobject jsonbObject = featureToPGobject(feature);

        if (feature.getGeometry() == null) {
            insertWithoutGeometryStmt.setObject(1, jsonbObject);
            insertWithoutGeometryStmt.addBatch();
            batchInsertWithoutGeometry = true;
        } else {
            insertStmt.setObject(1, jsonbObject);

            final WKBWriter wkbWriter = new WKBWriter(3);
            Geometry jtsGeometry = feature.getGeometry().getJTSGeometry();
            //Avoid NAN values
            assure3d(jtsGeometry.getCoordinates());
            insertStmt.setBytes(2, wkbWriter.write(jtsGeometry));

            insertStmt.addBatch();
            batchInsert = true;
        }
        collection.getFeatures().add(feature);
    }

    if (batchInsert) {
        insertStmt.executeBatch();
    }
    if (batchInsertWithoutGeometry) {
        insertWithoutGeometryStmt.executeBatch();
    }

    return collection;
}
 
Example 19  Project: gemfirexd-oss  File: TransactionDUnit.java
public void testMultipleInsertFromThinClient_bug44242() throws Exception {
  startServerVMs(2, 0, null);
  int port = startNetworkServer(1, null, null);
  Connection netConn = TestUtil.getNetConnection(port, null, null);
  netConn.createStatement().execute("create schema emp");
  netConn.close();

  for (int i = 0; i < 2; i++) {
    Connection netConn1 = TestUtil.getNetConnection(port, null, null);

    Connection netConn2 = TestUtil.getNetConnection(port, null, null);

    Statement s = netConn1.createStatement();
    String ext = "";
    if (i == 1) {
      ext = "replicate";
    }
    s.execute("create table emp.EMPLOYEE_parent(lastname varchar(30) "
        + "primary key, depId int)" + ext+ getSuffix());
    s.execute("create table emp.EMPLOYEE(lastname varchar(30) primary key, "
        + "depId int, foreign key(lastname) references "
        + "emp.EMPLOYEE_parent(lastname) on delete restrict)" + ext+ getSuffix());
    s.execute("insert into emp.EMPLOYEE_parent values('Jones', 10), "
        + "('Rafferty', 50), ('Robinson', 100)");

    netConn2.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
    netConn2.setAutoCommit(false);
    Statement s2 = netConn2.createStatement();
    s2.execute("delete from emp.EMPLOYEE_parent");
    s2.execute("select * from emp.employee_parent");
    netConn1.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
    netConn1.setAutoCommit(false);
    PreparedStatement pstmnt = netConn1
        .prepareStatement("INSERT INTO emp.employee VALUES (?, ?)");

    pstmnt.setString(1, "Jones");
    pstmnt.setInt(2, 33);
    pstmnt.addBatch();

    pstmnt.setString(1, "Rafferty");
    pstmnt.setInt(2, 31);
    pstmnt.addBatch();

    pstmnt.setString(1, "Robinson");
    pstmnt.setInt(2, 34);
    pstmnt.addBatch();

    try {
      pstmnt.executeBatch();
      netConn1.commit();
      fail("commit should have failed");
    } catch (SQLException e) {
      if (!"X0Z02".equals(e.getSQLState())) {
        throw e;
      }
    }
    netConn2.commit();

    s.execute("drop table emp.employee");
    s.execute("drop table emp.employee_parent");
  }
}
 
Example 20  Project: tuffylite  File: Clause.java
/**
 * Flush the instances of this clause into the database.
 * Here an "instance" is the list of constant IDs of one grounding,
 * stored together with its weight as a row in the DB.
 * @param db the database object used to store clause instances.
 */
protected void sealClauseInstances(RDB db) {
	if(!isTemplate){
		Clause.mappingFromID2Desc.put(this.id+ ".0", this.toString(-1));
		return;
	}
	// construct the SQL for adding instances
	ArrayList<String> atts = new ArrayList<String>();
	ArrayList<String> pholders = new ArrayList<String>();
	atts.add("weight FLOAT8");
	pholders.add("?");
	for(String v : metaVars) {
		if (Config.constants_as_raw_string) {
			atts.add(v + " TEXT");
		} else {
			atts.add(v + " INT");
		}
		pholders.add("?");
	}
	atts.add("myisfixed TEXT");
	pholders.add("?");
	
	
	db.dropTable(relIntanceClauses);
	String sql = "CREATE TABLE " + relIntanceClauses +
	StringMan.commaListParen(atts);
	db.update(sql);
	
	//TODO:
	db.dropSequence(relIntanceClauses+"_seq");
	sql = "CREATE SEQUENCE " + relIntanceClauses + "_seq;";
	db.update(sql);
	sql = "ALTER TABLE " + relIntanceClauses + " ADD myid INT;";
	db.update(sql);
	sql = "ALTER TABLE " + relIntanceClauses + " ALTER COLUMN myid SET " +
			"DEFAULT NEXTVAL('" + relIntanceClauses + "_seq');";
	db.update(sql);
	
	int instanceCount = 0;
	
	sql = "INSERT INTO " + relIntanceClauses + " VALUES" + 
	StringMan.commaListParen(pholders);
	PreparedStatement psAddMeta = db.getPrepareStatement(sql);
	
	int ni = 0;
	try {
		for(ClauseInstance ins : instances){
			double weight = ins.weight;
			ArrayList<Term> meta = ins.conList;
			psAddMeta.setDouble(1, weight);
			for(int k=0; k<meta.size(); k++) {
				if (Config.constants_as_raw_string) {
					psAddMeta.setString(k+2, meta.get(k).constantString());
				} else {
					psAddMeta.setInt(k+2, meta.get(k).constant());
				}
			}
			psAddMeta.setString(meta.size()+2, ins.isFixedWeight == true? "fixed" : "");
			instanceCount ++;
			Clause.mappingFromID2Desc.put(this.id+ "." + instanceCount, this.toString(ni++));
			psAddMeta.addBatch();
		}
		psAddMeta.executeBatch();
		psAddMeta.close();
	} catch (SQLException e) {
		ExceptionMan.handle(e);
	}
	psAddMeta = null;
}