java.sql.PreparedStatement#addBatch() source code examples

The following lists example code for java.sql.PreparedStatement#addBatch(); each snippet comes from the project and file named in its heading, and the full source can be viewed in the corresponding GitHub repository.
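Before the project-specific examples, here is a minimal sketch of the pattern they all share: bind parameters for one row, call addBatch() to queue it, repeat, then call executeBatch() to send the whole batch. The table name, column, and JDBC URL below are placeholders chosen for illustration, not taken from any of the projects listed.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;

public class AddBatchSketch {
    // Placeholder table and URL; adjust for your own environment.
    static void insertNames(String jdbcUrl, List<String> names) throws SQLException {
        try (Connection conn = DriverManager.getConnection(jdbcUrl);
             PreparedStatement ps = conn.prepareStatement(
                     "insert into demo_names (name) values (?)")) {
            for (String name : names) {
                ps.setString(1, name); // bind the current row
                ps.addBatch();         // queue it; nothing is executed yet
            }
            // executeBatch() runs every queued row; how the driver groups them
            // into round trips is implementation-specific.
            int[] counts = ps.executeBatch();
            System.out.println("batched rows executed: " + counts.length);
        }
    }
}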

Example 1, Project: SimpleFlatMapper, File: DefaultCrud.java
protected <RH extends CheckedConsumer<? super K>> RH executeQueryPreparerInBatchMode(Connection connection, Collection<T> values, RH keyConsumer, QueryPreparer<T> queryPreparer) throws SQLException {
    PreparedStatement preparedStatement = queryPreparer.prepareStatement(connection);
    try {
        FieldMapper<T, PreparedStatement> mapper = queryPreparer.mapper();

        for (T value : values) {
            mapper.mapTo(value, preparedStatement, null);
            preparedStatement.addBatch();
        }

        preparedStatement.executeBatch();
        if (hasGeneratedKeys && keyConsumer != null) {
            handleGeneratedKeys(keyConsumer, preparedStatement);
        }
        return keyConsumer;
    } catch(Exception e) {
        ErrorHelper.rethrow(e);
    } finally {
        safeClose(preparedStatement);
    }
    return keyConsumer;
}
 
Example 2, Project: hermes, File: GenerateKeyTest.java
@Test
public void test() throws Exception {
	Class.forName("com.mysql.jdbc.Driver");
	Connection conn = DriverManager.getConnection("jdbc:mysql://127.0.0.1", "root", null);

	PreparedStatement stmt = conn.prepareStatement("insert into test.test (name) values (?)",
	      Statement.RETURN_GENERATED_KEYS);

	stmt.setString(1, "a");
	stmt.addBatch();
	stmt.setString(1, "b");
	stmt.addBatch();

	stmt.executeBatch();

	ResultSet rs = stmt.getGeneratedKeys();
	while (rs.next()) {
		System.out.println(rs.getLong(1));
	}
}
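The Hermes test above reads auto-generated keys back after executeBatch(). JDBC leaves it driver-dependent whether getGeneratedKeys() returns one key per batched row, and being a short test it does not close the statement or connection. A hedged variant of the same idea using try-with-resources; the table name is a placeholder:

// Assumes import java.sql.*; and import java.util.*;
static List<Long> batchInsertAndCollectKeys(Connection conn, String... names) throws SQLException {
    List<Long> ids = new ArrayList<>();
    try (PreparedStatement stmt = conn.prepareStatement(
            "insert into test.test (name) values (?)", Statement.RETURN_GENERATED_KEYS)) {
        for (String name : names) {
            stmt.setString(1, name);
            stmt.addBatch();
        }
        stmt.executeBatch();
        // One key per inserted row, if (and only if) the driver supports it for batches.
        try (ResultSet keys = stmt.getGeneratedKeys()) {
            while (keys.next()) {
                ids.add(keys.getLong(1));
            }
        }
    }
    return ids;
}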
 
Example 3, Project: BigDataArchitect, File: LocationCollector.java
@Override
public void collect(Configuration conf, BaseDimension key, BaseStatsValueWritable value, PreparedStatement pstmt, IDimensionConverter converter) throws SQLException, IOException {
    StatsLocationDimension locationDimension = (StatsLocationDimension) key;
    LocationReducerOutputValue locationReducerOutputValue = (LocationReducerOutputValue) value;

    int i = 0;
    pstmt.setInt(++i, converter.getDimensionIdByValue(locationDimension.getStatsCommon().getPlatform()));
    pstmt.setInt(++i, converter.getDimensionIdByValue(locationDimension.getStatsCommon().getDate()));
    pstmt.setInt(++i, converter.getDimensionIdByValue(locationDimension.getLocation()));
    pstmt.setInt(++i, locationReducerOutputValue.getUvs());
    pstmt.setInt(++i, locationReducerOutputValue.getVisits());
    pstmt.setInt(++i, locationReducerOutputValue.getBounceNumber());
    pstmt.setString(++i, conf.get(GlobalConstants.RUNNING_DATE_PARAMES));
    pstmt.setInt(++i, locationReducerOutputValue.getUvs());
    pstmt.setInt(++i, locationReducerOutputValue.getVisits());
    pstmt.setInt(++i, locationReducerOutputValue.getBounceNumber());

    pstmt.addBatch();
}
 
Example 4, Project: Mycat2, File: GoodsInsertJob.java
private int insert(Connection con,List<Map<String, String>> list) throws SQLException {
	PreparedStatement ps;
	String sql = "insert into goods (id,name ,good_type,good_img_url,good_created ,good_desc, price ) values(?,? ,?,?,? ,?, ?)";
	ps = con.prepareStatement(sql);
	for (Map<String, String> map : list) {
		ps.setLong(1, Long.parseLong(map.get("id")));
		ps.setString(2, (String) map.get("name"));
		ps.setShort(3, Short.parseShort(map.get("good_type")));
		ps.setString(4, (String) map.get("good_img_url"));
		ps.setString(5, (String) map.get("good_created"));
		ps.setString(6, (String) map.get("good_desc"));
		ps.setDouble(7, Double.parseDouble(map.get("price")));
		ps.addBatch();
	}
	ps.executeBatch();
	return list.size();
}
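The Mycat2 method sends the whole list as one batch. For very large lists, a common refinement (not part of the original code) is to flush every N rows so the driver-side buffer stays bounded; Example 20 below applies the same idea with a maxBatchSize counter. A sketch with a hypothetical chunk size:

// Assumes import java.sql.*; and import java.util.List; the chunk size is illustrative.
static void insertInChunks(PreparedStatement ps, List<long[]> rows) throws SQLException {
    final int chunkSize = 500; // hypothetical value
    int pending = 0;
    for (long[] row : rows) {
        ps.setLong(1, row[0]);
        ps.setLong(2, row[1]);
        ps.addBatch();
        if (++pending == chunkSize) {
            ps.executeBatch(); // flush the current chunk
            pending = 0;
        }
    }
    if (pending > 0) {
        ps.executeBatch(); // flush the remainder
    }
}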
 
Example 5, Project: ignite, File: CacheMvccStreamingInsertTest.java
/**
 * @throws Exception If failed.
 */
@Test
public void testUpdateWithOverwrite() throws Exception {
    conn.createStatement().execute("SET STREAMING 1 BATCH_SIZE 2 ALLOW_OVERWRITE 1 " +
        " PER_NODE_BUFFER_SIZE 1000 FLUSH_FREQUENCY 100");
    sqlNexus.query(q("insert into person values(1, 'ivan')"));

    PreparedStatement batchStmt = conn.prepareStatement("insert into person values(?, ?)");
    batchStmt.setInt(1, 1);
    batchStmt.setString(2, "foo");
    batchStmt.addBatch();
    batchStmt.setInt(1, 2);
    batchStmt.setString(2, "bar");
    batchStmt.addBatch();
    TimeUnit.MILLISECONDS.sleep(500);

    List<List<?>> rows = sqlNexus.query(q("select * from person")).getAll();
    List<List<?>> exp = asList(
        asList(1, "foo"),
        asList(2, "bar")
    );
    assertEquals(exp, rows);
}
 
/**
 * Stores identity data set.
 *
 * @param metadataSet
 * @throws IdentityException
 */
public void storeMetadataSet(IdentityMetadataDO[] metadataSet) throws IdentityException {
    Connection connection = IdentityDatabaseUtil.getDBConnection();
    PreparedStatement prepStmt = null;
    try {
        prepStmt = connection.prepareStatement(SQLQuery.STORE_META_DATA);
        for (IdentityMetadataDO metadata : metadataSet) {
            prepStmt.setString(1, metadata.getUserName());
            prepStmt.setInt(2, metadata.getTenantId());
            prepStmt.setString(3, metadata.getMetadataType());
            prepStmt.setString(4, metadata.getMetadata());
            prepStmt.setString(5, Boolean.toString(metadata.isValid()));
            prepStmt.addBatch();
        }
        prepStmt.executeBatch();
        IdentityDatabaseUtil.commitTransaction(connection);
    } catch (SQLException e) {
        IdentityDatabaseUtil.rollbackTransaction(connection);
        throw IdentityException.error("Error while storing user identity data", e);
    } finally {
        IdentityDatabaseUtil.closeStatement(prepStmt);
        IdentityDatabaseUtil.closeConnection(connection);
    }
}
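The storeMetadataSet() method above wraps the batch in an explicit transaction through its IdentityDatabaseUtil helper class. The same commit-on-success, rollback-on-failure pattern in plain JDBC looks roughly like this; the SQL string and bind order are placeholders, not the real STORE_META_DATA query:

// Assumes import java.sql.*; and import java.util.List; illustrative SQL only.
static void storeAllInOneTransaction(Connection conn, List<String[]> rows) throws SQLException {
    boolean oldAutoCommit = conn.getAutoCommit();
    conn.setAutoCommit(false);
    try (PreparedStatement ps = conn.prepareStatement(
            "insert into demo_metadata (user_name, metadata_type, metadata) values (?, ?, ?)")) {
        for (String[] row : rows) {
            ps.setString(1, row[0]);
            ps.setString(2, row[1]);
            ps.setString(3, row[2]);
            ps.addBatch();
        }
        ps.executeBatch();
        conn.commit();   // all rows or none
    } catch (SQLException e) {
        conn.rollback(); // undo the partial batch
        throw e;
    } finally {
        conn.setAutoCommit(oldAutoCommit);
    }
}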
 
Example 7, Project: attic-polygene-java, File: AbstractSQLIndexing.java
private Integer storeValueCompositeProperty(
    Map<QualifiedName, PreparedStatement> qNameInsertPSs,
    PreparedStatement insertAllQNamesPS,
    Integer propertyPK,
    Long entityPK,
    QualifiedName qName,
    Object property,
    Integer parentQNameID
                                           )
    throws SQLException
{

    PreparedStatement ps = qNameInsertPSs.get( qName );
    insertAllQNamesPS.setInt( 1, propertyPK );
    insertAllQNamesPS.setLong( 2, entityPK );
    insertAllQNamesPS.addBatch();

    ps.setInt( 1, propertyPK );
    ps.setLong( 2, entityPK );
    ps.setObject( 3, parentQNameID, Types.INTEGER );
    this.storeVCClassIDUsingPS( ps, 4, property );
    ps.addBatch();

    return this.storePropertiesOfVC( qNameInsertPSs, insertAllQNamesPS, propertyPK, entityPK,
                                     property );
}
 
@Test
public void checkPrepareStatement() throws Throwable {
  try (Connection connection = getNewConnection("&retriesAllDown=6", true)) {
    Statement stmt = connection.createStatement();
    stmt.execute("drop table  if exists failt1");
    stmt.execute("create table failt1 (id int not null primary key auto_increment, tt int)");

    PreparedStatement preparedStatement =
        connection.prepareStatement("insert into failt1(id, tt) values (?,?)");

    int masterServerId = getServerId(connection);
    stopProxy(masterServerId);

    preparedStatement.setInt(1, 1);
    preparedStatement.setInt(2, 1);
    preparedStatement.addBatch();
    try {
      preparedStatement.executeBatch();
      fail();
    } catch (SQLException e) {
      // normal exception
    }
    restartProxy(masterServerId);
    stmt.execute("SELECT 1");
  }
}
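The failover test above only checks that executeBatch() throws once the server is unreachable. When a batch fails part-way through, drivers typically raise a BatchUpdateException whose getUpdateCounts() array reports which elements succeeded (whether the driver stops at the first failure or continues is implementation-specific). A short sketch of inspecting it:

// Assumes import java.sql.*;
static void executeBatchReportingFailures(PreparedStatement ps) throws SQLException {
    try {
        ps.executeBatch();
    } catch (BatchUpdateException e) {
        int[] counts = e.getUpdateCounts();
        for (int i = 0; i < counts.length; i++) {
            if (counts[i] == Statement.EXECUTE_FAILED) {
                System.err.println("batch element " + i + " failed");
            }
        }
        throw e; // still propagate after logging
    }
}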
 
Example 9, Project: carbon-identity, File: ApplicationDAOImpl.java
/**
 * Add Service provider properties
 *
 * @param dbConnection
 * @param spId
 * @param properties
 * @throws SQLException
 */
private void addServiceProviderProperties(Connection dbConnection, int spId,
        List<ServiceProviderProperty> properties, int tenantId)
        throws SQLException {
    String sqlStmt = ApplicationMgtDBQueries.ADD_SP_METADATA;
    PreparedStatement prepStmt = null;
    try {
        prepStmt = dbConnection.prepareStatement(sqlStmt);

        for (ServiceProviderProperty property : properties) {
            prepStmt.setInt(1, spId);
            prepStmt.setString(2, property.getName());
            prepStmt.setString(3, property.getValue());
            prepStmt.setString(4, property.getDisplayName());
            prepStmt.setInt(5, tenantId);
            prepStmt.addBatch();
        }
        prepStmt.executeBatch();

    } finally {
        IdentityApplicationManagementUtil.closeStatement(prepStmt);
    }
}
 
Example 10, Project: gemfirexd-oss, File: TradeBuyOrderDMLStmtJson.java
protected void addBatchInsert(PreparedStatement stmt, int oid, int cid, int sid, int qty,
    String status, Timestamp time, BigDecimal bid, int tid, boolean isPut) throws SQLException {
  
  JSONObject json = new JSONObject();
  String jsonLog ="";
  
  if (SQLTest.hasJSON && !SQLHelper.isDerbyConn(stmt.getConnection())) {
         json = getJSONObject(oid,cid,sid,qty,status,time,bid,tid);
         jsonLog = ",JSON_DETAILS: " +json.toJSONString();
  }
  
  Log.getLogWriter().info( (SQLHelper.isDerbyConn(stmt.getConnection())? "Derby - " :"gemfirexd - "  ) +  (isPut ? "putting " : "inserting ") + " into trade.buyorders with data OID:" + oid +
      ",CID:"+ cid + ",SID:" + sid + ",QTY:" + qty + ",STATUS:" + status +
      ",TIME:"+ time + ",BID:" + bid + ",TID:" + tid + jsonLog);
  
  stmt.setInt(1, oid);
  stmt.setInt(2, cid);
  stmt.setInt(3, sid);
  stmt.setInt(4, qty);
  stmt.setBigDecimal(5, bid);
  stmt.setTimestamp(6, time);
  stmt.setString(7, status);       
  stmt.setInt(8, tid);
  if (SQLTest.hasJSON && !SQLHelper.isDerbyConn(stmt.getConnection())) {
    Clob jsonClob = stmt.getConnection().createClob();
    jsonClob.setString(1, json.toJSONString());
    stmt.setClob(9, jsonClob);
  }
  stmt.addBatch();
}
 
Example 11, Project: spliceengine, File: PrepStmtNullTest.java
/**
 * Testing a Null Blob
 *
 * @exception SQLException if error occurs
 */
public void testNullBlob() throws SQLException {
    
    Connection con = getConnection();
    
    con.setAutoCommit(false);
    
    PreparedStatement pStmt = con.prepareStatement("insert into nullBlob values (?,?)");
    
    pStmt.setString(1,"blob");
    pStmt.setNull(2,java.sql.Types.BLOB);
    pStmt.addBatch();
    pStmt.setString(1,"blob1");
    pStmt.setNull(2,java.sql.Types.BLOB,"");
    pStmt.addBatch();
    
    pStmt.executeBatch();
    pStmt.close();
    commit();
    
    Statement stmt1 = con.createStatement();
    ResultSet rs = stmt1.executeQuery("select * from nullBlob");
    String [][]  expectedRows = new String[][] { { "blob", null },
    { "blob1", null } };
    JDBC.assertFullResultSet(rs, expectedRows);
    stmt1.close();
    commit();
    con.close();
}
 
Example 12, Project: gemfirexd-oss, File: MEE.java
public void submitTradeToMarket(Connection conn, ArrayList<TradeInfo> tradeRequest, 
    ArrayList<Long> tm_ids) throws SQLException {
  
  String insert = tradeToMarketWithDefaultId? insertTradeMarket : insertTradeMarketWithoutDefaultId;
  
  PreparedStatement ps = conn.prepareStatement(insert);
  for (int i = 0; i < tradeRequest.size(); i++) {
    ps.setString(1, tradeRequest.get(i).getSymbol()); // TI_S_SYMB,
    ps.setLong(2, tradeRequest.get(i).getTradeId()); // TI_T_ID
    ps.setInt(3, tradeRequest.get(i).getTradeQty()); //TI_T_QTY
    ps.setBigDecimal(4, tradeRequest.get(i).getPriceQuote()); // TT_T_BID_PRICE
    ps.setString(5, tradeRequest.get(i).getTradeType()); //T_TT_ID
    
    if (!tradeToMarketWithDefaultId) {
      ps.setLong(6, tm_ids.get(i));
    }
     
    if (TPCETest.logDML) {
      Log.getLogWriter().info(insert + " in batch TM_S_SYMB: " + tradeRequest.get(i).getSymbol()
          + " TM_T_ID:" + tradeRequest.get(i).getTradeId() + " TM_T_QTY: " + tradeRequest.get(i).getTradeQty()
          + " TM_T_BID_PRICE: " + tradeRequest.get(i).getPriceQuote() + " T_TT_ID: " + tradeRequest.get(i).getTradeType()
          + (tradeToMarketWithDefaultId ? "" :" and TM_ID: " + tm_ids.get(i)));
    }
    ps.addBatch();
  }
  int[] counts = ps.executeBatch();
  for (int count: counts) {
    if (count != 1) throw new TestException (insert + " failed insert all rows");
  }
   
}
 
Example 13, Project: Plan, File: LargeStoreQueries.java
public static Executable storeAllPingData(Map<UUID, List<Ping>> ofUsers) {
    if (Verify.isEmpty(ofUsers)) {
        return Executable.empty();
    }

    return new ExecBatchStatement(PingTable.INSERT_STATEMENT) {
        @Override
        public void prepare(PreparedStatement statement) throws SQLException {
            for (Map.Entry<UUID, List<Ping>> entry : ofUsers.entrySet()) {
                UUID uuid = entry.getKey();
                List<Ping> pings = entry.getValue();
                for (Ping ping : pings) {
                    UUID serverUUID = ping.getServerUUID();
                    long date = ping.getDate();
                    int minPing = ping.getMin();
                    int maxPing = ping.getMax();
                    double avgPing = ping.getAverage();

                    statement.setString(1, uuid.toString());
                    statement.setString(2, serverUUID.toString());
                    statement.setLong(3, date);
                    statement.setInt(4, minPing);
                    statement.setInt(5, maxPing);
                    statement.setDouble(6, avgPing);
                    statement.addBatch();
                }
            }
        }
    };
}
 
Example 14, Project: PGM, File: SQLDatastore.java
@Override
public void query(PreparedStatement statement) throws SQLException {
  statement.setLong(2, bitSettings(value));
  statement.setString(3, getId().toString());

  for (SettingValue unset : value.getKey().getPossibleValues()) {
    if (unset == value) continue;
    statement.setLong(1, bitSettings(unset));
    statement.addBatch();
  }

  statement.executeBatch();
}
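The PGM query above works because parameter bindings generally remain in force across addBatch() calls: parameters 2 and 3 are bound once and reused for every batched row, while only parameter 1 changes. If you do not want bindings to carry over between rows, call clearParameters() before binding the next row, as in this small sketch:

// Assumes import java.sql.*;
static void addTwoRowsWithoutCarryOver(PreparedStatement ps) throws SQLException {
    ps.setLong(1, 1L);
    ps.setString(2, "first");
    ps.addBatch();

    ps.clearParameters();      // drop the bindings from the previous row
    ps.setLong(1, 2L);
    ps.setString(2, "second"); // every parameter must now be re-bound
    ps.addBatch();

    ps.executeBatch();
}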
 
@Override
public List<Integer> removeApplications(List<Application> apps, int tenantId) throws DeviceManagementDAOException {
    Connection conn = null;
    PreparedStatement stmt = null;
    ResultSet rs = null;
    List<Integer> applicationIds = new ArrayList<>();
    try {
        conn = this.getConnection();
        conn.setAutoCommit(false);
        stmt = conn.prepareStatement("DELETE DM_APPLICATION WHERE APP_IDENTIFIER = ? AND TENANT_ID = ?",
                new String[]{"id"});

        for (Application app : apps) {
            stmt.setString(1, app.getApplicationIdentifier());
            stmt.setInt(2, tenantId);
            stmt.addBatch();
        }
        stmt.executeBatch();
        rs = stmt.getGeneratedKeys();
        if (rs.next()) {
            applicationIds.add(rs.getInt(1));
        }
        return applicationIds;
    } catch (SQLException e) {
        try {
            if (conn != null) {
                conn.rollback();
            }
        } catch (SQLException e1) {
            log.error("Error occurred while roll-backing the transaction", e);
        }
        throw new DeviceManagementDAOException("Error occurred while removing bulk application list", e);
    } finally {
        DeviceManagementDAOUtil.cleanupResources(stmt, rs);
    }
}
 
Example 16, Project: ontopia, File: SQLBatchOneToManyAggregate.java
@Override
protected void executeUpdate(PreparedStatement stm, String sql) throws Exception {
  // Add batch update
  if (debug) log.debug("Adding batch: " + sql);
  stm.addBatch();
}
 
Example 17, Project: gemfirexd-oss, File: AlterTableTest.java
public void testNWEA() throws Exception {
  // start a client and some servers
  // starting servers first to give them lesser VMIds than the client

  Connection conn = TestUtil.getConnection();
  Statement stmt = conn.createStatement();

  // Check for IDENTITY column with INT size added using ALTER TABLE
  stmt.execute("create table trade.customers (tid int, cid int not null, "
      + "primary key (cid), constraint cust_ck check (cid >= 0))");
  // first some inserts with gaps
  final int maxValue = 1000;
  int stepValue = 3;
  PreparedStatement pstmt = conn
      .prepareStatement("insert into trade.customers values (?, ?)");
  for (int v = 1; v <= maxValue; v += stepValue) {
    pstmt.setInt(1, v * stepValue);
    pstmt.setInt(2, v);
    pstmt.addBatch();
  }
  pstmt.executeBatch();

  stmt.execute("create table trade.customers1 (tid int, cid int not null, "
      + "primary key (cid), constraint cust1_ck check (cid >= 0))");
  // first some inserts with gaps
  final int maxValue1 = 1000;
  int stepValue1 = 3;
  PreparedStatement pstmt1 = conn
      .prepareStatement("insert into trade.customers1 values (?, ?)");
  for (int v = 1; v <= maxValue1; v += stepValue1) {
    pstmt1.setInt(1, v * stepValue1);
    pstmt1.setInt(2, v);
    pstmt1.addBatch();
  }
  pstmt1.executeBatch();

  // now add the GENERATED IDENTITY column specification
  stmt.execute("alter table trade.customers alter column cid "
      + "SET GENERATED ALWAYS AS IDENTITY");

  stmt.execute("alter table trade.customers1 alter column cid "
      + "SET GENERATED ALWAYS AS IDENTITY");
}
 
public void testPartitionOfflineBehaviourBug49563() throws Exception {
  stopAllVMs();
  startVMs(0, 1);
  startVMs(1,0);
  createDiskStore(true, 1);
  // Create a schema
  clientSQLExecute(1, "create schema trade");

  clientSQLExecute(1, "create table trade.buyorders(oid int not null constraint buyorders_pk primary key, cid int, sid int)  "
      + "partition by range   (sid) ( VALUES BETWEEN 0 AND 409, VALUES BETWEEN 409 AND 1102, VALUES BETWEEN 1102 AND 1251, VALUES BETWEEN 1251 AND 1477, "
      + "VALUES BETWEEN 1477 AND 1700, VALUES BETWEEN 1700 AND 100000)  REDUNDANCY 1 RECOVERYDELAY -1 STARTUPRECOVERYDELAY -1 " + getSuffix());
  Connection conn = TestUtil.getConnection();    

  PreparedStatement ps = conn.prepareStatement("insert into trade.buyorders values (?, ?, ?)");
  
  //Create bucket 1 on the existing servers.
  ps.setInt(1, 1);
  ps.setInt(2, 1);
  ps.setInt(3, 1);
  ps.execute();


  //start another server.
  startVMs(0, 1);
  
  // stop the first server
  stopVMNum(-1);

  //Now create a putall that will put a few entries into each bucket. Bucket 1 should
  //get a partition offline exception.
  
  //bucket 2
  ps.setInt(1, 2);
  ps.setInt(2, 2);
  ps.setInt(3, 500);
  ps.addBatch();
  
  //bucket 1
  ps.setInt(1, 3);
  ps.setInt(2, 3);
  ps.setInt(3, 400);
  ps.addBatch();
  
  //bucket 2
  ps.setInt(1, 4);
  ps.setInt(2, 4);
  ps.setInt(3, 600);
  ps.addBatch();
  addExpectedException(new int[] { 1 }, null, PartitionOfflineException.class);
  try {
    ps.executeBatch();
    fail("Should have failed with a partition offline exception.");
  } catch(SQLException sql) {
    assertEquals("X0Z09",sql.getSQLState());
  }

  removeExpectedException(new int[] { 1 }, null,
      PartitionOfflineException.class);

  // restart the server to enable dropping the diskstore in teardown
  restartVMNums(-1);
}
 
Example 19, Project: snowflake-jdbc, File: BindingDataIT.java
@Test
public void testBindNull() throws SQLException
{
  Connection connection = getConnection();
  Statement statement = connection.createStatement();
  statement.execute("create or replace table test_bind_null(id number, val " +
                    "number)");

  PreparedStatement preparedStatement = connection.prepareStatement(
      "insert into test_bind_null values (?, ?)");
  preparedStatement.setInt(1, 0);
  preparedStatement.setBigDecimal(2, null);
  preparedStatement.addBatch();

  preparedStatement.setInt(1, 1);
  preparedStatement.setNull(1, Types.INTEGER);
  preparedStatement.addBatch();

  preparedStatement.setInt(1, 2);
  preparedStatement.setObject(1, null, Types.BIGINT);
  preparedStatement.addBatch();

  preparedStatement.setInt(1, 3);
  preparedStatement.setObject(1, null, Types.BIGINT, 0);
  preparedStatement.addBatch();

  preparedStatement.executeBatch();

  ResultSet rs = statement.executeQuery("select * from test_bind_null " +
                                        "order by id asc");
  int count = 0;
  while (rs.next())
  {
    assertThat(rs.getBigDecimal("VAL"), is(nullValue()));
    count++;
  }

  assertThat(count, is(4));

  rs.close();
  preparedStatement.close();

  statement.execute("drop table if exists test_bind_null");
  connection.close();
}
 
Example 20, Project: gemfirexd-oss, File: BOPurgeExecutorServiceImpl.java
private void insertLogData(Connection pCtx) {
  int currentBatchSize = 0;
  try {
    PreparedStatement prepareStatement = pCtx
        .prepareStatement(SELECT_LAST_UPDATED_BO_LOG_RECORD);
    PreparedStatement insertStmt = pCtx
        .prepareStatement(INSERT_INTO_LOG_TABLE_FOR_PURGED_RECORDS);
    ResultSet listOfAllPurgeReadyBOIds = getListOfAllPurgeReadyBOIdsFromTempTable(pCtx);
    if (null != listOfAllPurgeReadyBOIds) {
      while (listOfAllPurgeReadyBOIds.next()) {
        String boId = listOfAllPurgeReadyBOIds.getString("BO_TXN_ID");
        prepareStatement.setString(1, boId);
        ResultSet resultSet = prepareStatement.executeQuery();
        while (resultSet.next()) {
          insertStmt.setString(1, resultSet.getString("BO_TXN_ID"));
          insertStmt.setString(2, resultSet.getString("BACKOFFICE_CODE"));
          insertStmt.setString(3, resultSet.getString("CHANNEL_NAME"));
          insertStmt.setString(4, resultSet.getString("TXN_TYPE"));
          insertStmt.setInt(5, MARK_AS_TO_PURGE);
          insertStmt.setString(6, resultSet.getString("MATCH_STATUS"));
          insertStmt.setString(7, resultSet.getString("MATCH_CATEG_ID"));
          insertStmt.setString(8, resultSet.getString("HIT_STATUS"));
          insertStmt.setString(9, resultSet.getString("ACTUAL_VALUE_DATE"));
          insertStmt.setString(10, "SCREENING_TIME");
          insertStmt.addBatch();
          currentBatchSize++;
          if (currentBatchSize > maxBatchSize) {
            insertStmt.executeBatch();
            pCtx.commit();
            currentBatchSize = 0;
          }
          break;
        }
      }
      insertStmt.executeBatch();
      pCtx.commit();
    }
  } catch (Exception e) {
    Log.getLogWriter().error("Component: UseCase1-SECURITAS:appstat|Event Severity: Fatal|Event Class: MatchingEngine|Description: Issue while insering raw data.-insertLogData Summary:  " + TestHelper.getStackTrace(e));
  }
}