java.sql.PreparedStatement#execute() source code examples

Listed below are example usages of java.sql.PreparedStatement#execute(); you can also view the full source in each project's repository on GitHub.
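Before the project examples, a quick reminder of how execute() itself behaves: it returns true when the first result is a ResultSet and false when it is an update count (or there is no result), so the caller follows up with getResultSet() or getUpdateCount(). Below is a minimal sketch of that pattern; it is not taken from any of the listed projects, and the DataSource and the users table mentioned in the usage note are assumptions for illustration only.

import javax.sql.DataSource;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

public class ExecuteSketch {

    // The DataSource is assumed to be configured elsewhere (connection pool, framework, etc.).
    public static void run(DataSource dataSource, String sql, Object... params) throws SQLException {
        try (Connection conn = dataSource.getConnection();
             PreparedStatement ps = conn.prepareStatement(sql)) {
            // JDBC parameter indexes are 1-based.
            for (int i = 0; i < params.length; i++) {
                ps.setObject(i + 1, params[i]);
            }
            if (ps.execute()) {
                // true: the first result is a ResultSet (typically a SELECT).
                try (ResultSet rs = ps.getResultSet()) {
                    while (rs.next()) {
                        System.out.println(rs.getObject(1));
                    }
                }
            } else {
                // false: the first result is an update count, or there is no result (e.g. DDL).
                System.out.println("update count: " + ps.getUpdateCount());
            }
        }
    }
}

For example, run(ds, "select name from users where id = ?", 42) walks the ResultSet branch, while run(ds, "delete from users where id = ?", 42) reports the affected row count.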

Example 1  Project: boubei-tss   File: FlowrateManager.java
protected Statement createRecords(Connection conn) throws SQLException {
    String insertSql = "insert into portal_flowrate(pageId, ip, visitTime) values(?, ?, ?)";
    
    PreparedStatement pstmt = conn.prepareStatement(insertSql);
    for (Iterator<?> it = records.iterator(); it.hasNext();) {
        FlowRate temp = (FlowRate) it.next();
        
        if(checkIsTheSameVisitor(conn, temp)) {
            continue;
        }
        
        int index = 1;
        pstmt.setLong(index++, temp.getPageId());
        pstmt.setString(index++, temp.getIp());
        pstmt.setTimestamp(index++, new Timestamp(temp.getVisitTime().getTime()));
        
        pstmt.execute();
    }
    return pstmt;
}
 
Example 2  Project: entando-core   File: MockUserDAO.java
/**
 * Executes the given SQL to mock the access date in the authuser table for a particular user.
 * @param username the user whose access date we want to set
 * @param accessDate the date to set; if null, the current date is used
 * @param sql the SQL provided by the calling method
 */
private void mockDate(String username, Date accessDate, String sql) {
	Date date = new Date(); 
	Connection conn = null;
	PreparedStatement stat = null;
	if (null == accessDate) accessDate = date;
	try {
		conn = this.getConnection();
		stat = conn.prepareStatement(sql);
		stat.setDate(1, new java.sql.Date(accessDate.getTime()));
		stat.setString(2, username);
		stat.execute();
	} catch (Throwable t) {
		_logger.error("Error setting date. username: {} - accessDate:{} - sql:{}", username, accessDate, sql, t);
		throw new RuntimeException("Error setting date", t);
		//processDaoException(t, "Errore nel settare data", "mockDate");
	}  finally {
		this.closeDaoResources(null, stat, conn);
	}
}
 
Example 3  Project: cosmic   File: AsyncJobDaoImpl.java
@Override
@DB
public void resetJobProcess(final long msid, final int jobResultCode, final String jobResultMessage) {
    final String sql = "UPDATE async_job SET job_status=?, job_result_code=?, job_result=? where job_status=? AND (job_executing_msid=? OR (job_executing_msid IS NULL AND " +
            "job_init_msid=?))";
    final TransactionLegacy txn = TransactionLegacy.currentTxn();
    PreparedStatement pstmt = null;
    try {
        pstmt = txn.prepareAutoCloseStatement(sql);
        pstmt.setInt(1, JobInfo.Status.FAILED.ordinal());
        pstmt.setInt(2, jobResultCode);
        pstmt.setString(3, jobResultMessage);
        pstmt.setInt(4, JobInfo.Status.IN_PROGRESS.ordinal());
        pstmt.setLong(5, msid);
        pstmt.setLong(6, msid);
        pstmt.execute();
    } catch (final SQLException e) {
        s_logger.warn("Unable to reset job status for management server " + msid, e);
    }
}
 
@Test
public void testMultiRouterResult9() throws Exception {
    DataSource ds = (DataSource) context.getBean("zebraDS");
    Connection conn = null;
    try {
        conn = ds.getConnection();
        PreparedStatement stmt = conn.prepareStatement("select max(score) m_score from test");
        stmt.execute();
        ResultSet rs = stmt.getResultSet();
        List<Integer> rows = new ArrayList<Integer>();
        while (rs.next()) {
            rows.add(rs.getInt("m_score"));
        }
        Assert.assertEquals(20, rows.get(0).intValue());
    } catch (Exception e) {
        Assert.fail();
    } finally {
        if (conn != null) {
            conn.close();
        }
    }
}
 
Example 5  Project: Kepler   File: TeleporterDao.java
public static void addPair(int itemId, int linkedId) throws SQLException {
    Connection sqlConnection = null;
    PreparedStatement preparedStatement = null;

    try {
        sqlConnection = Storage.getStorage().getConnection();
        preparedStatement = Storage.getStorage().prepare("INSERT INTO items_teleporter_links (item_id, linked_id) VALUES (?, ?)", sqlConnection);
        preparedStatement.setInt(1, itemId);
        preparedStatement.setInt(2, linkedId);
        preparedStatement.execute();
    } catch (Exception e) {
        Storage.logError(e);
        throw e;
    } finally {
        Storage.closeSilently(preparedStatement);
        Storage.closeSilently(sqlConnection);
    }
}
 
Example 6  Project: translationstudio8   File: DBOperator.java
/**
 * Writes the content of a MartifHeader node.
 * @param hContent
 *            the content of the whole node
 * @param hIdAttr
 *            the ID attribute of the MartifHeader node
 * @return the generated key of the inserted row, or 0 if no key was returned
 * @throws SQLException
 */
public int insertBMartifHeader(String hContent, String hIdAttr) throws SQLException {
	PreparedStatement stmt = null;
	try {
		String sql = dbConfig.getOperateDbSQL("insert-bmartifheader");
		stmt = conn.prepareStatement(sql, Statement.RETURN_GENERATED_KEYS);
		stmt.setString(1, hIdAttr);
		stmt.setString(2, hContent);
		stmt.execute();
		ResultSet rs = stmt.getGeneratedKeys();
		if (rs.next()) {
			return rs.getInt(1);
		}
	} finally {
		if (stmt != null) {
			stmt.close();
		}
	}
	return 0;
}
 
Example 7  Project: gemfirexd-oss   File: TradeBuyOrderDMLStmtJson.java
@SuppressWarnings("unchecked")
private void updateCustomerWithNewArray(Connection conn, int cid, JSONObject jsonBuyorderObject, JSONArray jsonBuyorderArray) {

    jsonBuyorderObject.put("buyorder", jsonBuyorderArray);
    Log.getLogWriter().info("updating trade.customers for CID:" + cid + "  with buyorder_json:" + jsonBuyorderObject.toJSONString());
    String stmt = "update trade.customers set buyorder_json = ? where cid = " + cid;

    try {
        PreparedStatement ps = conn.prepareStatement(stmt);
        ps.setObject(1, jsonBuyorderObject.toJSONString());
        ps.execute();
    } catch (SQLException se) {
        if (se.getSQLState().equals("X0Z01") && isHATest) {
            Log.getLogWriter().info("Retrying the operation ");
            updateCustomerWithNewArray(conn, cid, jsonBuyorderObject, jsonBuyorderArray);
        } else {
            throw new TestException(TestHelper.getStackTrace(se));
        }
    }
}
 
Example 8  Project: Zebra   File: LimitTest.java
@Test
public void testLimit2_2() throws Exception {
	DataSource ds = (DataSource) context.getBean("zebraDS");
	Connection conn = null;
	try {
		conn = ds.getConnection();
		PreparedStatement stmt = conn.prepareStatement("select name from test limit 10,?");
		stmt.setInt(1, 5);
		stmt.execute();
		ResultSet rs = stmt.getResultSet();
		List<String> rows = new ArrayList<String>();
		while (rs.next()) {
			rows.add(rs.getString("name"));
		}
		Assert.assertEquals(5, rows.size());
	} catch (Exception e) {
		Assert.fail();
	} finally {
		if (conn != null) {
			conn.close();
		}
	}
}
 
Example 9  Project: phoenix   File: PhoenixMetricsSink.java
/**
 * Create a stats table with the given name. Stores the name for use later when creating upsert
 * statements
 *
 * @param conn  connection to use when creating the table
 * @param table name of the table to create
 * @throws SQLException if any Phoenix operation fails
 */
private void createTable(Connection conn, String table) throws SQLException {
    // only primary-key columns can be marked non-null
    String ddl =
            "create table if not exists " + table + "( " +
                    TRACE.columnName + " bigint not null, " +
                    PARENT.columnName + " bigint not null, " +
                    SPAN.columnName + " bigint not null, " +
                    DESCRIPTION.columnName + " varchar, " +
                    START.columnName + " bigint, " +
                    END.columnName + " bigint, " +
                    HOSTNAME.columnName + " varchar, " +
                    TAG_COUNT + " smallint, " +
                    ANNOTATION_COUNT + " smallint" +
                    "  CONSTRAINT pk PRIMARY KEY (" + TRACE.columnName + ", "
                    + PARENT.columnName + ", " + SPAN.columnName + "))\n" +
                    // We have a config parameter that can be set so that tables are
                    // transactional by default. If that's set, we still don't want these system
                    // tables created as transactional tables, make these table non
                    // transactional
                    PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE;
    PreparedStatement stmt = conn.prepareStatement(ddl);
    stmt.execute();
}
 
@Test
public void testDelete2() throws Exception {
    DataSource ds = (DataSource) context.getBean("zebraDS");
    Connection conn = null;
    try {
        conn = ds.getConnection();
        PreparedStatement stmt = conn.prepareStatement("delete from test where id=?");
        stmt.setInt(1, 1);
        stmt.execute();
        Statement stmt2 = conn.createStatement();
        stmt2.execute("select name from test where id=1");
        ResultSet rs = stmt2.getResultSet();
        while (rs.next()) {
            Assert.fail();
        }
        Assert.assertTrue(true);

    } catch (Exception e) {
        Assert.fail();
    } finally {
        if (conn != null) {
            conn.close();
        }
    }
}
 
Example 11  Project: phoenix   File: Array3IT.java
@Test
public void testServerArrayElementProjection3() throws SQLException {

    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    String table = generateUniqueName();
    String ddl = "CREATE TABLE   " + table + "  (p INTEGER PRIMARY KEY, arr1 INTEGER ARRAY, arr2 VARCHAR ARRAY, arr3 INTEGER ARRAY)";
    conn.createStatement().execute(ddl);
    conn.close();

    conn = DriverManager.getConnection(getUrl(), props);
    PreparedStatement stmt = conn.prepareStatement("UPSERT INTO   " + table + "  VALUES (1, ARRAY[1, 2], ARRAY['a', 'b'], ARRAY[2, 3])");
    stmt.execute();
    conn.commit();
    conn.close();

    conn = DriverManager.getConnection(getUrl(), props);
    ResultSet rs;
    stmt = conn.prepareStatement("SELECT arr1, arr1[1], arr2[1], arr3, arr3[1] from   " + table);
    rs = stmt.executeQuery();
    assertTrue(rs.next());
    assertEquals(conn.createArrayOf("INTEGER", new Integer[]{1, 2}), rs.getArray(1));
    assertEquals(1, rs.getInt(2));
    assertEquals("a", rs.getString(3));
    assertEquals(conn.createArrayOf("INTEGER", new Integer[]{2, 3}), rs.getArray(4));
    assertEquals(2, rs.getInt(5));
}
 
Example 12  Project: shardingsphere   File: AbstractJDBCImporter.java
private void executeInsert(final Connection connection, final DataRecord record) throws SQLException {
    String insertSql = sqlBuilder.buildInsertSQL(record);
    PreparedStatement ps = connection.prepareStatement(insertSql);
    ps.setQueryTimeout(30);
    try {
        for (int i = 0; i < record.getColumnCount(); i++) {
            ps.setObject(i + 1, record.getColumn(i).getValue());
        }
        ps.execute();
    } catch (SQLIntegrityConstraintViolationException ignored) {
    }
}
 
Example 13  Project: L2jOrg   File: SchemeBufferTable.java
public void saveSchemes() {
    try (Connection con = DatabaseFactory.getInstance().getConnection()) {
        // Delete all entries from database.
        PreparedStatement st = con.prepareStatement(DELETE_SCHEMES);
        st.execute();
        st.close();

        st = con.prepareStatement(INSERT_SCHEME);

        // Save _schemesTable content.
        for (var player : _schemesTable.entrySet()) {
            for (Map.Entry<String, ArrayList<Integer>> scheme : player.getValue().entrySet()) {
                // Build a String composed of skill ids separated by ",".
                final StringBuilder sb = new StringBuilder();
                for (int skillId : scheme.getValue()) {
                    sb.append(skillId + ",");
                }

                // Trim the trailing "," (only if something was appended).
                if (sb.length() > 0) {
                    sb.setLength(sb.length() - 1);
                }

                st.setInt(1, player.getKey());
                st.setString(2, scheme.getKey());
                st.setString(3, sb.toString());
                st.addBatch();
            }
        }
        st.executeBatch();
        st.close();
    } catch (Exception e) {
        LOGGER.warn("BufferTableScheme: Error while saving schemes : " + e);
    }
}
 
Example 14  Project: gemfirexd-oss   File: SingleHopTest.java
public void test0MaxConnectionsAndSetSchema() throws Exception {
  System.setProperty("gemfirexd.client.single-hop-max-connections", "100");
  com.pivotal.gemfirexd.internal.client.am.Connection.initialize();
  Properties props1 = new Properties();
  int mport = AvailablePort.getRandomAvailablePort(AvailablePort.JGROUPS);
  props1.put("mcast-port", String.valueOf(mport));
  setupConnection(props1);
  Connection conn = TestUtil.getConnection(props1);
  conn.createStatement().execute("create schema trade");
  Properties props = new Properties();
  props.setProperty("single-hop-enabled", "true");
  props.setProperty("single-hop-max-connections", "5");
  Connection connShop = startNetserverAndGetLocalNetConnection(props);
  assertEquals(100, com.pivotal.gemfirexd.internal.client.am.Connection.SINGLE_HOP_MAX_CONN_PER_SERVER);
  Statement s = connShop.createStatement();
  s.execute("set current schema trade");
  s.execute("create table example(c1 int not null, c2 int not null primary key) partition by primary key");
  s.execute("insert into trade.example values(1, 2), (2, 3)");
  PreparedStatement ps = connShop.prepareStatement("select * from example where c2 = ?");
  ps.setInt(1, 2);
  ps.execute();
  ResultSet rs = ps.getResultSet();
  rs.next();
  assertEquals(2, rs.getInt(2));
  System.clearProperty("gemfirexd.client.single-hop-max-connections");
  com.pivotal.gemfirexd.internal.client.am.Connection.initialize();
}
 
Example 15  Project: carbon-device-mgt   File: MySQLOperationDAOImpl.java
@Override
public boolean updateOperationStatus(int enrolmentId, int operationId, Operation.Status status)
        throws OperationManagementDAOException {
    PreparedStatement stmt = null;
    boolean isUpdated = false;
    try {
        long time = System.currentTimeMillis() / 1000;
        Connection connection = OperationManagementDAOFactory.getConnection();
        stmt = connection.prepareStatement("SELECT STATUS, UPDATED_TIMESTAMP FROM DM_ENROLMENT_OP_MAPPING " +
                                           "WHERE ENROLMENT_ID=? and OPERATION_ID=? FOR UPDATE");
        stmt.setInt(1, enrolmentId);
        stmt.setInt(2, operationId);
        if (stmt.execute()) {
            OperationManagementDAOUtil.cleanupResources(stmt);
            stmt = connection.prepareStatement("UPDATE DM_ENROLMENT_OP_MAPPING SET STATUS=?, UPDATED_TIMESTAMP=? " +
                                               "WHERE ENROLMENT_ID=? and OPERATION_ID=?");
            stmt.setString(1, status.toString());
            stmt.setLong(2, time);
            stmt.setInt(3, enrolmentId);
            stmt.setInt(4, operationId);
            int numOfRecordsUpdated = stmt.executeUpdate();
            if (numOfRecordsUpdated != 0) {
                isUpdated = true;
            }
        }
    } catch (SQLException e) {
        throw new OperationManagementDAOException("Error occurred while update device mapping operation status " +
                                                  "metadata", e);
    } finally {
        OperationManagementDAOUtil.cleanupResources(stmt);
    }
    return isUpdated;
}
 
Example 16  Project: MyTown2   File: MyTownDatasource.java
public boolean deleteFlag(Flag flag, Plot plot) {
    try {
        PreparedStatement deleteFlagStatement = prepare("DELETE FROM " + prefix + "PlotFlags WHERE name=? AND plotID=?", true);
        deleteFlagStatement.setString(1, flag.flagType.toString());
        deleteFlagStatement.setInt(2, plot.getDbID());
        deleteFlagStatement.execute();

        plot.flagsContainer.remove(flag.flagType);
    } catch (SQLException e) {
        LOG.error("Failed to delete flag {}!", flag.flagType.toString());
        LOG.error(ExceptionUtils.getStackTrace(e));
        return false;
    }
    return true;
}
 
Example 17  Project: phoenix   File: QueryDatabaseMetaDataIT.java
@Test
public void testCreateOnExistingTable() throws Exception {
    try (PhoenixConnection pconn =
            DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class)) {
        String tableName = generateUniqueName();// MDTEST_NAME;
        String schemaName = "";// MDTEST_SCHEMA_NAME;
        byte[] cfA = Bytes.toBytes(SchemaUtil.normalizeIdentifier("a"));
        byte[] cfB = Bytes.toBytes(SchemaUtil.normalizeIdentifier("b"));
        byte[] cfC = Bytes.toBytes("c");
        byte[][] familyNames = new byte[][] { cfB, cfC };
        byte[] htableName = SchemaUtil.getTableNameAsBytes(schemaName, tableName);
        Admin admin = pconn.getQueryServices().getAdmin();
        try {
            admin.disableTable(TableName.valueOf(htableName));
            admin.deleteTable(TableName.valueOf(htableName));
            admin.enableTable(TableName.valueOf(htableName));
        } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
        }

        TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(htableName));
        for (byte[] familyName : familyNames) {
            builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(familyName));
        }
        admin.createTable(builder.build());
        createMDTestTable(pconn, tableName,
            "a." + ColumnFamilyDescriptorBuilder.BLOCKSIZE+ "=" + 50000);

        TableDescriptor descriptor = admin.getDescriptor(TableName.valueOf(htableName));
        assertEquals(3, descriptor.getColumnFamilies().length);
        ColumnFamilyDescriptor cdA = descriptor.getColumnFamily(cfA);
        assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED, cdA.getKeepDeletedCells());
        assertNotEquals(ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKSIZE, cdA.getBlocksize());
        assertEquals(DataBlockEncoding.NONE, cdA.getDataBlockEncoding()); // Overridden using WITH
        assertEquals(1, cdA.getMaxVersions()); // Overridden using WITH
        ColumnFamilyDescriptor cdB = descriptor.getColumnFamily(cfB);
        // Allow KEEP_DELETED_CELLS to be false for VIEW
        assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED, cdB.getKeepDeletedCells());
        assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKSIZE, cdB.getBlocksize());
        assertEquals(DataBlockEncoding.NONE, cdB.getDataBlockEncoding()); // Should keep the
                                                                          // original value.
        // CF c should stay the same since it's not a Phoenix cf.
        ColumnFamilyDescriptor cdC = descriptor.getColumnFamily(cfC);
        assertNotNull("Column family not found", cdC);
        assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED, cdC.getKeepDeletedCells());
        assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKSIZE, cdC.getBlocksize());
        assertFalse(SchemaUtil.DEFAULT_DATA_BLOCK_ENCODING == cdC.getDataBlockEncoding());
        assertTrue(descriptor.hasCoprocessor(UngroupedAggregateRegionObserver.class.getName()));
        assertTrue(descriptor.hasCoprocessor(GroupedAggregateRegionObserver.class.getName()));
        assertTrue(descriptor.hasCoprocessor(ServerCachingEndpointImpl.class.getName()));
        admin.close();

        int rowCount = 5;
        String upsert = "UPSERT INTO " + tableName + "(id,col1,col2) VALUES(?,?,?)";
        PreparedStatement ps = pconn.prepareStatement(upsert);
        for (int i = 0; i < rowCount; i++) {
            ps.setString(1, Integer.toString(i));
            ps.setInt(2, i + 1);
            ps.setInt(3, i + 2);
            ps.execute();
        }
        pconn.commit();
        String query = "SELECT count(1) FROM " + tableName;
        ResultSet rs = pconn.createStatement().executeQuery(query);
        assertTrue(rs.next());
        assertEquals(rowCount, rs.getLong(1));

        query = "SELECT id, col1,col2 FROM " + tableName;
        rs = pconn.createStatement().executeQuery(query);
        for (int i = 0; i < rowCount; i++) {
            assertTrue(rs.next());
            assertEquals(Integer.toString(i), rs.getString(1));
            assertEquals(i + 1, rs.getInt(2));
            assertEquals(i + 2, rs.getInt(3));
        }
        assertFalse(rs.next());
    }
}
 
Example 18  Project: alfresco-repository   File: ExportDb.java
/**
 * Retrieves and process DB sequences for the schema validation check.
 * 
 * @param dbmd
 *            the database meta data
 * @param script
 *            the SQL script
 * @param schemaName
 *            the DB schema name
 * @param prefixFilter
 *            the DB tables prefix filter
 */
private void retrieveAndProcessSequences(DatabaseMetaData dbmd, String script, String schemaName, String prefixFilter)
        throws SQLException, IllegalArgumentException, IllegalAccessException
{
    if (log.isDebugEnabled())
    {
        log.debug("Retrieving DB sequences...");
    }

    PreparedStatement stmt = null;
    try
    {
        stmt = dbmd.getConnection().prepareStatement(script);
        stmt.setString(1, schemaName);
        stmt.setString(2, prefixFilter);

        boolean haveResults = stmt.execute();

        if (haveResults)
        {
            ResultSet sequences = stmt.getResultSet();

            if (log.isDebugEnabled())
            {
                log.debug("Sequences processing started...");
            }

            processTables(dbmd, sequences);

            if (log.isDebugEnabled())
            {
                log.debug("Sequences processing completed.");
            }
        }
    }
    finally
    {
        if (stmt != null)
        {
            try
            {
                stmt.close();
            }
            catch (Throwable e)
            {
                // Little can be done at this stage.
            }
        }
    }
}
 
Example 19  Project: phoenix   File: DeleteTest.java
private void testDeleteAllFromTable(boolean autoCommit) throws SQLException {
    Connection con = null;
    try {
        con = DriverManager.getConnection(getUrl());
        con.setAutoCommit(autoCommit);

        Statement stm = con.createStatement();
        stm.execute("CREATE TABLE IF NOT EXISTS web_stats (" +
                "HOST CHAR(2) NOT NULL," +
                "DOMAIN VARCHAR NOT NULL, " +
                "FEATURE VARCHAR NOT NULL, " +
                "DATE DATE NOT NULL, \n" + 
                "USAGE.CORE BIGINT," +
                "USAGE.DB BIGINT," +
                "STATS.ACTIVE_VISITOR INTEGER " +
                "CONSTRAINT PK PRIMARY KEY (HOST, DOMAIN, FEATURE, DATE))");
        stm.close();

        PreparedStatement psInsert = con
                .prepareStatement("UPSERT INTO web_stats(HOST, DOMAIN, FEATURE, DATE, CORE, DB, ACTIVE_VISITOR) VALUES(?,?, ? , ?, ?, ?, ?)");
        psInsert.setString(1, "AA");
        psInsert.setString(2, "BB");
        psInsert.setString(3, "CC");
        psInsert.setDate(4, new Date(0));
        psInsert.setLong(5, 1L);
        psInsert.setLong(6, 2L);
        psInsert.setLong(7, 3);
        psInsert.execute();
        psInsert.close();
        if (!autoCommit) {
            con.commit();
        }
        
        con.createStatement().execute("DELETE FROM web_stats");
        if (!autoCommit) {
            con.commit();
        }
        
        ResultSet rs = con.createStatement().executeQuery("SELECT /*+ NO_INDEX */ count(*) FROM web_stats");
        assertTrue(rs.next());
        assertEquals(0, rs.getLong(1));
    } finally {
        try {
            con.close();
        } catch (Exception ex) {
        }
    }
}
 
Example 20  Project: crate   File: SQLTransportExecutor.java
private static SQLResponse executeAndConvertResult(PreparedStatement preparedStatement) throws SQLException {
    if (preparedStatement.execute()) {
        ResultSetMetaData metaData = preparedStatement.getMetaData();
        ResultSet resultSet = preparedStatement.getResultSet();
        List<Object[]> rows = new ArrayList<>();
        List<String> columnNames = new ArrayList<>(metaData.getColumnCount());
        DataType[] dataTypes = new DataType[metaData.getColumnCount()];
        for (int i = 0; i < metaData.getColumnCount(); i++) {
            columnNames.add(metaData.getColumnName(i + 1));
        }
        while (resultSet.next()) {
            Object[] row = new Object[metaData.getColumnCount()];
            for (int i = 0; i < row.length; i++) {
                Object value;
                String typeName = metaData.getColumnTypeName(i + 1);
                value = getObject(resultSet, i, typeName);
                row[i] = value;
            }
            rows.add(row);
        }
        return new SQLResponse(
            columnNames.toArray(new String[0]),
            rows.toArray(new Object[0][]),
            dataTypes,
            rows.size()
        );
    } else {
        int updateCount = preparedStatement.getUpdateCount();
        if (updateCount < 0) {
            /*
             * In Crate -1 means row-count unknown, and -2 means error. In JDBC -2 means row-count unknown and -3 means error.
             * See {@link java.sql.Statement#EXECUTE_FAILED}
             */
            updateCount += 1;
        }
        return new SQLResponse(
            new String[0],
            new Object[0][],
            new DataType[0],
            updateCount
        );
    }
}
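 
A closing note on the insert loop in Example 1: calling execute() once per record works, but when many rows go through the same PreparedStatement, addBatch()/executeBatch() (the approach used in Example 13) usually cuts down the number of driver round trips. The sketch below reuses the portal_flowrate insert from Example 1; the FlowRate stand-in class and the way the records are obtained are assumptions for illustration only.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.Date;
import java.util.List;

public class FlowRateBatchSketch {

    /** Minimal stand-in for the FlowRate record type from Example 1. */
    public static class FlowRate {
        final long pageId;
        final String ip;
        final Date visitTime;

        public FlowRate(long pageId, String ip, Date visitTime) {
            this.pageId = pageId;
            this.ip = ip;
            this.visitTime = visitTime;
        }
    }

    public static void insertAll(Connection conn, List<FlowRate> records) throws SQLException {
        String sql = "insert into portal_flowrate(pageId, ip, visitTime) values(?, ?, ?)";
        try (PreparedStatement ps = conn.prepareStatement(sql)) {
            for (FlowRate record : records) {
                ps.setLong(1, record.pageId);
                ps.setString(2, record.ip);
                ps.setTimestamp(3, new Timestamp(record.visitTime.getTime()));
                ps.addBatch();     // queue this row instead of executing it immediately
            }
            ps.executeBatch();     // submit all queued rows in a single batch
        }
    }
}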