java.sql.ResultSet#isAfterLast() Source Code Examples

Listed below are example usages of java.sql.ResultSet#isAfterLast(); you can also follow the links to view the full source code on GitHub.
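
As a quick orientation before the project examples, here is a minimal, self-contained sketch of the usual isAfterLast() pattern: advance the cursor with next() until it returns false, at which point the cursor sits after the last row. The H2 in-memory URL and the query are placeholders chosen for this sketch, not taken from any example below.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class IsAfterLastDemo {
    public static void main(String[] args) throws SQLException {
        // Placeholder connection and query; assumes the H2 driver is on the classpath.
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT 1 AS id")) {

            // Right after execution the cursor is positioned before the first row.
            System.out.println("isBeforeFirst: " + rs.isBeforeFirst());

            // Consume every row; next() returns false once the cursor moves past the last row.
            while (rs.next()) {
                System.out.println("id = " + rs.getInt("id"));
            }

            // For a non-empty result set the cursor is now after the last row.
            // Per the JDBC spec, isAfterLast() returns false for an empty result set.
            System.out.println("isAfterLast: " + rs.isAfterLast());
        }
    }
}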

@Test public void testCommonCursorStates() throws SQLException {
  final ResultSet resultSet = getResultSet();

  // right after statement execution, result set is before first row
  assertTrue(resultSet.isBeforeFirst());

  // checking that return values of next and isAfterLast are coherent
  for (int c = 0; c < 3 && !resultSet.isAfterLast(); ++c) {
    assertTrue(resultSet.next() != resultSet.isAfterLast());
  }

  // result set is not closed yet, despite fully consumed
  assertFalse(resultSet.isClosed());

  resultSet.close();

  // result set is now closed
  assertTrue(resultSet.isClosed());

  // once closed, next should fail
  thrown.expect(SQLException.class);
  resultSet.next();
}
 
Example 2   Project: wasindoor   File: ReadingInMeasurementHome.java
/**
 * @see EntityHome#parseResultRow(ResultSet, int)
 */
@Override
public ReadingInMeasurement parseResultRow(ResultSet rs, int fromIndex)
		throws SQLException {
	ReadingInMeasurement rinm = new ReadingInMeasurement();
	
	try {
		if (!rs.isAfterLast()) {
			rinm.setId(rs.getInt(fromIndex));
			rinm.setMeasurementId(rs.getInt(fromIndex + 1));
			rinm.setReadingId(rs.getInt(fromIndex + 2));
			rinm.setReadingClassName(rs.getString(fromIndex + 3));
		}
	} catch (SQLException e) {
		log.log(Level.SEVERE, "parseResultRow failed: " + e.getMessage(), e);
		throw e;
	}
	
	return rinm;
}
 
Example 3   Project: quark   File: JdbcDB.java
private ImmutableMap<String, Schema> getSchemaFromResultSet(ResultSet rs,
                                                            ImmutableMap<String, Integer>
                                                                dataTypes)
    throws SQLException {
  if (rs == null || !rs.next()) {
    return ImmutableMap.of();
  }
  ImmutableMap.Builder<String, Schema> schemaBuilder = new ImmutableMap.Builder<>();

  while (!rs.isAfterLast()) {
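    // Note: the loop advances only indirectly. The JdbcSchema constructor below consumes
    // rows from `rs` (see JdbcSchema.java in Example 10) until the schema name changes or
    // the result set is exhausted, so isAfterLast() eventually becomes true.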
    String currentSchema = rs.getString(1);
    String schemaKey = currentSchema;
    if (!this.isCaseSensitive()) {
      schemaKey = currentSchema.toUpperCase();
    }

    schemaBuilder.put(schemaKey, new JdbcSchema(currentSchema,
        rs, this.isCaseSensitive(), dataTypes));
  }
  return schemaBuilder.build();
}
 
@Test(expected = SQLFeatureNotSupportedException.class)
public void assertIsAfterLast() throws SQLException {
    for (ResultSet each : resultSets) {
        each.isAfterLast();
    }
}
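
The test above expects SQLFeatureNotSupportedException, which JDBC permits: support for isAfterLast() is optional for TYPE_FORWARD_ONLY result sets, so some drivers and wrappers throw instead of reporting the cursor position. Below is a minimal defensive sketch; the helper class and method names are illustrative, not taken from any project above.

import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;

final class CursorStateHelper {

    // Returns true only when the driver supports isAfterLast() and the cursor is actually
    // positioned after the last row; otherwise callers should rely on the boolean
    // returned by next() to detect the end of the result set.
    static boolean isSafelyAfterLast(ResultSet rs) throws SQLException {
        try {
            return rs.isAfterLast();
        } catch (SQLFeatureNotSupportedException e) {
            // isAfterLast() is an optional method for TYPE_FORWARD_ONLY result sets.
            return false;
        }
    }
}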
 
Example 5   Project: lams   File: Loader.java
/**
 * Loads a single logical row from the result set moving forward.  This is the
 * processing used from the ScrollableResults where there were collection fetches
 * encountered; thus a single logical row may have multiple rows in the underlying
 * result set.
 *
 * @param resultSet The result set from which to do the load.
 * @param session The session from which the request originated.
 * @param queryParameters The query parameters specified by the user.
 * @param returnProxies Should proxies be generated
 *
 * @return The loaded "row".
 *
 * @throws HibernateException
 */
public Object loadSequentialRowsForward(
		final ResultSet resultSet,
		final SharedSessionContractImplementor session,
		final QueryParameters queryParameters,
		final boolean returnProxies) throws HibernateException {

	// note that for sequential scrolling, we make the assumption that
	// the first persister element is the "root entity"

	try {
		if ( resultSet.isAfterLast() ) {
			// don't even bother trying to read further
			return null;
		}

		if ( resultSet.isBeforeFirst() ) {
			resultSet.next();
		}

		// We call getKeyFromResultSet() here so that we can know the
		// key value upon which to perform the breaking logic.  However,
		// it is also then called from getRowFromResultSet() which is certainly
		// not the most efficient.  But the call here is needed, and there
		// currently is no other way without refactoring of the doQuery()/getRowFromResultSet()
		// methods
		final EntityKey currentKey = getKeyFromResultSet(
				0,
				getEntityPersisters()[0],
				null,
				resultSet,
				session
		);

		return sequentialLoad( resultSet, session, queryParameters, returnProxies, currentKey );
	}
	catch (SQLException sqle) {
		throw factory.getJdbcServices().getSqlExceptionHelper().convert(
				sqle,
				"could not perform sequential read of results (forward)",
				getSQLString()
		);
	}
}
 
Example 6   Project: wasindoor   File: MeasurementHome.java
@Override
public synchronized Measurement parseResultRow(final ResultSet rs, int fromIndex)
		throws SQLException {
	Measurement m = new Measurement();
	
	try {
		if (!rs.isAfterLast()) {
			int mId = rs.getInt(fromIndex);
			m.setId(mId);
			m.setTimestamp(rs.getLong(fromIndex + 1));
			String readingClassName = rs.getString(fromIndex + 2);
			
			if (readingClassName == null) {
				// there are no readings in measurement
				rs.next();
			} else {	
				while(!rs.isAfterLast() && mId == rs.getInt(fromIndex) ) {
					readingClassName = rs.getString(fromIndex + 2);
					if (HomeFactory.getWiFiReadingVectorHome().getContainedObjectClassName().equals(readingClassName)) {
						m.setWiFiReadings(HomeFactory.getWiFiReadingVectorHome().parseResultRow(rs, fromIndex + 3));
					} else if (HomeFactory.getGSMReadingVectorHome().getContainedObjectClassName().equals(readingClassName)) {
						m.setGSMReadings(HomeFactory.getGSMReadingVectorHome().parseResultRow(rs, fromIndex + 3 + HomeFactory.getWiFiReadingHome().getTableCols().length + 1));
					} else if (HomeFactory.getBluetoothReadingVectorHome().getContainedObjectClassName().equals(readingClassName)) {
						m.setBluetoothReadings(HomeFactory.getBluetoothReadingVectorHome().parseResultRow(rs, fromIndex + 3 + HomeFactory.getGSMReadingHome().getTableCols().length + 1 + HomeFactory.getWiFiReadingHome().getTableCols().length + 1));
					} else {
						log.fine("Result row has no matching readingClassName " + readingClassName);
						rs.next();
					}
				}
				/*
				 * We would adjust the cursor position here, because the last VectorHome read one
				 * row too far in order to see whether the readings are finished. Unfortunately
				 * that is not possible: the SQLite JDBC driver only supports forward-only cursors.
				 * See the comment in FingerprintHome#get(String).
				 *
				 * rs.previous();
				 */
				
			}
		}
	} catch (SQLException e) {
		log.log(Level.SEVERE, "parseResultRow failed: " + e.getMessage(), e);
		throw e;
	}
	
	return m;

}
 
Example 7   Project: wasindoor   File: FingerprintHome.java
@Override
protected synchronized  List<Fingerprint> get(String constraint) {
	List<Fingerprint> res = new ArrayList<Fingerprint>();
	
	String sql = getSelectSQL();
	if (constraint != null && constraint.length() > 0) sql += " WHERE " + constraint;
	String order = getOrder();
	if (order != null && order.length() > 0) sql += " ORDER BY " + order;
	
	
	log.finest(sql);
 
	ResultSet rs = null;

	try {
 
		rs = DaoUtil.queryData(sql, new Object[]{});
		boolean first = true;
		while(!rs.isAfterLast()) {
			/*
			 * Only advance the cursor the first time through the loop: the reading vector homes
			 * (WiFiReadingVectorHome#parseResultRow(), ...) already advance the cursor one row
			 * too far in order to know whether all readings of that type have been fetched.
			 * If we advanced the cursor once more here, we would skip a row.
			 * Unfortunately we cannot go one row back (which would be the cleaner solution),
			 * because the SQLite driver only supports forward-only cursors.
			 */
			
			if(first) {
				if(!rs.next()) {
					//empty result set
					break;
				}
				first = false;
			}

			res.add(parseResultRow(rs));
		}
	} catch (SQLException e) {
		log.log(Level.SEVERE, "get failed: " + e.getMessage(), e);
	} finally {
		try {
			if (rs != null) rs.close();
 
		} catch (SQLException es) {
			log.log(Level.WARNING, "failed to close ResultSet: " + es.getMessage(), es);
		}
	}
	
	return res;
}
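
Examples 6 and 7 work around the SQLite JDBC driver's forward-only cursors, which make rs.previous() unavailable after the one-row overshoot. Below is a minimal sketch, assuming a scrollable driver may sometimes be available, that checks the cursor type before attempting to step back; the class and method names are illustrative, not part of the wasindoor code.

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

final class ScrollAwareReader {

    // Requests a scrollable cursor, but only calls previous() if the driver actually
    // granted one; forward-only drivers (such as SQLite's) may downgrade the requested type.
    static void readWithOptionalRewind(Connection conn, String sql) throws SQLException {
        try (Statement stmt = conn.createStatement(
                     ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
             ResultSet rs = stmt.executeQuery(sql)) {

            while (rs.next()) {
                // ... parse the current row ...
            }

            if (rs.getType() != ResultSet.TYPE_FORWARD_ONLY) {
                // Scrollable cursor: stepping back one row after an overshoot is allowed.
                rs.previous();
            }
            // With a forward-only cursor there is no rewind; the caller has to buffer the
            // overshot row itself, which is effectively what the homes above do.
        }
    }
}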
 
@Test(expected = SQLFeatureNotSupportedException.class)
public void assertIsAfterLast() throws SQLException {
    for (ResultSet each : resultSets) {
        each.isAfterLast();
    }
}
 
Example 9   Project: cacheonix-core   File: Loader.java
/**
 * Loads a single logical row from the result set moving forward.  This is the
 * processing used from the ScrollableResults where there were collection fetches
 * encountered; thus a single logical row may have multiple rows in the underlying
 * result set.
 *
 * @param resultSet The result set from which to do the load.
 * @param session The session from which the request originated.
 * @param queryParameters The query parameters specified by the user.
 * @param returnProxies Should proxies be generated
 * @return The loaded "row".
 * @throws HibernateException
 */
public Object loadSequentialRowsForward(
        final ResultSet resultSet,
        final SessionImplementor session,
        final QueryParameters queryParameters,
        final boolean returnProxies) throws HibernateException {

	// note that for sequential scrolling, we make the assumption that
	// the first persister element is the "root entity"

	try {
		if ( resultSet.isAfterLast() ) {
			// don't even bother trying to read further
			return null;
		}

		if ( resultSet.isBeforeFirst() ) {
			resultSet.next();
		}

		// We call getKeyFromResultSet() here so that we can know the
		// key value upon which to perform the breaking logic.  However,
		// it is also then called from getRowFromResultSet() which is certainly
		// not the most efficient.  But the call here is needed, and there
		// currently is no other way without refactoring of the doQuery()/getRowFromResultSet()
		// methods
		final EntityKey currentKey = getKeyFromResultSet(
				0,
				getEntityPersisters()[0],
				null,
				resultSet,
				session
			);

		return sequentialLoad( resultSet, session, queryParameters, returnProxies, currentKey );
	}
	catch ( SQLException sqle ) {
		throw JDBCExceptionHelper.convert(
		        factory.getSQLExceptionConverter(),
		        sqle,
		        "could not perform sequential read of results (forward)",
		        getSQLString()
			);
	}
}
 
Example 10   Project: quark   File: JdbcSchema.java
public JdbcSchema(String name, ResultSet resultSet, boolean isCaseSensitive,
                  ImmutableMap<String, Integer> dataTypes) throws SQLException {
  super(isCaseSensitive ? name : name.toUpperCase());

  ImmutableMap.Builder<String, Table> tableBuilder = new ImmutableMap.Builder<>();
  while (!resultSet.isAfterLast() && resultSet.getString(1).equals(name)) {
    ImmutableList.Builder<QuarkColumn> columnBuilder = new ImmutableList.Builder<>();
    String currentTable = resultSet.getString(2);
    while (resultSet.getString(2).equals(currentTable)) {
      String columnName = resultSet.getString(3);
      if (!isCaseSensitive) {
        columnName = columnName.toUpperCase();
      }
      Integer dataType = null;
      for (String key: dataTypes.keySet()) {
        if (resultSet.getString(4).toUpperCase().matches(key)) {
          dataType = dataTypes.get(key);
          break;
        }
      }

      if (dataType == null) {
        throw new SQLException("DataType `" + resultSet.getString(4) + "` is not supported");
      }

      columnBuilder.add(new QuarkColumn(columnName, dataType));
      LOG.debug("Adding column:  " + resultSet.getString(1) + " : "
          + resultSet.getString(2) + " : "
          + resultSet.getString(3) + " : "
          + resultSet.getString(4));
      if (!resultSet.next()) {
        break;
      }
    }

    if (!isCaseSensitive) {
      currentTable = currentTable.toUpperCase();
    }
    tableBuilder.put(currentTable, new QuarkTable(this, currentTable, columnBuilder.build()));
  }

  tableMap = tableBuilder.build();
}
 
Example 11   Project: phoenix   File: AlterMultiTenantTableWithViewsIT.java
public static void assertTableDefinition(Connection conn, String fullTableName, PTableType tableType, String parentTableName, int sequenceNumber, int columnCount, int baseColumnCount, String... columnName) throws Exception {
    String schemaName = SchemaUtil.getSchemaNameFromFullName(fullTableName);
    String tableName = SchemaUtil.getTableNameFromFullName(fullTableName);
    PreparedStatement p = conn.prepareStatement("SELECT * FROM \"SYSTEM\".\"CATALOG\" WHERE TABLE_SCHEM=? AND TABLE_NAME=? AND TABLE_TYPE=?");
    p.setString(1, schemaName);
    p.setString(2, tableName);
    p.setString(3, tableType.getSerializedValue());
    ResultSet rs = p.executeQuery();
    assertTrue(rs.next());
    assertEquals(AlterTableWithViewsIT.getSystemCatalogEntriesForTable(conn, fullTableName, "Mismatch in BaseColumnCount"), baseColumnCount, rs.getInt("BASE_COLUMN_COUNT"));
    assertEquals(AlterTableWithViewsIT.getSystemCatalogEntriesForTable(conn, fullTableName, "Mismatch in columnCount"), columnCount, rs.getInt("COLUMN_COUNT"));
    assertEquals(AlterTableWithViewsIT.getSystemCatalogEntriesForTable(conn, fullTableName, "Mismatch in sequenceNumber"), sequenceNumber, rs.getInt("TABLE_SEQ_NUM"));
    rs.close();

    ResultSet parentTableColumnsRs = null; 
    if (parentTableName != null) {
        parentTableColumnsRs = conn.getMetaData().getColumns(null, null, parentTableName, null);
        parentTableColumnsRs.next();
    }
    
    ResultSet viewColumnsRs = conn.getMetaData().getColumns(null, schemaName, tableName, null);
    for (int i = 0; i < columnName.length; i++) {
        if (columnName[i] != null) {
            assertTrue(viewColumnsRs.next());
            assertEquals(AlterTableWithViewsIT.getSystemCatalogEntriesForTable(conn, fullTableName, "Mismatch in columnName: i=" + i), columnName[i], viewColumnsRs.getString(PhoenixDatabaseMetaData.COLUMN_NAME));
            int viewColOrdinalPos = viewColumnsRs.getInt(PhoenixDatabaseMetaData.ORDINAL_POSITION);
            assertEquals(AlterTableWithViewsIT.getSystemCatalogEntriesForTable(conn, fullTableName, "Mismatch in ordinalPosition: i=" + i), i+1, viewColOrdinalPos);
            // validate that all the columns in the base table are present in the view   
            if (parentTableColumnsRs != null && !parentTableColumnsRs.isAfterLast()) {
                ResultSetMetaData parentTableColumnsMetadata = parentTableColumnsRs.getMetaData();
                assertEquals(parentTableColumnsMetadata.getColumnCount(), viewColumnsRs.getMetaData().getColumnCount());
                int parentTableColOrdinalRs = parentTableColumnsRs.getInt(PhoenixDatabaseMetaData.ORDINAL_POSITION);
                assertEquals(AlterTableWithViewsIT.getSystemCatalogEntriesForTable(conn, fullTableName, "Mismatch in ordinalPosition of view and base table for i=" + i), parentTableColOrdinalRs, viewColOrdinalPos);
                for (int columnIndex = 1; columnIndex < parentTableColumnsMetadata.getColumnCount(); columnIndex++) {
                    String viewColumnValue = viewColumnsRs.getString(columnIndex);
                    String parentTableColumnValue = parentTableColumnsRs.getString(columnIndex);
                    if (!Objects.equal(viewColumnValue, parentTableColumnValue)) {
                        if (parentTableColumnsMetadata.getColumnName(columnIndex).equals(PhoenixDatabaseMetaData.TABLE_NAME)) {
                            assertEquals(parentTableName, parentTableColumnValue);
                            assertEquals(fullTableName, viewColumnValue);
                        } 
                    }
                }
                parentTableColumnsRs.next();
            }
        }
    }
    assertFalse(AlterTableWithViewsIT.getSystemCatalogEntriesForTable(conn, fullTableName, ""), viewColumnsRs.next());
}