Source code examples for class org.apache.hadoop.hbase.client.HConnection

The examples below show how the org.apache.hadoop.hbase.client.HConnection API is used in open-source projects; each snippet is taken from the project and file named above it, and the full source can be viewed on GitHub.
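
Before the project-specific examples, here is a minimal, self-contained sketch of the typical HConnection lifecycle (create a connection from configuration, obtain a table, write one row, close). The table, column family, and qualifier names are placeholders, not taken from any project below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class HConnectionLifecycleSketch {
    public static void main(String[] args) throws IOException {
        // Build a configuration from hbase-site.xml on the classpath.
        Configuration conf = HBaseConfiguration.create();

        // HConnection is heavyweight and thread-safe; create it once and share it.
        HConnection connection = HConnectionManager.createConnection(conf);
        try {
            // HTableInterface handles are lightweight and not thread-safe.
            HTableInterface table = connection.getTable("demo_table"); // placeholder table name
            try {
                Put put = new Put(Bytes.toBytes("row-1"));
                put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
                table.put(put);
            } finally {
                table.close();
            }
        } finally {
            connection.close();
        }
    }
}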

Example 1  Project: titan1withtp3.1   File: HBaseStorageSetup.java
public synchronized static void waitForConnection(long timeout, TimeUnit timeoutUnit) {
    long before = System.currentTimeMillis();
    long after;
    long timeoutMS = TimeUnit.MILLISECONDS.convert(timeout, timeoutUnit);
    do {
        try {
            HConnection hc = HConnectionManager.createConnection(HBaseConfiguration.create());
            hc.close();
            after = System.currentTimeMillis();
            log.info("HBase server to started after about {} ms", after - before);
            return;
        } catch (IOException e) {
            log.info("Exception caught while waiting for the HBase server to start", e);
        }
        after = System.currentTimeMillis();
    } while (timeoutMS > after - before);
    after = System.currentTimeMillis();
    log.warn("HBase server did not start in {} ms", after - before);
}
 
Example 2  Project: hadoop-arch-book   File: RunLocalTest.java
private static void populateUserProfileData(HConnection connection) throws Exception {
  UserProfile up1 = new UserProfile();
  up1.userId = "101";
  up1.lastUpdatedTimeStamp = System.currentTimeMillis() - 1000;
  up1.historicAvg90PercentSingleDaySpend = 90.0;
  up1.historicAvgSingleDaySpend = 50.0;
  up1.todayMaxSpend = 0.0;
  up1.todayNumOfPurchases = 0L;
  HBaseUtils.populateUserProfile(connection, up1);

  up1.userId = "102";
  up1.lastUpdatedTimeStamp = System.currentTimeMillis() - 1000;
  up1.historicAvg90PercentSingleDaySpend = 90.0;
  up1.historicAvgSingleDaySpend = 50.0;
  up1.todayMaxSpend = 0.0;
  up1.todayNumOfPurchases = 0L;
  HBaseUtils.populateUserProfile(connection, up1);

  up1.userId = "103";
  up1.lastUpdatedTimeStamp = System.currentTimeMillis() - 1000;
  up1.historicAvg90PercentSingleDaySpend = 90.0;
  up1.historicAvgSingleDaySpend = 50.0;
  up1.todayMaxSpend = 0.0;
  up1.todayNumOfPurchases = 0L;
  HBaseUtils.populateUserProfile(connection, up1);
}
 
Example 3  Project: Kylin   File: GridTableHBaseBenchmark.java
public static void testGridTable(double hitRatio, double indexRatio) throws IOException {
    System.out.println("Testing grid table scanning, hit ratio " + hitRatio + ", index ratio " + indexRatio);
    String hbaseUrl = "hbase"; // use hbase-site.xml on classpath

    HConnection conn = HBaseConnection.get(hbaseUrl);
    createHTableIfNeeded(conn, TEST_TABLE);
    prepareData(conn);

    Hits hits = new Hits(N_ROWS, hitRatio, indexRatio);

    for (int i = 0; i < ROUND; i++) {
        System.out.println("==================================== ROUND " + (i + 1) + " ========================================");
        testRowScanWithIndex(conn, hits.getHitsForRowScanWithIndex());
        testRowScanNoIndexFullScan(conn, hits.getHitsForRowScanNoIndex());
        testRowScanNoIndexSkipScan(conn, hits.getHitsForRowScanNoIndex());
        testColumnScan(conn, hits.getHitsForColumnScan());
    }

}
 
Example 4  Project: Kylin   File: GridTableHBaseBenchmark.java
private static void fullScan(HConnection conn, boolean[] hits, Stats stats) throws IOException {
    HTableInterface table = conn.getTable(TEST_TABLE);
    try {
        stats.markStart();

        Scan scan = new Scan();
        scan.addFamily(CF);
        ResultScanner scanner = table.getScanner(scan);
        int i = 0;
        for (Result r : scanner) {
            if (hits[i])
                stats.consume(r);
            dot(i, N_ROWS);
            i++;
        }

        stats.markEnd();
    } finally {
        IOUtils.closeQuietly(table);
    }
}
 
Example 5  Project: Kylin   File: ImportHBaseData.java
public void setup() throws IOException {

        KylinConfig.destoryInstance();
        System.setProperty(KylinConfig.KYLIN_CONF, AbstractKylinTestCase.SANDBOX_TEST_DATA);

        kylinConfig = KylinConfig.getInstanceFromEnv();
        cli = kylinConfig.getCliCommandExecutor();

        String metadataUrl = kylinConfig.getMetadataUrl();
        // metadataUrl has the form <table_name_base>@<hbase_url>; split it at '@'
        int cut = metadataUrl.indexOf('@');
        tableNameBase = metadataUrl.substring(0, cut);
        String hbaseUrl = cut < 0 ? metadataUrl : metadataUrl.substring(cut + 1);

        HConnection conn = HBaseConnection.get(hbaseUrl);
        try {
            hbase = new HBaseAdmin(conn);
            config = hbase.getConfiguration();
            //allTables = hbase.listTables();
        } catch (IOException e) {
            e.printStackTrace();
            throw e;
        }

        uploadTarballToRemote();
    }
 
Example 6  Project: Kylin   File: HBaseConnection.java
public static HConnection get(String url) {
    // find configuration
    Configuration conf = ConfigCache.get(url);
    if (conf == null) {
        conf = HadoopUtil.newHBaseConfiguration(url);
        ConfigCache.put(url, conf);
    }

    HConnection connection = ConnPool.get(url);
    try {
        // No DCL (double-checked locking) here, since recreating a connection occasionally is not a big issue.
        if (connection == null) {
            connection = HConnectionManager.createConnection(conf);
            ConnPool.put(url, connection);
        }
    } catch (Throwable t) {
        throw new StorageException("Error when open connection " + url, t);
    }

    return connection;
}
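
A hedged usage sketch for the helper above: callers fetch the pooled connection by URL and close only the short-lived table handle, never the shared connection. The table name and row key below are placeholders.

// Hypothetical caller of HBaseConnection.get(); table name and row key are placeholders.
HConnection conn = HBaseConnection.get("hbase"); // "hbase" means: use hbase-site.xml on the classpath
HTableInterface table = conn.getTable("some_table");
try {
    Result r = table.get(new Get(Bytes.toBytes("some-row-key")));
    // ... use r ...
} finally {
    table.close(); // close the table handle; the pooled connection stays open
}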
 
Example 7  Project: Kylin   File: CubeSegmentTupleIterator.java
public CubeSegmentTupleIterator(CubeSegment cubeSeg, Collection<HBaseKeyRange> keyRanges, HConnection conn, Collection<TblColRef> dimensions, TupleFilter filter, Collection<TblColRef> groupBy, Collection<RowValueDecoder> rowValueDecoders, StorageContext context) {
    this.cube = cubeSeg.getCubeInstance();
    this.cubeSeg = cubeSeg;
    this.dimensions = dimensions;
    this.filter = filter;
    this.groupBy = groupBy;
    this.rowValueDecoders = rowValueDecoders;
    this.context = context;
    this.tableName = cubeSeg.getStorageLocationIdentifier();
    this.rowKeyDecoder = new RowKeyDecoder(this.cubeSeg);
    this.scanCount = 0;

    try {
        this.table = conn.getTable(tableName);
    } catch (Throwable t) {
        throw new StorageException("Error when open connection to table " + tableName, t);
    }
    this.rangeIterator = keyRanges.iterator();
    scanNextRange();
}
 
Example 8  Project: Kylin   File: SerializedHBaseTupleIterator.java
public SerializedHBaseTupleIterator(HConnection conn, List<HBaseKeyRange> segmentKeyRanges, CubeInstance cube, Collection<TblColRef> dimensions, TupleFilter filter, Collection<TblColRef> groupBy, Collection<RowValueDecoder> rowValueDecoders, StorageContext context) {

        this.context = context;
        int limit = context.getLimit();
        this.partialResultLimit = Math.max(limit, PARTIAL_DEFAULT_LIMIT);

        this.segmentIteratorList = new ArrayList<CubeSegmentTupleIterator>(segmentKeyRanges.size());
        Map<CubeSegment, List<HBaseKeyRange>> rangesMap = makeRangesMap(segmentKeyRanges);
        for (Map.Entry<CubeSegment, List<HBaseKeyRange>> entry : rangesMap.entrySet()) {
            CubeSegmentTupleIterator segIter = new CubeSegmentTupleIterator(entry.getKey(), entry.getValue(), conn, dimensions, filter, groupBy, rowValueDecoders, context);
            this.segmentIteratorList.add(segIter);
        }

        this.segmentIteratorIterator = this.segmentIteratorList.iterator();
        if (this.segmentIteratorIterator.hasNext()) {
            this.segmentIterator = this.segmentIteratorIterator.next();
        } else {
            this.segmentIterator = ITupleIterator.EMPTY_TUPLE_ITERATOR;
        }
    }
 
Example 9  Project: opensoc-streaming   File: CIFHbaseAdapter.java
@Override
public boolean initializeAdapter() {

	// Initialize HBase Table
	Configuration conf = null;
	conf = HBaseConfiguration.create();
	conf.set("hbase.zookeeper.quorum", _quorum);
	conf.set("hbase.zookeeper.property.clientPort", _port);

	try {
		LOGGER.debug("=======Connecting to HBASE===========");
		LOGGER.debug("=======ZOOKEEPER = "
				+ conf.get("hbase.zookeeper.quorum"));
		HConnection connection = HConnectionManager.createConnection(conf);
		table = connection.getTable(_tableName);
		return true;
	} catch (IOException e) {
		// TODO Auto-generated catch block
		LOGGER.debug("=======Unable to Connect to HBASE===========");
		e.printStackTrace();
	}

	return false;
}
 
Example 10  Project: opensoc-streaming   File: ThreatHbaseAdapter.java
@Override
public boolean initializeAdapter() {

	// Initialize HBase Table
	Configuration conf = null;
	conf = HBaseConfiguration.create();
	conf.set("hbase.zookeeper.quorum", _quorum);
	conf.set("hbase.zookeeper.property.clientPort", _port);

	try {
		LOGGER.debug("=======Connecting to HBASE===========");
		LOGGER.debug("=======ZOOKEEPER = "
				+ conf.get("hbase.zookeeper.quorum"));
		HConnection connection = HConnectionManager.createConnection(conf);
		table = connection.getTable(_tableName);
		return true;
	} catch (IOException e) {
		// TODO Auto-generated catch block
		LOGGER.debug("=======Unable to Connect to HBASE===========");
		e.printStackTrace();
	}

	return false;
}
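
Both adapters above keep only the HTableInterface and never close the underlying HConnection. A minimal cleanup counterpart might look like the following sketch; the cleanupAdapter method and the connection field are assumptions, not part of the OpenSOC code shown here.

// Hypothetical cleanup method; assumes the adapter also stored the HConnection
// in a field named "connection" (the snippets above keep only the table).
public void cleanupAdapter() {
    try {
        if (table != null) {
            table.close();
        }
        if (connection != null) {
            connection.close();
        }
    } catch (IOException e) {
        LOGGER.error("Error while closing HBase table/connection", e);
    }
}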
 
Example 11  Project: zerowing   File: Tailer.java
public Tailer(Configuration conf, Mongo mongo, HConnection hbase, String tailerName) {
  _conf = conf;
  _mongo = mongo;
  _hbase = hbase;
  _knownTables = new HashMap<String, HTable>();
  _translator = ConfigUtil.getTranslator(_conf);

  _stateTable = createStateTable();

  if (tailerName == null) {
    List<ServerAddress> addresses = _mongo.getAllAddress();
    tailerName = StringUtils.join(addresses, ",");
  }

  _tailerID = tailerName.getBytes();

  _skipUpdates = ConfigUtil.getSkipUpdates(_conf);
  _skipDeletes = ConfigUtil.getSkipDeletes(_conf);
  _bufferWrites = ConfigUtil.getBufferWrites(_conf);
}
 
Example 12  Project: hbase-tools   File: EmptyRegionChecker.java
public EmptyRegionChecker(HConnection connection, String tableName,
                          HRegionInfo regionInfo, Set<HRegionInfo> emptyRegions) {
    this.connection = connection;
    this.tableName = tableName;
    this.regionInfo = regionInfo;
    this.emptyRegions = emptyRegions;
}
 
Example 13  Project: bigdata-tutorial   File: HBaseSimpleDemo.java
public HConnection openConn() {
	if (null == hconn) {
		try {
			this.hconn = HConnectionManager.createConnection(config);
		} catch (Exception ex) {
			ex.printStackTrace();
		}
	}
	return hconn;
}
 
Example 14  Project: bigdata-tutorial   File: HBaseClientManager.java
public synchronized HConnection getConn() {
	if (null == conn) {
		try {
			this.conn = HConnectionManager.createConnection(config);
		} catch (Exception ex) {
			LOGGER.error("create conn err:", ex);
		}
	}
	return conn;
}
 
Example 15  Project: bigdata-tutorial   File: HBaseConnAbstractPool.java
public synchronized HConnection getConn() {
	if (null == conn) {
		try {
			this.conn = HConnectionManager.createConnection(this.config);
		} catch (Exception ex) {
			LOGGER.error("create conn err:", ex);
		}
	}
	return conn;
}
 
Example 16  Project: bigdata-tutorial   File: HBaseFactoryTest.java
public HConnection openConn() {
	if (null == conn) {
		try {
			this.conn = HConnectionManager.createConnection(config);
		} catch (Exception ex) {
			ex.printStackTrace();
		}
	}
	return conn;
}
 
Example 17  Project: tajo   File: HBasePutAppender.java
@Override
public void init() throws IOException {
  super.init();

  HBaseTablespace space = (HBaseTablespace) TablespaceManager.get(uri);
  HConnection hconn = space.getConnection();
  htable = hconn.getTable(columnMapping.getHbaseTableName());
  htable.setAutoFlushTo(false);
  htable.setWriteBufferSize(5 * 1024 * 1024);
}
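
Because auto-flush is disabled and a 5 MB write buffer is configured, buffered Puts reach the region servers only when the buffer fills or the table is flushed or closed. A hedged sketch of the matching cleanup (not part of the snippet above):

// Hypothetical close path: flush buffered Puts explicitly, then release the table handle.
htable.flushCommits();
htable.close();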
 
Example 18  Project: tajo   File: HBaseTablespace.java
@Override
public void close() {
  synchronized (connMap) {
    for (HConnection eachConn: connMap.values()) {
      try {
        eachConn.close();
      } catch (Exception e) {
        LOG.error(e.getMessage(), e);
      }
    }
  }
}
 
Example 19  Project: tajo   File: HBaseTablespace.java
public HConnection getConnection() throws IOException {
  synchronized(connMap) {
    HConnectionKey key = new HConnectionKey(hbaseConf);
    HConnection conn = connMap.get(key);
    if (conn == null) {
      conn = HConnectionManager.createConnection(hbaseConf);
      connMap.put(key, conn);
    }

    return conn;
  }
}
 
Example 20  Project: hadoop-arch-book   File: RunLocalTest.java
private static void populateValidationRules(HConnection connection) throws Exception {
  HashSet<String> banndedVandors = new HashSet<String>();
  banndedVandors.add("badVendor");

  ValidationRules rules = new ValidationRules(banndedVandors, 2.0);

  HBaseUtils.populateValidationRules(connection, rules);
}
 
Example 21  Project: hadoop-arch-book   File: HBaseUtils.java
public static void populateUserProfile(HConnection connection, UserProfile userProfile) throws Exception {
  HTableInterface table = connection.getTable(HBaseTableMetaModel.profileCacheTableName);

  try {
    Put put = new Put(convertKeyToRowKey(HBaseTableMetaModel.profileCacheTableName, userProfile.userId));
    put.add(HBaseTableMetaModel.profileCacheColumnFamily, HBaseTableMetaModel.profileCacheJsonColumn, Bytes.toBytes(userProfile.getJSONObject().toString()));
    put.add(HBaseTableMetaModel.profileCacheColumnFamily, HBaseTableMetaModel.profileCacheTsColumn, Bytes.toBytes(System.currentTimeMillis()));
    table.put(put);
  } finally {
    table.close();
  }
}
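
For context, here is a hedged sketch of the complementary read path, assuming the same row-key helper and meta-model constants; this method is not part of the HBaseUtils code shown here.

// Hypothetical read-back of the JSON column written by populateUserProfile().
public static String readUserProfileJson(HConnection connection, String userId) throws Exception {
  HTableInterface table = connection.getTable(HBaseTableMetaModel.profileCacheTableName);

  try {
    Get get = new Get(convertKeyToRowKey(HBaseTableMetaModel.profileCacheTableName, userId));
    get.addColumn(HBaseTableMetaModel.profileCacheColumnFamily, HBaseTableMetaModel.profileCacheJsonColumn);
    Result result = table.get(get);
    byte[] json = result.getValue(HBaseTableMetaModel.profileCacheColumnFamily, HBaseTableMetaModel.profileCacheJsonColumn);
    return json == null ? null : Bytes.toString(json);
  } finally {
    table.close();
  }
}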
 
Example 22  Project: hadoop-arch-book   File: HBaseUtils.java
public static void populateValidationRules(HConnection connection, ValidationRules rules) throws Exception {
  HTableInterface table = connection.getTable(HBaseTableMetaModel.profileCacheTableName);

  try {
    Put put = new Put(HBaseTableMetaModel.validationRulesRowKey);
    put.add(HBaseTableMetaModel.profileCacheColumnFamily, HBaseTableMetaModel.validationRulesRowKey, Bytes.toBytes(rules.getJSONObject().toString()));
    table.put(put);
  } finally {
    table.close();
  }
}
 
Example 23  Project: Kylin   File: GridTableHBaseBenchmark.java
private static void testColumnScan(HConnection conn, List<Pair<Integer, Integer>> colScans) throws IOException {
    Stats stats = new Stats("COLUMN_SCAN");

    HTableInterface table = conn.getTable(TEST_TABLE);
    try {
        stats.markStart();

        int nLogicCols = colScans.size();
        int nLogicRows = colScans.get(0).getSecond() - colScans.get(0).getFirst();
        
        Scan[] scans = new Scan[nLogicCols];
        ResultScanner[] scanners = new ResultScanner[nLogicCols];
        for (int i = 0; i < nLogicCols; i++) {
            scans[i] = new Scan();
            scans[i].addFamily(CF);
            scanners[i] = table.getScanner(scans[i]);
        }
        for (int i = 0; i < nLogicRows; i++) {
            for (int c = 0; c < nLogicCols; c++) {
                Result r = scanners[c].next();
                stats.consume(r);
            }
            dot(i, nLogicRows);
        }
        
        stats.markEnd();
    } finally {
        IOUtils.closeQuietly(table);
    }
}
 
Example 24  Project: Kylin   File: GridTableHBaseBenchmark.java
private static void createHTableIfNeeded(HConnection conn, String tableName) throws IOException {
    HBaseAdmin hbase = new HBaseAdmin(conn);

    try {
        boolean tableExist = false;
        try {
            hbase.getTableDescriptor(TableName.valueOf(tableName));
            tableExist = true;
        } catch (TableNotFoundException e) {
        }

        if (tableExist) {
            System.out.println("HTable '" + tableName + "' already exists");
            return;
        }

        System.out.println("Creating HTable '" + tableName + "'");

        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));

        HColumnDescriptor fd = new HColumnDescriptor(CF);
        fd.setBlocksize(CELL_SIZE);
        desc.addFamily(fd);
        hbase.createTable(desc);

        System.out.println("HTable '" + tableName + "' created");
    } finally {
        hbase.close();
    }
}
 
Example 25  Project: Kylin   File: ExportHBaseData.java
private void setup() throws IOException {
    long currentTIME = System.currentTimeMillis();
    exportFolder = "/tmp/hbase-export/" + currentTIME + "/";
    backupArchive = "/tmp/kylin_" + currentTIME + ".tar.gz";

    KylinConfig.destoryInstance();
    System.setProperty(KylinConfig.KYLIN_CONF, AbstractKylinTestCase.SANDBOX_TEST_DATA);

    kylinConfig = KylinConfig.getInstanceFromEnv();
    cli = kylinConfig.getCliCommandExecutor();

    String metadataUrl = kylinConfig.getMetadataUrl();
    // metadataUrl has the form <table_name_base>@<hbase_url>; split it at '@'
    int cut = metadataUrl.indexOf('@');
    tableNameBase = metadataUrl.substring(0, cut);
    String hbaseUrl = cut < 0 ? metadataUrl : metadataUrl.substring(cut + 1);

    HConnection conn = HBaseConnection.get(hbaseUrl);
    try {
        hbase = new HBaseAdmin(conn);
        config = hbase.getConfiguration();
        allTables = hbase.listTables();
    } catch (IOException e) {
        e.printStackTrace();
        throw e;
    }
}
 
Example 26  Project: Kylin   File: HBaseConnection.java
@Override
public void run() {
    for (HConnection conn : ConnPool.values()) {
        try {
            conn.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
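
This Runnable closes every pooled connection. A plausible way to wire it up, shown here only as an assumption and not taken from the snippet above, is to register it as a JVM shutdown hook:

// Hypothetical registration as a JVM shutdown hook so that pooled connections
// are closed when the process exits.
Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
    @Override
    public void run() {
        for (HConnection conn : ConnPool.values()) {
            try {
                conn.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}));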
 