org.apache.kylin.common.KylinConfig Code Examples

Listed below are example usages of org.apache.kylin.common.KylinConfig. You can also follow the project links to view the full source on GitHub, or leave comments in the panel on the right.
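Most of the examples below share one pattern: obtain the process-wide configuration singleton via KylinConfig.getInstanceFromEnv(), then read typed properties from it. A minimal sketch of that shared pattern (the getters shown are the ones that appear in the examples below; treat any other accessor names as assumptions):

import java.util.Arrays;

import org.apache.kylin.common.KylinConfig;

public class KylinConfigDemo {
    public static void main(String[] args) {
        // Resolves kylin.properties (e.g. under KYLIN_HOME/conf) and caches the singleton.
        KylinConfig config = KylinConfig.getInstanceFromEnv();

        // Typed accessors over the raw properties, as used in the examples below.
        String storageUrl = config.getStorageUrl().toString();
        int[] intervals = config.getQueryMetricsPercentilesIntervals();
        System.out.println(storageUrl + " " + Arrays.toString(intervals));
    }
}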

Example 1 Project: kylin File: QueryMetricsFacade.java
private static QueryMetrics getQueryMetrics(String name) {
    KylinConfig config = KylinConfig.getInstanceFromEnv();
    int[] intervals = config.getQueryMetricsPercentilesIntervals();

    QueryMetrics queryMetrics = metricsMap.get(name);
    if (queryMetrics != null) {
        return queryMetrics;
    }

    synchronized (QueryMetricsFacade.class) {
        queryMetrics = metricsMap.get(name);
        if (queryMetrics != null) {
            return queryMetrics;
        }

        try {
            queryMetrics = new QueryMetrics(intervals).registerWith(name);
            metricsMap.put(name, queryMetrics);
            return queryMetrics;
        } catch (MetricsException e) {
            logger.warn(name + " register error: ", e);
        }
    }
    return queryMetrics;
}
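On JDK 8+ the same check-then-register logic can be expressed with ConcurrentHashMap.computeIfAbsent, which also guarantees at-most-once registration per key. A sketch, assuming metricsMap is declared as a ConcurrentHashMap<String, QueryMetrics> (its declaration is not shown in the snippet above):

private static QueryMetrics getQueryMetrics(String name) {
    KylinConfig config = KylinConfig.getInstanceFromEnv();
    int[] intervals = config.getQueryMetricsPercentilesIntervals();
    // The mapping function runs at most once per key; returning null from it
    // records no mapping, mirroring the catch branch in the original version.
    return metricsMap.computeIfAbsent(name, key -> {
        try {
            return new QueryMetrics(intervals).registerWith(key);
        } catch (MetricsException e) {
            logger.warn(key + " register error: ", e);
            return null;
        }
    });
}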
 
Example 2 Project: kylin File: CubeStatsReader.java
/**
 * @param cuboidScheduler if null, some of its functions will not be supported
 */
public CubeStatsReader(CubeSegment cubeSegment, CuboidScheduler cuboidScheduler, KylinConfig kylinConfig)
        throws IOException {
    ResourceStore store = ResourceStore.getStore(kylinConfig);
    String statsKey = cubeSegment.getStatisticsResourcePath();
    RawResource resource = store.getResource(statsKey);
    if (resource == null)
        throw new IllegalStateException("Missing resource at " + statsKey);

    File tmpSeqFile = writeTmpSeqFile(resource.content());
    Path path = new Path(HadoopUtil.fixWindowsPath("file://" + tmpSeqFile.getAbsolutePath()));

    CubeStatsResult cubeStatsResult = new CubeStatsResult(path, kylinConfig.getCubeStatsHLLPrecision());
    tmpSeqFile.delete();

    this.seg = cubeSegment;
    this.cuboidScheduler = cuboidScheduler;
    this.samplingPercentage = cubeStatsResult.getPercentage();
    this.mapperNumberOfFirstBuild = cubeStatsResult.getMapperNumber();
    this.mapperOverlapRatioOfFirstBuild = cubeStatsResult.getMapperOverlapRatio();
    this.cuboidRowEstimatesHLL = cubeStatsResult.getCounterMap();
    this.sourceRowCount = cubeStatsResult.getSourceRecordCount();
}
 
Example 3 Project: kylin-on-parquet-v2 File: QueryConnection.java
public static Connection getConnection(String project) throws SQLException {
    if (!isRegister) {
        try {
            Class<?> aClass = Thread.currentThread().getContextClassLoader()
                    .loadClass("org.apache.calcite.jdbc.Driver");
            Driver o = (Driver) aClass.getDeclaredConstructor().newInstance();
            DriverManager.registerDriver(o);
        } catch (ClassNotFoundException | InstantiationException | IllegalAccessException | NoSuchMethodException | InvocationTargetException e) {
            e.printStackTrace();
        }
        isRegister = true;
    }
    File olapTmp = OLAPSchemaFactory.createTempOLAPJson(project, KylinConfig.getInstanceFromEnv());
    Properties info = new Properties();
    info.putAll(KylinConfig.getInstanceFromEnv().getCalciteExtrasProperties());
    // Import Calcite properties from the JDBC client (overrides kylin.properties)
    info.putAll(BackdoorToggles.getJdbcDriverClientCalciteProps());
    info.put("model", olapTmp.getAbsolutePath());
    info.put("typeSystem", "org.apache.kylin.query.calcite.KylinRelDataTypeSystem");
    return DriverManager.getConnection("jdbc:calcite:", info);
}
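A usage sketch for the connection returned above; the project name and SQL are illustrative placeholders, and the import of QueryConnection depends on the Kylin version:

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class QueryConnectionUsageDemo {
    public static void main(String[] args) throws SQLException {
        // "some_project" and the SQL below are made-up examples.
        try (Connection conn = QueryConnection.getConnection("some_project");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT 1")) {
            while (rs.next()) {
                System.out.println(rs.getInt(1));
            }
        }
    }
}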
 
Example 4
@Test
public void testUpdateEntryToProject() throws IOException {
    KylinConfig kylinConfig = getTestConfig();
    BadQueryHistoryManager manager = BadQueryHistoryManager.getInstance(kylinConfig);

    String queryId = RandomUtil.randomUUID().toString();
    manager.upsertEntryToProject(
            new BadQueryEntry("sql", "adj", 1459362239000L, 100, "server", "t-0", "user", queryId, "cube"),
            "default");
    BadQueryHistory history = manager.upsertEntryToProject(
            new BadQueryEntry("sql", "adj2", 1459362239000L, 120, "server2", "t-1", "user", queryId, "cube"), "default");

    NavigableSet<BadQueryEntry> entries = history.getEntries();
    BadQueryEntry newEntry = entries
            .floor(new BadQueryEntry("sql", "adj2", 1459362239000L, 120, "server2", "t-1", "user", queryId, "cube"));
    System.out.println(newEntry);
    assertEquals("adj2", newEntry.getAdj());
    assertEquals("server2", newEntry.getServer());
    assertEquals("t-1", newEntry.getThread());
    assertEquals("user", newEntry.getUser());
    assertEquals(120, (int) newEntry.getRunningSec());
}
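The assertion above relies on NavigableSet.floor, which returns the greatest element less than or equal to the probe, so probing with an entry that compares equal to the stored one retrieves it. A minimal, self-contained illustration with plain strings:

import java.util.NavigableSet;
import java.util.TreeSet;

public class FloorDemo {
    public static void main(String[] args) {
        NavigableSet<String> set = new TreeSet<>();
        set.add("alpha");
        set.add("gamma");
        System.out.println(set.floor("gamma")); // exact match -> "gamma"
        System.out.println(set.floor("beta"));  // greatest element <= "beta" -> "alpha"
        System.out.println(set.floor("a"));     // nothing <= "a" -> null
    }
}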
 
Example 5 Project: kylin-on-parquet-v2 File: SparkExecutable.java
private void updateSparkDimensionDicMetadata(KylinConfig config, CubeInstance cube, String segmentId)
        throws IOException {
    KylinConfig hdfsConfig = AbstractHadoopJob
            .loadKylinConfigFromHdfs(this.getParam(SparkBuildDictionary.OPTION_META_URL.getOpt()));
    CubeInstance cubeInstance = CubeManager.getInstance(hdfsConfig).reloadCube(cube.getName());
    CubeSegment segment = cubeInstance.getSegmentById(segmentId);

    CubeSegment oldSeg = cube.getSegmentById(segmentId);
    oldSeg.setDictionaries((ConcurrentHashMap<String, String>) segment.getDictionaries());
    oldSeg.setSnapshots((ConcurrentHashMap) segment.getSnapshots());
    oldSeg.getRowkeyStats().addAll(segment.getRowkeyStats());
    CubeInstance cubeCopy = cube.latestCopyForWrite();
    CubeUpdate update = new CubeUpdate(cubeCopy);
    update.setToUpdateSegs(oldSeg);
    CubeManager.getInstance(config).updateCube(update);

    Set<String> dumpList = new LinkedHashSet<>();
    dumpList.addAll(segment.getDictionaryPaths());
    dumpList.addAll(segment.getSnapshotPaths());

    JobRelatedMetaUtil.dumpAndUploadKylinPropsAndMetadata(dumpList, (KylinConfigExt) segment.getConfig(),
            config.getMetadataUrl().toString());
}
 
Example 6 Project: kylin File: BasicMeasureType.java
@Override
public void validate(FunctionDesc functionDesc) throws IllegalArgumentException {
    DataType rtype = dataType;

    if (funcName.equals(FunctionDesc.FUNC_SUM)) {
        if (!rtype.isNumberFamily()) {
            throw new IllegalArgumentException("Return type for function " + funcName + " must be one of " + DataType.NUMBER_FAMILY);
        }
    } else if (funcName.equals(FunctionDesc.FUNC_COUNT)) {
        if (!rtype.isIntegerFamily()) {
            throw new IllegalArgumentException("Return type for function " + funcName + " must be one of " + DataType.INTEGER_FAMILY);
        }
    } else if (funcName.equals(FunctionDesc.FUNC_MAX) || funcName.equals(FunctionDesc.FUNC_MIN)) {
        if (!rtype.isNumberFamily()) {
            throw new IllegalArgumentException("Return type for function " + funcName + " must be one of " + DataType.NUMBER_FAMILY);
        }
    } else {
        KylinConfig config = KylinConfig.getInstanceFromEnv();
        if (!config.isQueryIgnoreUnknownFunction()) {
            throw new IllegalArgumentException("Unrecognized function: [" + funcName + "]");
        }
    }
}
 
Example 7
public void updateLayout(SegmentInfo seg, KylinConfig config) {
    for (int i = 0; i < currentLayoutsNum; i++) {
        try {
            logger.info("Wait to take job result.");
            JobResult result = completionService.take().get();
            logger.info("Take job result successful.");
            if (result.isFailed()) {
                shutDownPool();
                throw new RuntimeException(result.getThrowable());
            }
            seg.updateLayout(result.layout);
        } catch (InterruptedException | ExecutionException e) {
            shutDownPool();
            throw new RuntimeException(e);
        }
    }
    currentLayoutsNum = 0;
}
 
Example 8 Project: Kylin File: InvertedIndexMapper.java
@Override
protected void setup(Context context) throws IOException {
    super.publishConfiguration(context.getConfiguration());

    Configuration conf = context.getConfiguration();

    KylinConfig config = AbstractHadoopJob.loadKylinPropsAndMetadata(conf);
    IIManager mgr = IIManager.getInstance(config);
    IIInstance ii = mgr.getII(conf.get(BatchConstants.CFG_II_NAME));
    IISegment seg = ii.getSegment(conf.get(BatchConstants.CFG_II_SEGMENT_NAME), SegmentStatusEnum.NEW);
    this.info = new TableRecordInfo(seg);
    this.rec = this.info.createTableRecord();

    outputKey = new LongWritable();
    outputValue = new ImmutableBytesWritable(rec.getBytes());

    schema = HCatInputFormat.getTableSchema(context.getConfiguration());
    
    fields = schema.getFields();
}
 
Example 9 Project: kylin File: AbstractInfoExtractor.java
private void dumpBasicDiagInfo() throws IOException {
    try {
        for (String commitSHA1File : COMMIT_SHA1_FILES) {
            File commitFile = new File(KylinConfig.getKylinHome(), commitSHA1File);
            if (commitFile.exists()) {
                FileUtils.copyFileToDirectory(commitFile, exportDir);
            }
        }
    } catch (IOException e) {
        logger.warn("Failed to copy commit_SHA1 file.", e);
    }

    String output = KylinVersion.getKylinClientInformation() + "\n";
    FileUtils.writeStringToFile(new File(exportDir, "kylin_env"), output, Charset.defaultCharset());

    StringBuilder basicSb = new StringBuilder();
    basicSb.append("MetaStoreID: ").append(ToolUtil.getMetaStoreId()).append("\n");
    basicSb.append("PackageType: ").append(packageType.toUpperCase(Locale.ROOT)).append("\n");
    SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss Z", Locale.ROOT);
    basicSb.append("PackageTimestamp: ").append(format.format(new Date())).append("\n");
    basicSb.append("Host: ").append(ToolUtil.getHostName()).append("\n");
    FileUtils.writeStringToFile(new File(exportDir, "info"), basicSb.toString(), Charset.defaultCharset());
}
 
Example 10 Project: kylin File: BaseCuboidBuilder.java
public BaseCuboidBuilder(KylinConfig kylinConfig, CubeDesc cubeDesc, CubeSegment cubeSegment,
        CubeJoinedFlatTableEnrich intermediateTableDesc, Map<TblColRef, Dictionary<String>> dictionaryMap) {
    this.kylinConfig = kylinConfig;
    this.cubeDesc = cubeDesc;
    this.cubeSegment = cubeSegment;
    this.intermediateTableDesc = intermediateTableDesc;
    this.dictionaryMap = dictionaryMap;

    Cuboid baseCuboid = Cuboid.getBaseCuboid(cubeDesc);
    rowKeyEncoder = AbstractRowKeyEncoder.createInstance(cubeSegment, baseCuboid);

    measureDescList = cubeDesc.getMeasures();
    aggrIngesters = MeasureIngester.create(measureDescList);
    measureCodec = new BufferedMeasureCodec(measureDescList);

    kvBuilder = new KeyValueBuilder(intermediateTableDesc);
    checkHiveGlobalDictionaryColumn();
}
 
Example 11 Project: kylin-on-parquet-v2 File: RealizationSignature.java
static HybridSignature getHybridSignature(KylinConfig config, String realizationName) {
    HybridInstance hybridInstance = HybridManager.getInstance(config).getHybridInstance(realizationName);
    if (hybridInstance == null) {
        return null;
    }
    IRealization[] realizations = hybridInstance.getRealizations();
    Set<RealizationSignature> realizationSignatureSet = Sets.newHashSetWithExpectedSize(realizations.length);
    for (IRealization realization : realizations) {
        RealizationSignature realizationSignature = null;
        if (realization.getType() == RealizationType.CUBE) {
            realizationSignature = CubeSignature.getCubeSignature(config, realization.getName());
        } else if (realization.getType() == RealizationType.HYBRID) {
            realizationSignature = getHybridSignature(config, realization.getName());
        }
        if (realizationSignature != null) {
            realizationSignatureSet.add(realizationSignature);
        }
    }
    return new HybridSignature(realizationName, realizationSignatureSet);
}
 
Example 12 Project: kylin-on-parquet-v2 File: HBaseLookupMRSteps.java
public void addMaterializeLookupTableSteps(LookupMaterializeContext context, String tableName, SnapshotTableDesc snapshotTableDesc) {
    KylinConfig kylinConfig = KylinConfig.getInstanceFromEnv();
    ExtTableSnapshotInfoManager extTableSnapshotInfoManager = ExtTableSnapshotInfoManager.getInstance(kylinConfig);
    TableDesc tableDesc = TableMetadataManager.getInstance(kylinConfig).getTableDesc(tableName, cube.getProject());
    IReadableTable sourceTable = SourceManager.createReadableTable(tableDesc, context.getJobFlow().getId());
    try {
        ExtTableSnapshotInfo latestSnapshot = extTableSnapshotInfoManager.getLatestSnapshot(
                sourceTable.getSignature(), tableName);
        if (latestSnapshot != null) {
            logger.info("there is latest snapshot exist for table:{}, skip build snapshot step.", tableName);
            context.addLookupSnapshotPath(tableName, latestSnapshot.getResourcePath());
            return;
        }
    } catch (IOException ioException) {
        throw new RuntimeException(ioException);
    }
    logger.info("add build snapshot steps for table:{}", tableName);
    String snapshotID = genLookupSnapshotID();
    context.addLookupSnapshotPath(tableName, ExtTableSnapshotInfo.getResourcePath(tableName, snapshotID));
    addLookupTableConvertToHFilesStep(context.getJobFlow(), tableName, snapshotID);
    addLookupTableHFilesBulkLoadStep(context.getJobFlow(), tableName, snapshotID);
    if (snapshotTableDesc != null && snapshotTableDesc.isEnableLocalCache()) {
        addUpdateSnapshotQueryCacheStep(context.getJobFlow(), tableName, snapshotID);
    }
}
 
Example 13 Project: kylin File: CubeDescManager.java
private CubeDescManager(KylinConfig cfg) throws IOException {
    logger.info("Initializing CubeDescManager with config {}", cfg);
    this.config = cfg;
    this.cubeDescMap = new CaseInsensitiveStringCache<>(config, "cube_desc");
    this.crud = new CachedCrudAssist<CubeDesc>(getStore(), ResourceStore.CUBE_DESC_RESOURCE_ROOT, CubeDesc.class,
            cubeDescMap) {
        @Override
        protected CubeDesc initEntityAfterReload(CubeDesc cubeDesc, String resourceName) {
            if (cubeDesc.isDraft())
                throw new IllegalArgumentException(String.format(Locale.ROOT, CUBE_SHOULD_NOT_BE_DRAFT_MSG, cubeDesc.getName()));

            try {
                cubeDesc.init(config);
            } catch (Exception e) {
                logger.warn(String.format(Locale.ROOT, BROKEN_CUBE_MSG, cubeDesc.resourceName()), e);
                cubeDesc.addError(e.toString());
            }
            return cubeDesc;
        }
    };

    // touch lower level metadata before registering my listener
    crud.reloadAll();
    Broadcaster.getInstance(config).registerListener(new CubeDescSyncListener(), "cube_desc");
}
 
Example 14 Project: kylin File: KafkaSource.java
@Override
public IStreamingConnector createStreamingConnector(String cubeName, List<Partition> assignedPartitions,
        ConsumerStartProtocol startProtocol, StreamingSegmentManager streamingSegmentManager) {
    logger.info("Create StreamingConnector for Cube {}, assignedPartitions {}, startProtocol {}", cubeName,
            assignedPartitions, startProtocol);
    try {
        KylinConfig kylinConf = KylinConfig.getInstanceFromEnv();
        CubeInstance cubeInstance = CubeManager.getInstance(kylinConf).getCube(cubeName);
        IStreamingSource streamingSource = StreamingSourceFactory.getStreamingSource(cubeInstance);
        String streamingName = cubeInstance.getRootFactTable();
        StreamingSourceConfig streamingSourceConfig = StreamingSourceConfigManager.getInstance(kylinConf)
                .getConfig(streamingName);
        String topic = getTopicName(streamingSourceConfig.getProperties());
        Map<String, Object> conf = getKafkaConf(streamingSourceConfig.getProperties(), cubeInstance.getConfig());

        Class<?> clazz = getStreamingMessageParserClass(streamingSourceConfig.getProperties());
        Constructor<?> constructor = clazz.getConstructor(CubeDesc.class, MessageParserInfo.class);
        IStreamingMessageParser<?> parser = (IStreamingMessageParser<?>) constructor
                .newInstance(cubeInstance.getDescriptor(), streamingSourceConfig.getParserInfo());
        KafkaConnector connector = new KafkaConnector(conf, topic, parser, this);
        if (startProtocol != null) {
            if (startProtocol.getStartPosition() != null && startProtocol.getStartPosition().length() > 0) {
                KafkaPosition position = (KafkaPosition) streamingSource.getSourcePositionHandler().parsePosition(startProtocol.getStartPosition());
                connector.setStartPartition(assignedPartitions, startProtocol.getStartMode(),
                        position.getPartitionOffsets());
                streamingSegmentManager.restoreConsumerStates(position);
            } else {
                connector.setStartPartition(assignedPartitions, startProtocol.getStartMode(),
                        null);
            }
            streamingSegmentManager.checkpoint();
        } else if (streamingSegmentManager != null) {
            setupConnectorFromCheckpoint(connector, assignedPartitions, streamingSource, streamingSegmentManager);
        }

        return connector;
    } catch (Exception e) {
        throw new StreamingException("failed to create streaming connector, cube: " + cubeName, e);
    }
}
 
Example 15 Project: kylin-on-parquet-v2 File: BaseSchedulerTest.java
@Before
public void setup() throws Exception {
    System.setProperty("kylin.job.scheduler.poll-interval-second", "1");
    createTestMetadata();
    execMgr = ExecutableManager.getInstance(KylinConfig.getInstanceFromEnv());
    startScheduler();
}
 
Example 16 Project: kylin-on-parquet-v2 File: CuboidReducer.java
@Override
protected void doSetup(Context context) throws IOException {
    super.bindCurrentConfiguration(context.getConfiguration());
    cubeName = context.getConfiguration().get(BatchConstants.CFG_CUBE_NAME).toUpperCase(Locale.ROOT);

    // only used in Build job, not in Merge job
    cuboidLevel = context.getConfiguration().getInt(BatchConstants.CFG_CUBE_CUBOID_LEVEL, 0);

    KylinConfig config = AbstractHadoopJob.loadKylinPropsAndMetadata();

    cubeDesc = CubeManager.getInstance(config).getCube(cubeName).getDescriptor();
    measuresDescs = cubeDesc.getMeasures();

    codec = new BufferedMeasureCodec(measuresDescs);
    aggs = new MeasureAggregators(measuresDescs);

    input = new Object[measuresDescs.size()];
    result = new Object[measuresDescs.size()];

    List<Integer> needAggMeasuresList = Lists.newArrayList();
    for (int i = 0; i < measuresDescs.size(); i++) {
        if (cuboidLevel == 0) {
            needAggMeasuresList.add(i);
        } else {
            if (!measuresDescs.get(i).getFunction().getMeasureType().onlyAggrInBaseCuboid()) {
                needAggMeasuresList.add(i);
            }
        }
    }

    needAggrMeasures = new int[needAggMeasuresList.size()];
    for (int i = 0; i < needAggMeasuresList.size(); i++) {
        needAggrMeasures[i] = needAggMeasuresList.get(i);
    }
}
 
Example 17
public CubeMigrationCheckCLI(KylinConfig kylinConfig, Boolean isFix) throws IOException {
    this.dstCfg = kylinConfig;
    this.ifFix = isFix;

    Connection conn = HBaseConnection.get(kylinConfig.getStorageUrl());
    hbaseAdmin = conn.getAdmin();
    issueExistHTables = Lists.newArrayList();
    inconsistentHTables = Lists.newArrayList();
}
 
Example 18 Project: kylin File: JobBuilderSupport.java
public String getBuildGlobalDictionaryMaxDistinctCountPath(String jobId) {
    KylinConfig conf = seg.getConfig();
    String dbDir = conf.getHiveDatabaseDir();
    IJoinedFlatTableDesc flatDesc = EngineFactory.getJoinedFlatTableDesc(seg);
    String tableName = flatDesc.getTableName() + conf.getMrHiveDistinctValueTableSuffix();
    return dbDir + "/" + tableName + "/dict_column=" + BatchConstants.CFG_GLOBAL_DICT_STATS_PARTITION_VALUE;
}
 
Example 19 Project: kylin-on-parquet-v2 File: FlinkExecutable.java
private String getAppState(String appId) throws IOException {
    CliCommandExecutor executor = KylinConfig.getInstanceFromEnv().getCliCommandExecutor();
    PatternedLogger patternedLogger = new PatternedLogger(logger);
    String stateCmd = String.format(Locale.ROOT, "yarn application -status %s", appId);
    executor.execute(stateCmd, patternedLogger, null);
    Map<String, String> info = patternedLogger.getInfo();
    return info.get(ExecutableConstants.YARN_APP_STATE);
}
 
Example 20 Project: kylin File: MassInTupleFilter.java
@Override
public void addChild(TupleFilter child) {
    if (child instanceof ColumnTupleFilter) {
        super.addChild(child);
        ColumnTupleFilter columnFilter = (ColumnTupleFilter) child;
        if (this.column != null) {
            throw new IllegalStateException("Duplicate columns! old is " + column.getName() + " and new is " + columnFilter.getColumn().getName());
        }
        this.column = columnFilter.getColumn();

    } else if (child instanceof ConstantTupleFilter) {
        // super.addChild(child) is omitted because the filter table name is useless on the storage side;
        // we extract the useful fields (filterTableResourceIdentifier, filterTableType, etc.) and keep them on the MassInTupleFilter itself.

        if (filterTableName == null) {
            filterTableName = (String) child.getValues().iterator().next();
            ExternalFilterDesc externalFilterDesc = TableMetadataManager.getInstance(KylinConfig.getInstanceFromEnv()).getExtFilterDesc(filterTableName);
            if (externalFilterDesc == null) {
                throw new IllegalArgumentException("External filter named " + filterTableName + " is not found");
            }
            filterTableType = externalFilterDesc.getFilterTableType();
            filterTableResourceIdentifier = externalFilterDesc.getFilterResourceIdentifier();
        }
    } else {
        throw new IllegalStateException("MassInTupleFilter only has two children: one ColumnTupleFilter and one ConstantTupleFilter");
    }
}
 
Example 21 Project: kylin File: KafkaSource.java
@Override
public void unloadTable(String tableName, String project) throws IOException {
    StreamingConfig config;
    KafkaConfig kafkaConfig;
    KylinConfig kylinConfig = KylinConfig.getInstanceFromEnv();
    StreamingManager streamingManager = StreamingManager.getInstance(kylinConfig);
    KafkaConfigManager kafkaConfigManager = KafkaConfigManager.getInstance(kylinConfig);
    config = streamingManager.getStreamingConfig(tableName);
    kafkaConfig = kafkaConfigManager.getKafkaConfig(tableName);
    streamingManager.removeStreamingConfig(config);
    kafkaConfigManager.removeKafkaConfig(kafkaConfig);
}
 
Example 22 Project: kylin-on-parquet-v2 File: JdbcExplorer.java
private ColumnDesc[] extractColumnFromMeta(ResultSet meta) throws SQLException {
    List<ColumnDesc> columns = new ArrayList<>();

    while (meta.next()) {
        String cname = meta.getString("COLUMN_NAME");
        int type = meta.getInt("DATA_TYPE");
        int csize = meta.getInt("COLUMN_SIZE");
        int digits = meta.getInt("DECIMAL_DIGITS");
        int pos = meta.getInt("ORDINAL_POSITION");
        String remarks = meta.getString("REMARKS");

        ColumnDesc cdesc = new ColumnDesc();
        cdesc.setName(cname.toUpperCase(Locale.ROOT));

        String kylinType = SqlUtil.jdbcTypeToKylinDataType(type);
        int precision = (SqlUtil.isPrecisionApplicable(kylinType) && csize > 0) ? csize : -1;
        precision = Math.min(precision, KylinConfig.getInstanceFromEnv().getDefaultVarcharPrecision());
        int scale = (SqlUtil.isScaleApplicable(kylinType) && digits > 0) ? digits : -1;

        cdesc.setDatatype(new DataType(kylinType, precision, scale).toString());
        cdesc.setId(String.valueOf(pos));
        cdesc.setComment(remarks);
        columns.add(cdesc);
    }

    return columns.toArray(new ColumnDesc[columns.size()]);
}
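The ResultSet consumed above matches the column layout of java.sql.DatabaseMetaData.getColumns, whose standard result columns include COLUMN_NAME, DATA_TYPE, COLUMN_SIZE, DECIMAL_DIGITS, ORDINAL_POSITION and REMARKS. A sketch of how such a ResultSet could be produced; the schema and table names are placeholders, and since extractColumnFromMeta is private, this helper would in practice live in the same class:

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;

public class ColumnMetaDemo {
    ColumnDesc[] readColumns(Connection connection, String schema, String table) throws SQLException {
        DatabaseMetaData dbmd = connection.getMetaData();
        // null catalog and no column-name filter: list every column of schema.table
        try (ResultSet meta = dbmd.getColumns(null, schema, table, null)) {
            return extractColumnFromMeta(meta); // the method shown above
        }
    }
}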
 
Example 23 Project: kylin File: AbstractHadoopJob.java
private String getDefaultMapRedClasspath() {
    String classpath = "";
    try {
        CliCommandExecutor executor = KylinConfig.getInstanceFromEnv().getCliCommandExecutor();
        String output = executor.execute("mapred classpath").getSecond();
        classpath = output.trim().replace(':', ',');
    } catch (IOException e) {
        logger.error("Failed to run: 'mapred classpath'.", e);
    }

    return classpath;
}
 
Example 24 Project: kylin File: JobInfoConverter.java
public static JobSearchResult parseToJobSearchResult(DefaultChainedExecutable job, Map<String, ExecutableOutputPO> outputs) {
    if (job == null) {
        logger.warn("job is null.");
        return null;
    }

    ExecutableOutputPO output = outputs.get(job.getId());
    if (output == null) {
        logger.warn("job output is null.");
        return null;
    }

    final JobSearchResult result = new JobSearchResult();

    String cubeName = CubingExecutableUtil.getCubeName(job.getParams());

    if (cubeName == null) {
        cubeName = job.getParam("model_name");
    } else {
        CubeInstance cube = CubeManager.getInstance(KylinConfig.getInstanceFromEnv()).getCube(cubeName);
        if (cube != null) {
            cubeName = cube.getDisplayName();
        }
    }
    result.setCubeName(cubeName);
    result.setId(job.getId());
    result.setJobName(job.getName());
    result.setLastModified(output.getLastModified());
    result.setJobStatus(JobInfoConverter.parseToJobStatus(job.getStatus()));
    return result;
}
 
Example 25 Project: kylin-on-parquet-v2 File: ColumnGenConfig.java
private List<String> getPkValues(ModelDataGenerator modelGen, Map<String, String> config, List<String> dftPkValues) throws IOException {
    String pkColName = config.get("pk");
    if (pkColName == null)
        return dftPkValues;
    
    int cut = pkColName.lastIndexOf('.');
    String pkTableName = pkColName.substring(0, cut);
    pkColName = pkColName.substring(cut + 1);
    
    KylinConfig kylinConfig = modelGen.getModle().getConfig();
    String project = modelGen.getModle().getProject();
    ColumnDesc pkcol = TableMetadataManager.getInstance(kylinConfig)//
            .getTableDesc(pkTableName, project).findColumnByName(pkColName);
    return modelGen.getPkValues(pkcol);
}
 
Example 26 Project: Kylin File: DeployUtil.java
public static void deployMetadata() throws IOException {
    // install metadata to hbase
    ResourceTool.reset(config());
    ResourceTool.copy(KylinConfig.createInstanceFromUri(AbstractKylinTestCase.LOCALMETA_TEST_DATA), config());

    // update cube desc signature.
    for (CubeInstance cube : CubeManager.getInstance(config()).listAllCubes()) {
        cube.getDescriptor().setSignature(cube.getDescriptor().calculateSignature());
        CubeManager.getInstance(config()).updateCube(cube);
    }
}
 
Example 27 Project: kylin-on-parquet-v2 File: SnapshotManagerTest.java
@Test
public void testBuildSameSnapshotSameTime() throws InterruptedException, IOException {
    final int threadCount = 3;
    final ExecutorService executorService = Executors.newFixedThreadPool(threadCount);
    final CountDownLatch countDownLatch = new CountDownLatch(threadCount);
    final TableDesc tableDesc = genTableDesc("TEST_TABLE");

    kylinConfig = KylinConfig.getInstanceFromEnv();
    snapshotManager = SnapshotManager.getInstance(kylinConfig);
    ResourceStore store = ResourceStore.getStore(kylinConfig);

    for (int i = 0; i < threadCount; ++i) {
        executorService.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    snapshotManager.buildSnapshot(genTable("./origin", expect), tableDesc, kylinConfig);
                } catch (IOException e) {
                    Assert.fail();
                } finally {
                    countDownLatch.countDown();
                }
            }
        });
    }
    countDownLatch.await();
    Assert.assertEquals(1, store.listResources("/table_snapshot/NULL.TEST_TABLE").size());
}
 
Example 28 Project: kylin-on-parquet-v2 File: ZKUtil.java
/**
 * Get zookeeper connection string from kylin.properties
 */
public static String getZKConnectString(KylinConfig config) {
    String zkString = config.getZookeeperConnectString();
    if (zkString == null) {
        zkString = getZKConnectStringFromHBase();
        if (zkString == null) {
            throw new RuntimeException("Please set 'kylin.env.zookeeper-connect-string' in kylin.properties");
        }
    }

    return zkString;
}
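A minimal usage sketch of the lookup order described above (the configured property first, then the HBase fallback, otherwise an exception); the connect string in the comment is illustrative, and the ZKUtil import depends on the module:

import org.apache.kylin.common.KylinConfig;

public class ZkConnectDemo {
    public static void main(String[] args) {
        KylinConfig config = KylinConfig.getInstanceFromEnv();
        // kylin.env.zookeeper-connect-string if set (e.g. "zk1:2181,zk2:2181"),
        // otherwise the value derived from HBase, otherwise a RuntimeException.
        System.out.println(ZKUtil.getZKConnectString(config));
    }
}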
 
Example 29 Project: kylin-on-parquet-v2 File: ITStorageTest.java
@Before
public void setUp() throws Exception {
    this.createTestMetadata();

    CubeManager cubeMgr = CubeManager.getInstance(getTestConfig());
    cube = cubeMgr.getCube("test_kylin_cube_without_slr_left_join_empty");
    Assert.assertNotNull(cube);
    storageEngine = StorageFactory.createQuery(cube);
    context = new StorageContext();
    context.setConnUrl(KylinConfig.getInstanceFromEnv().getStorageUrl());
    mockup = new StorageMockUtils(cube.getModel());
}
 
Example 30 Project: kylin File: JdbcTableReader.java
/**
 * Constructor for reading a whole JDBC table.
 * @param dbName the source database (schema) name
 * @param tableName the table to read
 * @throws IOException if the JDBC connection cannot be created or the scan query fails
 */
public JdbcTableReader(String dbName, String tableName) throws IOException {
    this.dbName = dbName;
    this.tableName = tableName;
    KylinConfig config = KylinConfig.getInstanceFromEnv();
    String connectionUrl = config.getJdbcSourceConnectionUrl();
    String driverClass = config.getJdbcSourceDriver();
    String jdbcUser = config.getJdbcSourceUser();
    String jdbcPass = config.getJdbcSourcePass();
    dbconf = new DBConnConf(driverClass, connectionUrl, jdbcUser, jdbcPass);
    jdbcCon = SqlUtil.getConnection(dbconf);
    IJdbcMetadata meta = JdbcMetadataFactory
            .getJdbcMetadata(SourceDialect.getDialect(config.getJdbcSourceDialect()), dbconf);

    Map<String, String> metadataCache = new TreeMap<>();
    JdbcHiveInputBase.JdbcBaseBatchCubingInputSide.calCachedJdbcMeta(metadataCache, dbconf, meta);
    String database = JdbcHiveInputBase.getSchemaQuoted(metadataCache, dbName, meta, true);
    String table = JdbcHiveInputBase.getTableIdentityQuoted(dbName, tableName, metadataCache, meta, true);

    String sql = String.format(Locale.ROOT, "select * from %s.%s", database, table);
    try {
        statement = jdbcCon.createStatement();
        rs = statement.executeQuery(sql);
        colCount = rs.getMetaData().getColumnCount();
    } catch (SQLException e) {
        throw new IOException(String.format(Locale.ROOT, "error while exec %s", sql), e);
    }
}
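A sketch of consuming the reader; it assumes JdbcTableReader implements Kylin's TableReader contract (boolean next() plus String[] getRow(), with close() from Closeable), which the constructor above does not show. The database and table names are placeholders:

JdbcTableReader reader = new JdbcTableReader("some_db", "some_table");
try {
    while (reader.next()) {
        String[] row = reader.getRow();
        System.out.println(String.join(",", row));
    }
} finally {
    reader.close();
}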