java.util.LinkedHashMap#isEmpty() Source Code Examples

The examples below show java.util.LinkedHashMap#isEmpty() in use; each snippet is taken from an open-source project on GitHub.
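
Before the project samples, here is a minimal self-contained demo (class and variable names are illustrative) of what isEmpty() reports before and after entries are added, and of the insertion-order guarantee that most of the snippets below rely on:

import java.util.LinkedHashMap;

public class IsEmptyDemo {
    public static void main(String[] args) {
        LinkedHashMap<String, Integer> map = new LinkedHashMap<>();
        System.out.println(map.isEmpty()); // true: no entries yet

        map.put("first", 1);
        map.put("second", 2);
        System.out.println(map.isEmpty()); // false

        // LinkedHashMap iterates in insertion order, which is why the examples
        // below build paths, URLs and metadata from it after an isEmpty() check.
        map.keySet().forEach(System.out::println); // prints "first", then "second"

        map.clear();
        System.out.println(map.isEmpty()); // true again after clear()
    }
}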

Example 1   Project: TarsJava   File: ServerStatHelper.java
public void report() {
    try {
        ServerConfig serverConfig = ConfigurationManager.getInstance().getServerConfig();
        LinkedHashMap<String, ServantAdapterConfig> adapterMap = serverConfig.getServantAdapterConfMap();
        if (adapterMap == null || adapterMap.isEmpty()) {
            return;
        }

        for (java.util.Map.Entry<String, ServantAdapterConfig> adapterEntry : adapterMap.entrySet()) {
            if (OmConstants.AdminServant.equals(adapterEntry.getKey())) {
                continue;
            }

            ServantAdapterConfig servantCfg = adapterEntry.getValue();
            communicator.getStatHelper().report(InvokeStatHelper.getInstance().getProxyStat(servantCfg.getServant()), false);
        }
    } catch (Exception e) {
        omLogger.error("ServerStatHelper|ReportThread error", e);
    }
}
 
Example 2   Project: manifold   File: ManLog_8.java
@Override
public void report( JCDiagnostic issue )
{
  LinkedHashMap<JCTree, Stack<Stack<JCDiagnostic>>> suspendedIssues =
    _suspendedIssues.get( getDiagnosticHandler() );
  if( suspendedIssues == null || suspendedIssues.isEmpty() )
  {
    super.report( issue );
  }
  else
  {
    JCTree last = null;
    for( JCTree key: suspendedIssues.keySet() )
    {
      last = key;
    }
    suspendedIssues.get( last ).peek().push( issue );
  }
}
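
The loop above walks the whole keySet() just to find the most recently inserted key, since LinkedHashMap offers no direct accessor for it before Java 21. A small illustrative sketch (not from manifold; names are made up) contrasting that loop with the SequencedMap.lastEntry() call available on JDK 21+ (the second part compiles only on JDK 21 or newer):

import java.util.LinkedHashMap;

public class LastKeyDemo {
    public static void main(String[] args) {
        LinkedHashMap<String, String> map = new LinkedHashMap<>();
        map.put("a", "1");
        map.put("b", "2");
        map.put("c", "3");

        // Pre-JDK 21: remember the last key while iterating, as report() does above.
        String last = null;
        for (String key : map.keySet()) {
            last = key;
        }
        System.out.println(last); // c

        // JDK 21+: LinkedHashMap implements SequencedMap, so the last entry is one call away.
        System.out.println(map.lastEntry().getKey()); // c
    }
}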
 
Example 3   Project: manifold   File: ManLog_8.java
void popSuspendIssues( JCTree tree )
{
  LinkedHashMap<JCTree, Stack<Stack<JCDiagnostic>>> suspendedIssues =
    _suspendedIssues.get( getDiagnosticHandler() );

  if( suspendedIssues.isEmpty() )
  {
    // found method in superclass, already recorded any issues from that attempt
    return;
  }

  Stack<Stack<JCDiagnostic>> issueFrames = suspendedIssues.get( tree );
  if( issueFrames.size() == 1 )
  {
    if( isRootFrame( tree ) )
    {
      recordRecentSuspendedIssuesAndRemoveOthers( tree );
    }
  }
  else
  {
    issueFrames.pop();
  }
}
 
Example 4   Project: lucene-solr   File: SloppyPhraseMatcher.java
/** initialize with checking for repeats. Heavy work, but done only for the first candidate doc.<p>
 * If there are repetitions, check if multi-term postings (MTP) are involved.<p>
 * Without MTP, once PPs are placed in the first candidate doc, repeats (and groups) are visible.<br>
 * With MTP, a more complex check is needed, up-front, as there may be "hidden collisions".<br>
 * For example P1 has {A,B}, P2 has {B,C}, and the first doc is: "A C B". At start, P1 would point
 * to "A", P2 to "C", and it will not be identified that P1 and P2 are repetitions of each other.<p>
 * The more complex initialization has two parts:<br>
 * (1) identification of repetition groups.<br>
 * (2) advancing repeat groups at the start of the doc.<br>
 * For (1), a possible solution is to just create a single repetition group, 
 * made of all repeating pps. But this would slow down the check for collisions, 
 * as all pps would need to be checked. Instead, we compute "connected regions" 
 * on the bipartite graph of postings and terms.  
 */
private boolean initFirstTime() throws IOException {
  //System.err.println("initFirstTime: doc: "+min.doc);
  checkedRpts = true;
  placeFirstPositions();

  LinkedHashMap<Term,Integer> rptTerms = repeatingTerms(); 
  hasRpts = !rptTerms.isEmpty();

  if (hasRpts) {
    rptStack = new PhrasePositions[numPostings]; // needed with repetitions
    ArrayList<ArrayList<PhrasePositions>> rgs = gatherRptGroups(rptTerms);
    sortRptGroups(rgs);
    if (!advanceRepeatGroups()) {
      return false; // PPs exhausted
    }
  }
  
  fillQueue();
  return true; // PPs available
}
 
Example 5   Project: flink   File: PartitionPathUtils.java
/**
 * Make partition path from partition spec.
 *
 * @param partitionSpec The partition spec.
 * @return An escaped, valid partition name.
 */
public static String generatePartitionPath(LinkedHashMap<String, String> partitionSpec) {
	if (partitionSpec.isEmpty()) {
		return "";
	}
	StringBuilder suffixBuf = new StringBuilder();
	int i = 0;
	for (Map.Entry<String, String> e : partitionSpec.entrySet()) {
		if (i > 0) {
			suffixBuf.append(Path.SEPARATOR);
		}
		suffixBuf.append(escapePathName(e.getKey()));
		suffixBuf.append('=');
		suffixBuf.append(escapePathName(e.getValue()));
		i++;
	}
	suffixBuf.append(Path.SEPARATOR);
	return suffixBuf.toString();
}
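
For reference, a stripped-down, self-contained sketch of the same idea (assumed simplifications: a literal "/" instead of Flink's Path.SEPARATOR and no escapePathName escaping), showing how the LinkedHashMap's insertion order becomes the partition directory order:

import java.util.LinkedHashMap;
import java.util.Map;

public class PartitionPathSketch {
    // Simplified stand-in for generatePartitionPath: no escaping, "/" hard-coded.
    static String partitionPath(LinkedHashMap<String, String> spec) {
        if (spec.isEmpty()) {
            return "";
        }
        StringBuilder buf = new StringBuilder();
        for (Map.Entry<String, String> e : spec.entrySet()) {
            buf.append(e.getKey()).append('=').append(e.getValue()).append('/');
        }
        return buf.toString();
    }

    public static void main(String[] args) {
        LinkedHashMap<String, String> spec = new LinkedHashMap<>();
        spec.put("year", "2020");
        spec.put("month", "05");
        System.out.println(partitionPath(spec)); // year=2020/month=05/
        System.out.println(partitionPath(new LinkedHashMap<>()).isEmpty()); // true: empty spec yields ""
    }
}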
 
Example 6
/**
 * A common method that resolves the formatter's Off-On regions.<br>
 * In case the given {@link IParseRootNode} has comments, the method will try to collect the 'Off' and 'On' tags and
 * set the regions that will be ignored when formatting.<br>
 * <br>
 * Note: This method is meant to be used mainly when there is no other comment handling. In case the node-builder is
 * already traversing the comments, it's recommended to collect the Off/On regions as part of that process to
 * improve performance.
 * 
 * @param parseNode
 * @param document
 * @param offOnEnablementKey
 * @param offPatternKey
 * @param onPatternKey
 * @return A list of regions that should be excluded from being formatted (may be null).
 */
protected List<IRegion> resolveOffOnRegions(IParseRootNode parseNode, IFormatterDocument document,
		String offOnEnablementKey, String offPatternKey, String onPatternKey)
{
	if (!document.getBoolean(offOnEnablementKey))
	{
		return null;
	}
	IParseNode[] commentNodes = parseNode.getCommentNodes();
	if (commentNodes == null || commentNodes.length == 0)
	{
		return null;
	}
	LinkedHashMap<Integer, String> commentsMap = new LinkedHashMap<Integer, String>(commentNodes.length);
	for (IParseNode comment : commentNodes)
	{
		int start = comment.getStartingOffset();
		int end = comment.getEndingOffset();
		String commentStr = document.get(start, end);
		commentsMap.put(start, commentStr);
	}
	// Generate the OFF/ON regions
	if (!commentsMap.isEmpty())
	{
		Pattern onPattern = Pattern.compile(Pattern.quote(document.getString(onPatternKey)));
		Pattern offPattern = Pattern.compile(Pattern.quote(document.getString(offPatternKey)));
		return FormatterUtils.resolveOnOffRegions(commentsMap, onPattern, offPattern, document.getLength() - 1);
	}
	return null;
}
 
Example 7   Project: hygieia-core   File: TestResultEventListener.java
/**
 * Get performance test violation details
 */
public List getPerfTestViolation() {
    List<LinkedHashMap<Object, Object>> violationObjList = new ArrayList<>();
    LinkedHashMap<Object, Object> violationObjMap = new LinkedHashMap<>();
    if (!(isResponseTimeGood && isTxnGoodHealth && isErrorRateGood)){
        violationObjMap.put(VIOLATION_ATTRIBUTES.severity, STR_CRITICAL);
        violationObjMap.put(VIOLATION_ATTRIBUTES.incidentStatus, STR_OPEN);
    }
    if(!violationObjMap.isEmpty()) {
        violationObjList.add(violationObjMap);
    }
    return violationObjList;
}
 
Example 8   Project: generics-resolver   File: GenericsTrackingUtils.java
/**
 * Track root generics from a known middle type generic. For example, given {@code Some<P> extends Base<P>}
 * and a known generic of {@code Base<T>}, it is possible to track that P == T and so P is known as well.
 *
 * @param type          root type to track generics for
 * @param known         class or interface with known generics (in the middle of root type hierarchy)
 * @param knownGenerics generics of known type
 * @return root class generics (raw types for generics that were impossible to track)
 * @throws IllegalStateException when a resolved generic of the known type contradicts the known generic value
 *                               (the type can't be cast to the known type)
 * @see GenericsUtils#trackGenerics(Type, Type) shortcut for most common case
 */
public static LinkedHashMap<String, Type> track(final Class<?> type,
                                                final Class<?> known,
                                                final LinkedHashMap<String, Type> knownGenerics) {
    if (type.getTypeParameters().length == 0 || knownGenerics.isEmpty()) {
        return EmptyGenericsMap.getInstance();
    }

    try {
        return trackGenerics(type, known, knownGenerics);
    } catch (Exception ex) {
        throw new GenericsTrackingException(type, known, knownGenerics, ex);
    }
}
 
Example 9   Project: pacbot   File: ComplianceServiceImpl.java
private LinkedHashMap<String, Object> getRuleCategoryBWeightage(String domain, int totalCategories,
        Map<String, Map<String, Double>> rulesComplianceByCategory) throws ServiceException {
    int defaultWeightage = 0;
    Map<String, Object> ruleCatWeightageUnsortedMap;

    // get rule category weightage from DB
    try {
        ruleCatWeightageUnsortedMap = repository.getRuleCategoryWeightagefromDB(domain);
    } catch (DataException e) {
        throw new ServiceException(e);
    }

    LinkedHashMap<String, Object> ruleCatWeightage = new LinkedHashMap<>();
    List<Entry<String, Object>> list = null;
    if (null != ruleCatWeightageUnsortedMap && !ruleCatWeightageUnsortedMap.isEmpty()) {
        Set<Entry<String, Object>> set = ruleCatWeightageUnsortedMap.entrySet();

        list = new ArrayList<>(set);
        Collections.sort(list, new Comparator<Map.Entry<String, Object>>() {
            public int compare(Map.Entry<String, Object> o1, Map.Entry<String, Object> o2) {
                return (o2.getValue().toString()).compareTo(o1.getValue().toString());
            }
        });

        for (Map.Entry<String, Object> entry : list) {
            ruleCatWeightage.put(entry.getKey(), entry.getValue());
        }
    }

    if (ruleCatWeightage.isEmpty()) {
        defaultWeightage = INT_HUNDRED / totalCategories;
        for (Map.Entry<String, Map<String, Double>> categoryDistribution : rulesComplianceByCategory.entrySet()) {
            ruleCatWeightage.put(categoryDistribution.getKey(), defaultWeightage);
        }
    }

    return ruleCatWeightage;
}
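
The method above sorts the weightage entries by value (as strings, descending) and re-inserts them into a LinkedHashMap so the sorted order survives iteration. A compact stream-based sketch of the same pattern, with illustrative category names and a numeric comparator in place of the original string comparison:

import java.util.Comparator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.stream.Collectors;

public class SortIntoLinkedHashMapDemo {
    public static void main(String[] args) {
        Map<String, Integer> weightage = Map.of("security", 50, "costOptimization", 10, "governance", 25);

        // Sort entries by value (descending) and keep that order by collecting
        // into a LinkedHashMap, mirroring what getRuleCategoryBWeightage does
        // with an explicit Comparator and a manual re-insertion loop.
        LinkedHashMap<String, Integer> sorted = weightage.entrySet().stream()
                .sorted(Map.Entry.comparingByValue(Comparator.reverseOrder()))
                .collect(Collectors.toMap(
                        Map.Entry::getKey,
                        Map.Entry::getValue,
                        (a, b) -> a,           // merge function (no duplicate keys expected)
                        LinkedHashMap::new));  // preserves the sorted iteration order

        System.out.println(sorted.isEmpty()); // false
        System.out.println(sorted);           // {security=50, governance=25, costOptimization=10}
    }
}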
 
Example 10   Project: DataLogger   File: ServerResponse.java
/**
 * Creates a new server response object.
 * @param httpCode HTTP response code
 * @param body HTTP response body
 * @param headers HTTP response headers
 */
protected ServerResponse(int httpCode, byte[] body, LinkedHashMap<String, String> headers) {
    this.httpCode = httpCode;

    if (body != null && body.length > 0)
        this.body = body;
    else
        this.body = new byte[1];

    if (headers != null && !headers.isEmpty())
        this.headers = headers;
    else
        this.headers = new LinkedHashMap<>(1);
}
 
Example 11   Project: Jupiter   File: ConfigSection.java
/**
 * Constructor of ConfigSection, based on values stored in map.
 *
 * @param map
 */
public ConfigSection(LinkedHashMap<String, Object> map) {
    this();
    if (map == null || map.isEmpty()) return;
    for (Map.Entry<String, Object> entry : map.entrySet()) {
        if (entry.getValue() instanceof LinkedHashMap) {
            super.put(entry.getKey(), new ConfigSection((LinkedHashMap) entry.getValue()));
        } else {
            super.put(entry.getKey(), entry.getValue());
        }
    }
}
 
Example 12   Project: mycore   File: MCRRealm.java
/**
 * Returns the URL where users from this realm can log in, with the redirect URL attached.
 * If this realm has an attribute <code>redirectParameter</code> defined, this method returns
 * a complete login URL with <code>redirectURL</code> properly configured.
 * @param redirectURL URL to redirect to after login succeeds.
 * @return the same as {@link #getLoginURL()} if <code>redirectParameter</code> is undefined for this realm
 */
public String getLoginURL(String redirectURL) {
    LinkedHashMap<String, String> parameter = new LinkedHashMap<>();
    String redirect = getRedirectParameter();
    if (redirect != null && redirectURL != null) {
        parameter.put(redirect, redirectURL);
    }
    String realmParameter = getRealmParameter();
    if (realmParameter != null) {
        parameter.put(realmParameter, getID());
    }
    if (parameter.isEmpty()) {
        return getLoginURL();
    }
    StringBuilder loginURL = new StringBuilder(getLoginURL());
    boolean firstParameter = !getLoginURL().contains("?");
    for (Entry<String, String> entry : parameter.entrySet()) {
        if (firstParameter) {
            loginURL.append('?');
            firstParameter = false;
        } else {
            loginURL.append('&');
        }
        loginURL.append(entry.getKey()).append('=').append(URLEncoder.encode(entry.getValue(),
            StandardCharsets.UTF_8));
    }
    return loginURL.toString();
}
 
Example 13   Project: importer-exporter   File: NamespaceAdapter.java
@Override
public NamespaceList marshal(LinkedHashMap<String, Namespace> v) {
    NamespaceList list = null;

    if (v != null && !v.isEmpty()) {
        list = new NamespaceList();
        list.namespaces = new ArrayList<>(v.values());
    }

    return list;
}
 
Example 14   Project: astor   File: FunctionToBlockMutator.java
/**
 * @param fnName The name to use when preparing human readable names.
 * @param fnNode The function to prepare.
 * @param callNode The call node that will be replaced.
 * @param resultName Function results should be assigned to this name.
 * @param needsDefaultResult Whether the result value must be set.
 * @param isCallInLoop Whether the function body must be prepared to be
 *   injected into the body of a loop.
 * @return A clone of the function body mutated to be suitable for injection
 *   as a statement into another code block.
 */
Node mutate(String fnName, Node fnNode, Node callNode,
    String resultName, boolean needsDefaultResult, boolean isCallInLoop) {
  Node newFnNode = fnNode.cloneTree();
  // Now that parameter names have been replaced, make sure all the local
  // names are unique, to allow functions to be inlined multiple times
  // without causing conflicts.
  makeLocalNamesUnique(newFnNode, isCallInLoop);

  // Function declarations must be rewritten as function expressions as
  // they will be within a block and normalization prevents function
  // declarations within block as browser implementations vary.
  rewriteFunctionDeclarations(newFnNode.getLastChild());

  // TODO(johnlenz): Mark NAME nodes constant for parameters that are not
  // modified.
  Set<String> namesToAlias =
      FunctionArgumentInjector.findModifiedParameters(newFnNode);
  LinkedHashMap<String, Node> args =
      FunctionArgumentInjector.getFunctionCallParameterMap(
          newFnNode, callNode, this.safeNameIdSupplier);
  boolean hasArgs = !args.isEmpty();
  if (hasArgs) {
    FunctionArgumentInjector.maybeAddTempsForCallArguments(
        newFnNode, args, namesToAlias, compiler.getCodingConvention());
  }

  Node newBlock = NodeUtil.getFunctionBody(newFnNode);
  // Make the newBlock insertable.
  newBlock.detachFromParent();

  if (hasArgs) {
    Node inlineResult = aliasAndInlineArguments(newBlock,
        args, namesToAlias);
    Preconditions.checkState(newBlock == inlineResult);
  }

  //
  // For calls inlined into loops, VAR declarations are not reinitialized to
  // undefined as they would have been if the function were called, so ensure
  // that they are properly initialized.
  //
  if (isCallInLoop) {
    fixUnitializedVarDeclarations(newBlock);
  }

  String labelName = getLabelNameForFunction(fnName);
  Node injectableBlock = replaceReturns(
      newBlock, resultName, labelName, needsDefaultResult);
  Preconditions.checkState(injectableBlock != null);

  return injectableBlock;
}
 
Example 15   Project: teiid-spring-boot   File: TeiidRSProvider.java
private InputStream executeProc(Connection conn, String procedureName,
        LinkedHashMap<String, Object> parameters, String charSet, boolean usingReturn) throws SQLException {
    // the generated code sends an empty string rather than null.
    if (charSet != null && charSet.trim().isEmpty()) {
        charSet = null;
    }
    Object result = null;
    StringBuilder sb = new StringBuilder();
    sb.append("{ "); //$NON-NLS-1$
    if (usingReturn) {
        sb.append("? = "); //$NON-NLS-1$
    }
    sb.append("CALL ").append(procedureName); //$NON-NLS-1$
    sb.append("("); //$NON-NLS-1$
    boolean first = true;
    for (Map.Entry<String, Object> entry : parameters.entrySet()) {
        if (entry.getValue() == null) {
            continue;
        }
        if (!first) {
            sb.append(", "); //$NON-NLS-1$
        }
        first = false;
        sb.append(SQLStringVisitor.escapeSinglePart(entry.getKey())).append("=>?"); //$NON-NLS-1$
    }
    sb.append(") }"); //$NON-NLS-1$

    CallableStatement statement = conn.prepareCall(sb.toString());
    if (!parameters.isEmpty()) {
        int i = usingReturn ? 2 : 1;
        for (Object value : parameters.values()) {
            if (value == null) {
                continue;
            }
            statement.setObject(i++, value);
        }
    }

    final boolean hasResultSet = statement.execute();
    if (hasResultSet) {
        ResultSet rs = statement.getResultSet();
        if (rs.next()) {
            result = rs.getObject(1);
        } else {
            throw new ResponseStatusException(HttpStatus.BAD_REQUEST, "Only result producing procedures are allowed");
        }
    } else if (usingReturn) {
        result = statement.getObject(1);
    }
    return handleResult(charSet, result);
}
 
Example 16   Project: mycore   File: MCRURNGranularRESTService.java
private MCRDNBURN registerURN(MCRDerivate deriv, String filePath) throws MCRPersistentIdentifierException {
    MCRObjectID derivID = deriv.getId();

    Function<String, Integer> countCreatedPI = s -> MCRPIManager
        .getInstance()
        .getCreatedIdentifiers(derivID, getType(), getServiceID())
        .size();

    int seed = Optional.of(filePath)
        .filter(p -> !"".equals(p))
        .map(countCreatedPI)
        .map(count -> count + 1)
        .orElse(1);

    MCRDNBURN derivURN = Optional
        .ofNullable(deriv.getDerivate())
        .map(MCRObjectDerivate::getURN)
        .flatMap(new MCRDNBURNParser()::parse)
        .orElseGet(() -> createNewURN(deriv));

    String setID = derivID.getNumberAsString();
    GranularURNGenerator granularURNGen = new GranularURNGenerator(seed, derivURN, setID);
    Function<MCRPath, Supplier<String>> generateURN = p -> granularURNGen.getURNSupplier();

    LinkedHashMap<Supplier<String>, MCRPath> urnPathMap = derivateFileStream.apply(deriv)
        .filter(notInIgnoreList().and(matchFile(filePath)))
        .sorted()
        .collect(Collectors.toMap(generateURN, p -> p, (m1, m2) -> m1,
            LinkedHashMap::new));

    if (!"".equals(filePath) && urnPathMap.isEmpty()) {
        String errMsg = new MessageFormat("File {0} does not exist in {1}.\n", Locale.ROOT)
            .format(new Object[] { filePath, derivID.toString() })
            + "Use absolute path of file without owner ID like /abs/path/to/file.\n";

        throw new MCRPersistentIdentifierException(errMsg);
    }

    urnPathMap.forEach(createFileMetadata(deriv).andThen(persistURN(deriv)));

    try {
        MCRMetadataManager.update(deriv);
    } catch (MCRPersistenceException | MCRAccessException e) {
        LOGGER.error("Error while updating derivate {}", derivID, e);
    }

    EntityTransaction transaction = MCREntityManagerProvider
        .getCurrentEntityManager()
        .getTransaction();

    if (!transaction.isActive()) {
        transaction.begin();
    }

    transaction.commit();

    return derivURN;
}
 
Example 17   Project: datacollector   File: JdbcMetadataProcessor.java
@Override
protected void process(Record record, BatchMaker batchMaker) throws StageException {
  try {
    ELVars variables = getContext().createELVars();
    RecordEL.setRecordInContext(variables, record);
    TimeEL.setCalendarInContext(variables, Calendar.getInstance());
    TimeNowEL.setTimeNowInContext(variables, new Date());

    String schema = (schemaEL != null) ? elEvals.dbNameELEval.eval(variables, schemaEL, String.class) : null;
    String tableName = elEvals.tableNameELEval.eval(variables, tableNameEL, String.class);

    if (StringUtils.isEmpty(schema)) {
      schema = null;
    }

    // Obtain the record structure from current record
    LinkedHashMap<String, JdbcTypeInfo> recordStructure = JdbcMetastoreUtil.convertRecordToJdbcType(
        record,
        decimalDefaultsConfig.precisionAttribute,
        decimalDefaultsConfig.scaleAttribute,
        schemaWriter);

    if (recordStructure.isEmpty()) {
      batchMaker.addRecord(record);
      return;
    }

    LinkedHashMap<String, JdbcTypeInfo> tableStructure = null;
    try {
      tableStructure = tableCache.get(Pair.of(schema, tableName));
    } catch (ExecutionException e) {
      throw new JdbcStageCheckedException(JdbcErrors.JDBC_203, e.getMessage(), e);
    }

    if (tableStructure.isEmpty()) {
      // Create table
      schemaWriter.createTable(schema, tableName, recordStructure);
      tableCache.put(Pair.of(schema, tableName), recordStructure);
    } else {
      // Compare tables
      LinkedHashMap<String, JdbcTypeInfo> columnDiff = JdbcMetastoreUtil.getDiff(tableStructure, recordStructure);
      if (!columnDiff.isEmpty()) {
        LOG.trace("Detected drift for table {} - new columns: {}",
            tableName,
            StringUtils.join(columnDiff.keySet(), ",")
        );
        schemaWriter.alterTable(schema, tableName, columnDiff);
        tableStructure.putAll(columnDiff);
        tableCache.put(Pair.of(schema, tableName), tableStructure);
      }
    }

    batchMaker.addRecord(record);
  } catch (JdbcStageCheckedException error) {
    LOG.error("Error happened when processing record", error);
    LOG.trace("Record that caused the error: {}", record.toString());
    errorRecordHandler.onError(new OnRecordErrorException(record, error.getErrorCode(), error.getParams()));
  }
}
 
Example 18   Project: limit-order-book   File: LinkedOrderBook.java
private String getFirstKey(final LinkedHashMap<String, MarketOrder> marketOrders) {
	if (marketOrders.isEmpty())
		return null;
	final Map.Entry<String, MarketOrder> me = marketOrders.entrySet().iterator().next();
	return me.getKey();
}
 
Example 19   Project: pom-manipulation-ext   File: WildcardMap.java
/**
 * Associates the specified value with the specified key in this map.
 * @param key key to associate with
 * @param value value to associate with the key
 */
public void put(ProjectRef key, T value)
{
    String groupId = key.getGroupId();
    String artifactId = key.getArtifactId();

    LinkedHashMap<String,T> vMap = map.get(groupId);
    if ( vMap == null)
    {
        vMap = new LinkedHashMap<>();
    }
    boolean wildcard = false;

    if ( WILDCARD.equals(artifactId))
    {
        // Erase any previous mappings.
        if (!vMap.isEmpty())
        {
            logger.warn ("Emptying map with keys " + vMap.keySet() + " as replacing with wildcard mapping " + key);
        }
        vMap.clear();
    }
    else
    {
        for ( Object o : vMap.keySet() )
        {
            if ( o.equals( WILDCARD ) )
            {
                wildcard = true;
                break;
            }
        }
    }
    if ( wildcard )
    {
        logger.warn ("Unable to add " + key + " with value " + value +
                " as wildcard mapping for " + groupId + " already exists.");
    }
    else
    {
        vMap.put(artifactId, value);
        map.put(groupId, vMap);
    }
}
 
Example 20   Project: datacollector   File: HiveMetastoreUtil.java
/**
 * Fill in metadata on the Record. This is for new schema creation.
 */
public static Field newSchemaMetadataFieldBuilder  (
    String database,
    String tableName,
    LinkedHashMap<String, HiveTypeInfo> columnList,
    LinkedHashMap<String, HiveTypeInfo> partitionTypeList,
    boolean internal,
    String location,
    String avroSchema,
    HMPDataFormat dataFormat
) throws HiveStageCheckedException  {
  LinkedHashMap<String, Field> metadata = new LinkedHashMap<>();
  metadata.put(VERSION, Field.create(SCHEMA_CHANGE_METADATA_RECORD_VERSION));
  metadata.put(METADATA_RECORD_TYPE, Field.create(MetadataRecordType.TABLE.name()));
  metadata.put(DATABASE_FIELD, Field.create(database));
  metadata.put(TABLE_FIELD, Field.create(tableName));
  metadata.put(LOCATION_FIELD, Field.create(location));
  metadata.put(DATA_FORMAT, Field.create(dataFormat.name()));

  //fill in column type list here
  metadata.put(
      COLUMNS_FIELD,
      generateInnerFieldFromTheList(
          columnList,
          COLUMN_NAME,
          TYPE_INFO,
          true
      )
  );
  //fill in partition type list here
  if (partitionTypeList != null && !partitionTypeList.isEmpty()) {
    metadata.put(
        PARTITION_FIELD,
        generateInnerFieldFromTheList(
            partitionTypeList,
            PARTITION_NAME,
            TYPE_INFO,
            true
        )
    );
  }
  metadata.put(INTERNAL_FIELD, Field.create(internal));
  metadata.put(AVRO_SCHEMA, Field.create(avroSchema));
  return Field.createListMap(metadata);
}