Source-code examples for the class org.apache.hadoop.fs.permission.PermissionStatus

The examples below show how the org.apache.hadoop.fs.permission.PermissionStatus API is used in practice; each snippet is taken from an open-source project on GitHub.
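
Before the project examples, here is a minimal self-contained sketch of the core API: constructing a status and reading back its three fields. The user and group names are made up for illustration.

import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;

public class PermissionStatusDemo {
  public static void main(String[] args) {
    // A status for a hypothetical user/group with mode 0644.
    PermissionStatus ps = new PermissionStatus(
        "alice", "staff", FsPermission.createImmutable((short) 0644));

    System.out.println(ps.getUserName());   // alice
    System.out.println(ps.getGroupName());  // staff
    System.out.println(ps.getPermission()); // rw-r--r--
  }
}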

Example 1 (project big-c, file FSImageFormat.java)
/** Load {@link INodeFileAttributes}. */
public INodeFileAttributes loadINodeFileAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();
  
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asFile();
  }
  
  final byte[] name = FSImageSerialization.readLocalName(in);
  final PermissionStatus permissions = PermissionStatus.read(in);
  final long modificationTime = in.readLong();
  final long accessTime = in.readLong();
  
  final short replication = namesystem.getBlockManager().adjustReplication(
      in.readShort());
  final long preferredBlockSize = in.readLong();

  return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime,
      accessTime, replication, preferredBlockSize, (byte) 0, null);
}
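
The loader above restores a status with the static PermissionStatus.read(DataInput) factory. Since PermissionStatus implements Writable, it also has a matching write(DataOutput); a hedged round-trip sketch, reusing the ps instance from the intro sketch (to be placed in a method that throws IOException):

// Serialize with write(), restore with the static read() factory.
java.io.ByteArrayOutputStream bos = new java.io.ByteArrayOutputStream();
ps.write(new java.io.DataOutputStream(bos));
PermissionStatus copy = PermissionStatus.read(
    new java.io.DataInputStream(
        new java.io.ByteArrayInputStream(bos.toByteArray())));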
 
Example 2 (project big-c, file FSImageLoader.java)
private PermissionStatus getPermissionStatus(String path) throws IOException {
  long id = lookup(path);
  FsImageProto.INodeSection.INode inode = fromINodeId(id);
  switch (inode.getType()) {
    case FILE: {
      FsImageProto.INodeSection.INodeFile f = inode.getFile();
      return FSImageFormatPBINode.Loader.loadPermission(
          f.getPermission(), stringTable);
    }
    case DIRECTORY: {
      FsImageProto.INodeSection.INodeDirectory d = inode.getDirectory();
      return FSImageFormatPBINode.Loader.loadPermission(
          d.getPermission(), stringTable);
    }
    case SYMLINK: {
      FsImageProto.INodeSection.INodeSymlink s = inode.getSymlink();
      return FSImageFormatPBINode.Loader.loadPermission(
          s.getPermission(), stringTable);
    }
    default: {
      return null;
    }
  }
}
 
Example 3 (project big-c, file TestGetBlockLocations.java)
private static FSNamesystem setupFileSystem() throws IOException {
  Configuration conf = new Configuration();
  conf.setLong(DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1L);
  FSEditLog editlog = mock(FSEditLog.class);
  FSImage image = mock(FSImage.class);
  when(image.getEditLog()).thenReturn(editlog);
  final FSNamesystem fsn = new FSNamesystem(conf, image, true);

  final FSDirectory fsd = fsn.getFSDirectory();
  INodesInPath iip = fsd.getINodesInPath("/", true);
  PermissionStatus perm = new PermissionStatus(
      "hdfs", "supergroup",
      FsPermission.createImmutable((short) 0x1ff));
  final INodeFile file = new INodeFile(
      MOCK_INODE_ID, FILE_NAME.getBytes(Charsets.UTF_8),
      perm, 1, 1, new BlockInfoContiguous[] {}, (short) 1,
      DFS_BLOCK_SIZE_DEFAULT);
  fsn.getFSDirectory().addINode(iip, file);
  return fsn;
}
 
Example 4 (project big-c, file NativeAzureFileSystem.java)
@Override
public void setOwner(Path p, String username, String groupname)
    throws IOException {
  Path absolutePath = makeAbsolute(p);
  String key = pathToKey(absolutePath);
  FileMetadata metadata = store.retrieveMetadata(key);
  if (metadata == null) {
    throw new FileNotFoundException("File doesn't exist: " + p);
  }
  PermissionStatus newPermissionStatus = new PermissionStatus(
      username == null ?
          metadata.getPermissionStatus().getUserName() : username,
      groupname == null ?
          metadata.getPermissionStatus().getGroupName() : groupname,
      metadata.getPermissionStatus().getPermission());
  if (metadata.getBlobMaterialization() == BlobMaterialization.Implicit) {
    // It's an implicit folder, need to materialize it.
    store.storeEmptyFolder(key, newPermissionStatus);
  } else {
    store.changePermissionStatus(key, newPermissionStatus);
  }
}
 
Example 5 (project hadoop, file FSImageFormat.java)
/** Load {@link INodeFileAttributes}. */
public INodeFileAttributes loadINodeFileAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();
  
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asFile();
  }
  
  final byte[] name = FSImageSerialization.readLocalName(in);
  final PermissionStatus permissions = PermissionStatus.read(in);
  final long modificationTime = in.readLong();
  final long accessTime = in.readLong();
  
  final short replication = namesystem.getBlockManager().adjustReplication(
      in.readShort());
  final long preferredBlockSize = in.readLong();

  return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime,
      accessTime, replication, preferredBlockSize, (byte) 0, null);
}
 
Example 6 (project hadoop, file FSEditLog.java)
/** 
 * Add create directory record to edit log
 */
public void logMkDir(String path, INode newNode) {
  PermissionStatus permissions = newNode.getPermissionStatus();
  MkdirOp op = MkdirOp.getInstance(cache.get())
    .setInodeId(newNode.getId())
    .setPath(path)
    .setTimestamp(newNode.getModificationTime())
    .setPermissionStatus(permissions);

  AclFeature f = newNode.getAclFeature();
  if (f != null) {
    op.setAclEntries(AclStorage.readINodeLogicalAcl(newNode));
  }

  XAttrFeature x = newNode.getXAttrFeature();
  if (x != null) {
    op.setXAttrs(x.getXAttrs());
  }
  logEdit(op);
}
 
Example 7 (project hadoop, file FSImageLoader.java)
private PermissionStatus getPermissionStatus(String path) throws IOException {
  long id = lookup(path);
  FsImageProto.INodeSection.INode inode = fromINodeId(id);
  switch (inode.getType()) {
    case FILE: {
      FsImageProto.INodeSection.INodeFile f = inode.getFile();
      return FSImageFormatPBINode.Loader.loadPermission(
          f.getPermission(), stringTable);
    }
    case DIRECTORY: {
      FsImageProto.INodeSection.INodeDirectory d = inode.getDirectory();
      return FSImageFormatPBINode.Loader.loadPermission(
          d.getPermission(), stringTable);
    }
    case SYMLINK: {
      FsImageProto.INodeSection.INodeSymlink s = inode.getSymlink();
      return FSImageFormatPBINode.Loader.loadPermission(
          s.getPermission(), stringTable);
    }
    default: {
      return null;
    }
  }
}
 
Example 8 (project hadoop, file TestGetBlockLocations.java)
private static FSNamesystem setupFileSystem() throws IOException {
  Configuration conf = new Configuration();
  conf.setLong(DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1L);
  FSEditLog editlog = mock(FSEditLog.class);
  FSImage image = mock(FSImage.class);
  when(image.getEditLog()).thenReturn(editlog);
  final FSNamesystem fsn = new FSNamesystem(conf, image, true);

  final FSDirectory fsd = fsn.getFSDirectory();
  INodesInPath iip = fsd.getINodesInPath("/", true);
  PermissionStatus perm = new PermissionStatus(
      "hdfs", "supergroup",
      FsPermission.createImmutable((short) 0x1ff));
  final INodeFile file = new INodeFile(
      MOCK_INODE_ID, FILE_NAME.getBytes(Charsets.UTF_8),
      perm, 1, 1, new BlockInfoContiguous[] {}, (short) 1,
      DFS_BLOCK_SIZE_DEFAULT);
  fsn.getFSDirectory().addINode(iip, file);
  return fsn;
}
 
Example 9 (project hadoop, file TestDefaultBlockPlacementPolicy.java)
@Before
public void setup() throws IOException {
  StaticMapping.resetMap();
  Configuration conf = new HdfsConfiguration();
  final String[] racks = { "/RACK0", "/RACK0", "/RACK2", "/RACK3", "/RACK2" };
  final String[] hosts = { "/host0", "/host1", "/host2", "/host3", "/host4" };

  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).racks(racks)
      .hosts(hosts).build();
  cluster.waitActive();
  nameNodeRpc = cluster.getNameNodeRpc();
  namesystem = cluster.getNamesystem();
  perm = new PermissionStatus("TestDefaultBlockPlacementPolicy", null,
      FsPermission.getDefault());
}
 
Example 10 (project hadoop-gpu, file NameNode.java)
/** {@inheritDoc} */
public void create(String src,
                   FsPermission masked,
                   String clientName,
                   boolean overwrite,
                   short replication,
                   long blockSize) throws IOException {
  String clientMachine = getClientMachine();
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*DIR* NameNode.create: file "
                       +src+" for "+clientName+" at "+clientMachine);
  }
  if (!checkPathLength(src)) {
    throw new IOException("create: Pathname too long.  Limit " 
                          + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
  }
  namesystem.startFile(src,
      new PermissionStatus(UserGroupInformation.getCurrentUGI().getUserName(),
          null, masked),
      clientName, clientMachine, overwrite, replication, blockSize);
  myMetrics.numFilesCreated.inc();
  myMetrics.numCreateFileOps.inc();
}
 
Example 11 (project big-c, file TestDefaultBlockPlacementPolicy.java)
@Before
public void setup() throws IOException {
  StaticMapping.resetMap();
  Configuration conf = new HdfsConfiguration();
  final String[] racks = { "/RACK0", "/RACK0", "/RACK2", "/RACK3", "/RACK2" };
  final String[] hosts = { "/host0", "/host1", "/host2", "/host3", "/host4" };

  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).racks(racks)
      .hosts(hosts).build();
  cluster.waitActive();
  nameNodeRpc = cluster.getNameNodeRpc();
  namesystem = cluster.getNamesystem();
  perm = new PermissionStatus("TestDefaultBlockPlacementPolicy", null,
      FsPermission.getDefault());
}
 
Example 12 (project RDFS, file INodeFileUnderConstruction.java)
public INodeFileUnderConstruction(byte[] name,
                           short blockReplication,
                           long modificationTime,
                           long preferredBlockSize,
                           BlockInfo[] blocks,
                           PermissionStatus perm,
                           String clientName,
                           String clientMachine,
                           DatanodeDescriptor clientNode) {
  super(perm, blocks, blockReplication, modificationTime, modificationTime,
        preferredBlockSize);
  setLocalName(name);
  this.clientName = clientName;
  this.clientMachine = clientMachine;
  this.clientNode = clientNode;
}
 
Example 13 (project big-c, file FSEditLog.java)
/** 
 * Add create directory record to edit log
 */
public void logMkDir(String path, INode newNode) {
  PermissionStatus permissions = newNode.getPermissionStatus();
  MkdirOp op = MkdirOp.getInstance(cache.get())
    .setInodeId(newNode.getId())
    .setPath(path)
    .setTimestamp(newNode.getModificationTime())
    .setPermissionStatus(permissions);

  AclFeature f = newNode.getAclFeature();
  if (f != null) {
    op.setAclEntries(AclStorage.readINodeLogicalAcl(newNode));
  }

  XAttrFeature x = newNode.getXAttrFeature();
  if (x != null) {
    op.setXAttrs(x.getXAttrs());
  }
  logEdit(op);
}
 
Example 14 (project big-c, file FSImageTestUtil.java)
/**
 * Create an aborted in-progress log in the given directory, containing
 * only a specified number of "mkdirs" operations.
 */
public static void createAbortedLogWithMkdirs(File editsLogDir, int numDirs,
    long firstTxId, long newInodeId) throws IOException {
  FSEditLog editLog = FSImageTestUtil.createStandaloneEditLog(editsLogDir);
  editLog.setNextTxId(firstTxId);
  editLog.openForWrite();
  
  PermissionStatus perms = PermissionStatus.createImmutable("fakeuser", "fakegroup",
      FsPermission.createImmutable((short)0755));
  for (int i = 1; i <= numDirs; i++) {
    String dirName = "dir" + i;
    INodeDirectory dir = new INodeDirectory(newInodeId + i - 1,
        DFSUtil.string2Bytes(dirName), perms, 0L);
    editLog.logMkDir("/" + dirName, dir);
  }
  editLog.logSync();
  editLog.abortCurrentLogSegment();
}
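
Note that Example 14 builds the status with the static createImmutable factory rather than the constructor. The immutable variant overrides applyUMask to throw UnsupportedOperationException, so the perms instance shared across loop iterations cannot be mutated; a short sketch:

PermissionStatus frozen = PermissionStatus.createImmutable(
    "fakeuser", "fakegroup", FsPermission.createImmutable((short) 0755));
// frozen.applyUMask(new FsPermission((short) 0022));
// -> throws UnsupportedOperationException, unlike a constructor-built instance.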
 
Example 15 (project hadoop, file NativeAzureFileSystem.java)
@Override
public void setPermission(Path p, FsPermission permission) throws IOException {
  Path absolutePath = makeAbsolute(p);
  String key = pathToKey(absolutePath);
  FileMetadata metadata = store.retrieveMetadata(key);
  if (metadata == null) {
    throw new FileNotFoundException("File doesn't exist: " + p);
  }
  permission = applyUMask(permission,
      metadata.isDir() ? UMaskApplyMode.ChangeExistingDirectory
          : UMaskApplyMode.ChangeExistingFile);
  if (metadata.getBlobMaterialization() == BlobMaterialization.Implicit) {
    // It's an implicit folder, need to materialize it.
    store.storeEmptyFolder(key, createPermissionStatus(permission));
  } else if (!metadata.getPermissionStatus().getPermission().
      equals(permission)) {
    store.changePermissionStatus(key, new PermissionStatus(
        metadata.getPermissionStatus().getUserName(),
        metadata.getPermissionStatus().getGroupName(),
        permission));
  }
}
 
Example 16 (project RDFS, file FSImageSerialization.java)
static INodeFileUnderConstruction readINodeUnderConstruction(
                          DataInputStream in) throws IOException {
  byte[] name = readBytes(in);
  String path = DFSUtil.bytes2String(name);
  short blockReplication = in.readShort();
  long modificationTime = in.readLong();
  long preferredBlockSize = in.readLong();
  int numBlocks = in.readInt();
  BlockInfo[] blocks = new BlockInfo[numBlocks];
  Block blk = new Block();
  for (int i = 0; i < numBlocks; i++) {
    blk.readFields(in);
    blocks[i] = new BlockInfo(blk, blockReplication);
  }
  PermissionStatus perm = PermissionStatus.read(in);
  String clientName = readString(in);
  String clientMachine = readString(in);

  // These locations are not used at all
  int numLocs = in.readInt();
  DatanodeDescriptor[] locations = new DatanodeDescriptor[numLocs];
  for (int i = 0; i < numLocs; i++) {
    locations[i] = new DatanodeDescriptor();
    locations[i].readFields(in);
  }

  return new INodeFileUnderConstruction(name, 
                                        blockReplication, 
                                        modificationTime,
                                        preferredBlockSize,
                                        blocks,
                                        perm,
                                        clientName,
                                        clientMachine,
                                        null);
}
 
Example 17 (project RDFS, file FSEditLogOp.java)
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {

  this.length = in.readInt();
  if ((-17 < logVersion && length != 2) ||
      (logVersion <= -17 && length != 3)) {
    throw new IOException("Incorrect data format. "
                          + "Mkdir operation.");
  }
  this.path = FSImageSerialization.readString(in);
  this.timestamp = readLong(in);

  // The disk format stores atimes for directories as well.
  // However, currently this is not being updated/used because of
  // performance reasons.
  if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, logVersion)) {
    /* unused this.atime = */
    readLong(in);
  }

  if (logVersion <= -11) {
    this.permissions = PermissionStatus.read(in);
  } else {
    this.permissions = null;
  }
}
 
Example 18 (project hadoop, file INodeDirectoryAttributes.java)
public CopyWithQuota(byte[] name, PermissionStatus permissions,
    AclFeature aclFeature, long modificationTime, long nsQuota,
    long dsQuota, EnumCounters<StorageType> typeQuotas, XAttrFeature xAttrsFeature) {
  super(name, permissions, aclFeature, modificationTime, xAttrsFeature);
  this.quota = new QuotaCounts.Builder().nameSpace(nsQuota).
      storageSpace(dsQuota).typeSpaces(typeQuotas).build();
}
 
Example 19 (project big-c, file FSDirectory.java)
/**
 * Add the given filename to the fs.
 * @return the new INodesInPath instance that contains the new INode
 */
INodesInPath addFile(INodesInPath existing, String localName, PermissionStatus
    permissions, short replication, long preferredBlockSize,
    String clientName, String clientMachine)
  throws FileAlreadyExistsException, QuotaExceededException,
    UnresolvedLinkException, SnapshotAccessControlException, AclException {

  long modTime = now();
  INodeFile newNode = newINodeFile(allocateNewInodeId(), permissions, modTime,
      modTime, replication, preferredBlockSize);
  newNode.setLocalName(localName.getBytes(Charsets.UTF_8));
  newNode.toUnderConstruction(clientName, clientMachine);

  INodesInPath newiip;
  writeLock();
  try {
    newiip = addINode(existing, newNode);
  } finally {
    writeUnlock();
  }
  if (newiip == null) {
    NameNode.stateChangeLog.info("DIR* addFile: failed to add " +
        existing.getPath() + "/" + localName);
    return null;
  }

  if(NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* addFile: " + localName + " is added");
  }
  return newiip;
}
 
Example 20 (project hadoop-gpu, file TestDistCh.java)
static void checkFileStatus(PermissionStatus expected, FileStatus actual) {
  assertEquals(expected.getUserName(), actual.getOwner());
  assertEquals(expected.getGroupName(), actual.getGroup());
  FsPermission perm = expected.getPermission(); 
  if (!actual.isDir()) {
    perm = perm.applyUMask(UMASK);
  }
  assertEquals(perm, actual.getPermission());
}
 
Example 21 (project hadoop, file FSDirSymlinkOp.java)
static INodeSymlink unprotectedAddSymlink(FSDirectory fsd, INodesInPath iip,
    byte[] localName, long id, String target, long mtime, long atime,
    PermissionStatus perm)
    throws UnresolvedLinkException, QuotaExceededException {
  assert fsd.hasWriteLock();
  final INodeSymlink symlink = new INodeSymlink(id, null, perm, mtime, atime,
      target);
  symlink.setLocalName(localName);
  return fsd.addINode(iip, symlink) != null ? symlink : null;
}
 
Example 22 (project big-c, file NameNodeRpcServer.java)
@Override // ClientProtocol
public void createSymlink(String target, String link, FsPermission dirPerms,
    boolean createParent) throws IOException {
  checkNNStartup();
  CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return; // Return previous response
  }

  /* We enforce the MAX_PATH_LENGTH limit even though a symlink target
   * URI may refer to a non-HDFS file system. 
   */
  if (!checkPathLength(link)) {
    throw new IOException("Symlink path exceeds " + MAX_PATH_LENGTH +
                          " character limit");
  }

  final UserGroupInformation ugi = getRemoteUser();

  boolean success = false;
  try {
    PermissionStatus perm = new PermissionStatus(ugi.getShortUserName(),
        null, dirPerms);
    namesystem.createSymlink(target, link, perm, createParent,
        cacheEntry != null);
    success = true;
  } finally {
    RetryCache.setState(cacheEntry, success);
  }
}
 
Example 23 (project big-c, file INodeWithAdditionalFields.java)
/** Encode the {@link PermissionStatus} to a long. */
static long toLong(PermissionStatus ps) {
  long permission = 0L;
  final int user = SerialNumberManager.INSTANCE.getUserSerialNumber(
      ps.getUserName());
  permission = USER.BITS.combine(user, permission);
  final int group = SerialNumberManager.INSTANCE.getGroupSerialNumber(
      ps.getGroupName());
  permission = GROUP.BITS.combine(group, permission);
  final int mode = ps.getPermission().toShort();
  permission = MODE.BITS.combine(mode, permission);
  return permission;
}
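
toLong packs three fields into one 64-bit word: the 16-bit permission mode in the low bits, with the group and user serial numbers above it. A minimal unpacking sketch with plain bit operations follows; the offsets (16 and 40) and 24-bit masks mirror the protobuf loader in Example 28 below and are illustrative, since the actual in-memory widths are defined by the USER/GROUP/MODE bit formats referenced above.

// Illustrative inverse of the packing above (field widths assumed).
static short modeOf(long packed)    { return (short) (packed & 0xffff); }
static int   groupIdOf(long packed) { return (int) ((packed >>> 16) & 0xffffff); }
static int   userIdOf(long packed)  { return (int) ((packed >>> 40) & 0xffffff); }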
 
Example 24 (project hadoop-gpu, file INodeFile.java)
protected INodeFile(PermissionStatus permissions, BlockInfo[] blklist,
                    short replication, long modificationTime,
                    long atime, long preferredBlockSize) {
  super(permissions, modificationTime, atime);
  this.blockReplication = replication;
  this.preferredBlockSize = preferredBlockSize;
  blocks = blklist;
}
 
Example 25 (project hadoop-gpu, file INodeDirectoryWithQuota.java)
/** constructor with no quota verification */
INodeDirectoryWithQuota(
    PermissionStatus permissions, long modificationTime, 
    long nsQuota, long dsQuota)
{
  super(permissions, modificationTime);
  this.nsQuota = nsQuota;
  this.dsQuota = dsQuota;
  this.nsCount = 1;
}
 
Example 26 (project hadoop, file INodeWithAdditionalFields.java)
/** Encode the {@link PermissionStatus} to a long. */
static long toLong(PermissionStatus ps) {
  long permission = 0L;
  final int user = SerialNumberManager.INSTANCE.getUserSerialNumber(
      ps.getUserName());
  permission = USER.BITS.combine(user, permission);
  final int group = SerialNumberManager.INSTANCE.getGroupSerialNumber(
      ps.getGroupName());
  permission = GROUP.BITS.combine(group, permission);
  final int mode = ps.getPermission().toShort();
  permission = MODE.BITS.combine(mode, permission);
  return permission;
}
 
Example 27 (project hadoop-gpu, file INodeFileUnderConstruction.java)
INodeFileUnderConstruction(PermissionStatus permissions,
                           short replication,
                           long preferredBlockSize,
                           long modTime,
                           String clientName,
                           String clientMachine,
                           DatanodeDescriptor clientNode) {
  super(permissions.applyUMask(UMASK), 0, replication, modTime, modTime,
      preferredBlockSize);
  this.clientName = clientName;
  this.clientMachine = clientMachine;
  this.clientNode = clientNode;
}
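
Example 27 applies a umask to the incoming status before initializing the inode. PermissionStatus.applyUMask delegates to FsPermission.applyUMask, which clears the umask bits from the mode; a small sketch with an assumed 022 umask:

PermissionStatus open = new PermissionStatus(
    "alice", "staff", new FsPermission((short) 0777));
PermissionStatus masked = open.applyUMask(new FsPermission((short) 0022));
// masked.getPermission() is now rwxr-xr-x (0777 & ~022 == 0755).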
 
Example 28 (project hadoop, file FSImageFormatPBINode.java)
public static PermissionStatus loadPermission(long id,
    final String[] stringTable) {
  short perm = (short) (id & ((1 << GROUP_STRID_OFFSET) - 1));
  int gsid = (int) ((id >> GROUP_STRID_OFFSET) & USER_GROUP_STRID_MASK);
  int usid = (int) ((id >> USER_STRID_OFFSET) & USER_GROUP_STRID_MASK);
  return new PermissionStatus(stringTable[usid], stringTable[gsid],
      new FsPermission(perm));
}
 
Example 29 (project big-c, file TestFSPermissionChecker.java)
private static INodeFile createINodeFile(INodeDirectory parent, String name,
    String owner, String group, short perm) throws IOException {
  PermissionStatus permStatus = PermissionStatus.createImmutable(owner, group,
    FsPermission.createImmutable(perm));
  INodeFile inodeFile = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
    name.getBytes("UTF-8"), permStatus, 0L, 0L, null, REPLICATION,
    PREFERRED_BLOCK_SIZE, (byte)0);
  parent.addChild(inodeFile);
  return inodeFile;
}
 
Example 30 (project big-c, file AzureNativeFileSystemStore.java)
/**
 * Changes the permission status on the given key.
 */
@Override
public void changePermissionStatus(String key, PermissionStatus newPermission)
    throws AzureException {
  try {
    checkContainer(ContainerAccessType.ReadThenWrite);
    CloudBlobWrapper blob = getBlobReference(key);
    blob.downloadAttributes(getInstrumentedContext());
    storePermissionStatus(blob, newPermission);
    blob.uploadMetadata(getInstrumentedContext());
  } catch (Exception e) {
    throw new AzureException(e);
  }
}
 