The Ranger HDFS plugin hooks into the NameNode through the dfs.namenode.inode.attributes.provider.class property in hdfs-site.xml:

<property>
  <name>dfs.namenode.inode.attributes.provider.class</name>
  <value>org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer</value>
</property>
RangerHdfsAuthorizer extends the HDFS extension point INodeAttributeProvider:

public abstract class INodeAttributeProvider {
  /**
   * Initialize the provider. This method is called at NameNode startup
   * time.
   */
  public abstract void start();

  /**
   * Shutdown the provider. This method is called at NameNode shutdown time.
   */
  public abstract void stop();

  public abstract INodeAttributes getAttributes(String[] pathElements,
      INodeAttributes inode);

  /**
   * Can be over-ridden by implementations to provide a custom Access Control
   * Enforcer that can provide an alternate implementation of the
   * default permission checking logic.
   * @param defaultEnforcer The Default AccessControlEnforcer
   * @return The AccessControlEnforcer to use
   */
  public AccessControlEnforcer getExternalAccessControlEnforcer(
      AccessControlEnforcer defaultEnforcer) {
    return defaultEnforcer;
  }
}
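For orientation, here is a minimal provider that keeps all NameNode defaults. The class name NoOpAttributeProvider and the method bodies are invented for illustration; they are not part of HDFS or Ranger.

// Hypothetical minimal provider, for illustration only.
public class NoOpAttributeProvider extends INodeAttributeProvider {
  @Override
  public void start() {
    // nothing to initialize
  }

  @Override
  public void stop() {
    // nothing to release
  }

  @Override
  public INodeAttributes getAttributes(String[] pathElements, INodeAttributes inode) {
    return inode; // hand back the stored attributes unchanged
  }

  // getExternalAccessControlEnforcer is not overridden, so the NameNode
  // keeps using its default FSPermissionChecker logic.
}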
In RangerHdfsAuthorizer, the start method initializes the plugin, and getExternalAccessControlEnforcer returns the object that actually enforces permission checks. start looks like this:
public void start() {
  if(LOG.isDebugEnabled()) {
    LOG.debug("==> RangerHdfsAuthorizer.start()");
  }

  RangerHdfsPlugin plugin = new RangerHdfsPlugin();
  plugin.init();
  // the initialized plugin is retained for use by the enforcer
  // (remainder of the method elided)
}
RangerHdfsPlugin extends RangerBasePlugin, which implements the machinery common to all Ranger plugins: the PolicyRefresher, the RangerPolicyEngine, and the RangerAccessResultProcessor. plugin.init() wires up the refresher:
RangerAdminClient admin = createAdminClient(serviceName, appId, propertyPrefix);
refresher = new PolicyRefresher(this, serviceType, appId, serviceName, admin, pollingIntervalMs, cacheDir);
refresher.setDaemon(true);
refresher.startRefresher();
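For context: when fresh policies arrive, the refresher pushes them back through plugIn.setPolicies, which rebuilds the policy engine. The following is a simplified sketch of that idea only; the exact RangerPolicyEngineImpl constructor arguments vary across Ranger versions.

// Sketch of RangerBasePlugin.setPolicies (simplified): each successful
// policy download swaps in a freshly built policy engine.
public void setPolicies(ServicePolicies policies) {
  RangerPolicyEngine newEngine =
      new RangerPolicyEngineImpl(appId, policies, policyEngineOptions);

  this.policyEngine = newEngine; // readers see either the old or the new engine
}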
startRefresher starts the PolicyRefresher as a daemon thread; its run loop polls Ranger Admin every pollingIntervalMs milliseconds:

public void run() {
  while(true) {
    loadPolicy();

    try {
      Thread.sleep(pollingIntervalMs);
    } catch(InterruptedException excp) {
      LOG.info("PolicyRefresher(serviceName=" + serviceName + ").run(): interrupted! Exiting thread", excp);
      break;
    }
  }
}
PolicyRefresher.loadPolicy first tries to download the policies from Ranger Admin. On success it saves them to the local cache; on failure it falls back to the cached copy:
private void loadPolicy() {
  try {
    // load policy from PolicyAdmin
    ServicePolicies svcPolicies = loadPolicyfromPolicyAdmin();

    if (svcPolicies == null) {
      // if policy fetch from Policy Admin fails, load from cache
      if (!policiesSetInPlugin) {
        svcPolicies = loadFromCache();
      }
    } else {
      saveToCache(svcPolicies);
    }

    if (svcPolicies != null) {
      plugIn.setPolicies(svcPolicies);
      policiesSetInPlugin = true;
      setLastActivationTimeInMillis(System.currentTimeMillis());
      lastKnownVersion = svcPolicies.getPolicyVersion();
    } else {
      if (!policiesSetInPlugin && !serviceDefSetInPlugin) {
        plugIn.setPolicies(null);
        serviceDefSetInPlugin = true;
      }
    }
  } catch (RangerServiceNotFoundException snfe) {
    // handling elided
  } catch (Exception excp) {
    // handling elided
  }
}
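saveToCache and loadFromCache are essentially JSON (de)serialization of the ServicePolicies object under cacheDir. Here is a sketch of the idea, assuming a Gson instance named gson; the file name is illustrative (the real implementation derives it from appId and serviceName and handles errors more carefully):

// Sketch only: persist the last good policies as JSON so the plugin can
// start even when Ranger Admin is unreachable.
private void saveToCache(ServicePolicies policies) throws IOException {
  File cacheFile = new File(cacheDir, appId + "_" + serviceName + ".json");

  try (Writer writer = new FileWriter(cacheFile)) {
    gson.toJson(policies, writer);
  }
}

private ServicePolicies loadFromCache() throws IOException {
  File cacheFile = new File(cacheDir, appId + "_" + serviceName + ".json");

  if (!cacheFile.exists()) {
    return null; // no cached copy yet
  }
  try (Reader reader = new FileReader(cacheFile)) {
    return gson.fromJson(reader, ServicePolicies.class);
  }
}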
The admin client used to fetch policies is created by createAdminClient:

RangerAdminClient admin = createAdminClient(serviceName, appId, propertyPrefix);

The class to instantiate is taken from the ranger.plugin.hdfs.policy.source.impl setting in conf/ranger-hdfs-security.xml:
<property>
  <name>ranger.plugin.hdfs.service.name</name>
  <value>hadoopdev</value>
  <description>
    Name of the Ranger service containing policies for this HDFS instance
  </description>
</property>

<property>
  <name>ranger.plugin.hdfs.policy.source.impl</name>
  <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
  <description>
    Class to retrieve policies from the source
  </description>
</property>

<property>
  <name>ranger.plugin.hdfs.policy.rest.url</name>
  <value>http://localhost:8098</value>
  <description>
    URL to Ranger Admin
  </description>
</property>
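Conceptually, createAdminClient loads that class reflectively, falling back to RangerAdminRESTClient when the property is unset. A simplified sketch, where config stands for the plugin's configuration object and error handling is omitted:

// Sketch of createAdminClient (simplified): instantiate the configured
// RangerAdminClient implementation, defaulting to the REST client.
String policySourceImpl = config.get(propertyPrefix + ".policy.source.impl",
    RangerAdminRESTClient.class.getName());

RangerAdminClient admin =
    (RangerAdminClient) Class.forName(policySourceImpl).newInstance();
admin.init(serviceName, appId, propertyPrefix);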
RangerAdminRESTClient.getServicePoliciesIfUpdated performs the actual REST call:

public ServicePolicies getServicePoliciesIfUpdated(final long lastKnownVersion, final long lastActivationTimeInMillis) throws Exception {
  // RangerRESTUtils.REST_URL_POLICY_GET_FOR_SERVICE_IF_UPDATED = "/service/plugins/secure/policies/download/"
  WebResource webResource = createWebResource(RangerRESTUtils.REST_URL_POLICY_GET_FOR_SERVICE_IF_UPDATED + serviceName)
      .queryParam(RangerRESTUtils.REST_PARAM_LAST_KNOWN_POLICY_VERSION, Long.toString(lastKnownVersion))
      .queryParam(RangerRESTUtils.REST_PARAM_LAST_ACTIVATION_TIME, Long.toString(lastActivationTimeInMillis))
      .queryParam(RangerRESTUtils.REST_PARAM_PLUGIN_ID, pluginId)
      .queryParam(RangerRESTUtils.REST_PARAM_CLUSTER_NAME, clusterName);

  response = webResource.accept(RangerRESTUtils.REST_MIME_TYPE_JSON).get(ClientResponse.class);
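The rest of the method (elided above) handles the response: HTTP 200 carries a new ServicePolicies payload, while HTTP 304 means nothing has changed since lastKnownVersion. A sketch of that handling:

  // Sketch of the response handling (simplified).
  ServicePolicies ret = null;

  if (response != null && response.getStatus() == 200) {
    ret = response.getEntity(ServicePolicies.class); // updated policies
  } else if (response != null && response.getStatus() == 304) {
    ret = null; // not modified: keep the policies we already have
  }

  return ret;
}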
That covers policy download; now to enforcement. FSPermissionChecker, in package org.apache.hadoop.hdfs.server.namenode, is the NameNode's default permission checker. When checkPermission is called, it first asks the attribute provider for an external access-control enforcer; the default implementation simply hands back defaultEnforcer, i.e. FSPermissionChecker itself:
public AccessControlEnforcer getExternalAccessControlEnforcer(
    AccessControlEnforcer defaultEnforcer) {
  return defaultEnforcer;
}
void checkPermission(INodesInPath inodesInPath, boolean doCheckOwner,
    FsAction ancestorAccess, FsAction parentAccess, FsAction access,
    FsAction subAccess, boolean ignoreEmptyDir)
    throws AccessControlException {
  ...
  AccessControlEnforcer enforcer =
      getAttributesProvider().getExternalAccessControlEnforcer(this);
  enforcer.checkPermission(fsOwner, supergroup, callerUgi, inodeAttrs, inodes,
      pathByNameArr, snapshotId, path, ancestorIndex, doCheckOwner,
      ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);
}
/**
 * Checks permission on a file system object. Has to throw an Exception
 * if the filesystem object is not accessible by the calling Ugi.
 * @param fsOwner Filesystem owner (The Namenode user)
 * @param supergroup super user group
 * @param callerUgi UserGroupInformation of the caller
 * @param inodeAttrs Array of INode attributes for each path element in the
 *                   path
 * @param inodes Array of INodes for each path element in the path
 * @param pathByNameArr Array of byte arrays of the LocalName
 * @param snapshotId the snapshotId of the requested path
 * @param path Path String
 * @param ancestorIndex Index of ancestor
 * @param doCheckOwner perform ownership check
 * @param ancestorAccess The access required by the ancestor of the path.
 * @param parentAccess The access required by the parent of the path.
 * @param access The access required by the path.
 * @param subAccess If path is a directory, It is the access required of
 *                  the path and all the sub-directories. If path is not a
 *                  directory, there should ideally be no effect.
 * @param ignoreEmptyDir Ignore permission checking for empty directory?
 * @throws AccessControlException
 */
@Override
public void checkPermission(String fsOwner, String supergroup,
    UserGroupInformation callerUgi, INodeAttributes[] inodeAttrs,
    INode[] inodes, byte[][] pathByNameArr, int snapshotId, String path,
    int ancestorIndex, boolean doCheckOwner, FsAction ancestorAccess,
    FsAction parentAccess, FsAction access, FsAction subAccess,
    boolean ignoreEmptyDir)
    throws AccessControlException {
  for(; ancestorIndex >= 0 && inodes[ancestorIndex] == null;
      ancestorIndex--);
  checkTraverse(inodeAttrs, path, ancestorIndex);

  final INodeAttributes last = inodeAttrs[inodeAttrs.length - 1];
  if (parentAccess != null && parentAccess.implies(FsAction.WRITE)
      && inodeAttrs.length > 1 && last != null) {
    checkStickyBit(inodeAttrs[inodeAttrs.length - 2], last);
  }
  if (ancestorAccess != null && inodeAttrs.length > 1) {
    check(inodeAttrs, path, ancestorIndex, ancestorAccess);
  }
  if (parentAccess != null && inodeAttrs.length > 1) {
    check(inodeAttrs, path, inodeAttrs.length - 2, parentAccess);
  }
  if (access != null) {
    check(last, path, access);
  }
  if (subAccess != null) {
    INode rawLast = inodes[inodeAttrs.length - 1];
    checkSubAccess(pathByNameArr, inodeAttrs.length - 1, rawLast,
        snapshotId, subAccess, ignoreEmptyDir);
  }
  if (doCheckOwner) {
    checkOwner(last);
  }
}
RangerHdfsAuthorizer overrides this hook to hand back a RangerAccessControlEnforcer that wraps the default one:

@Override
public AccessControlEnforcer getExternalAccessControlEnforcer(AccessControlEnforcer defaultEnforcer) {
  RangerAccessControlEnforcer rangerAce = new RangerAccessControlEnforcer(defaultEnforcer);

  return rangerAce;
}

public RangerAccessControlEnforcer(AccessControlEnforcer defaultEnforcer) {
  this.defaultEnforcer = defaultEnforcer;
}
RangerAccessControlEnforcer.checkPermission first consults plugin.isAccessAllowed(request, auditHandler); only when the plugin cannot determine a result does it fall back to the default check:
@Override
public void checkPermission(String fsOwner, String superGroup, UserGroupInformation ugi,
    INodeAttributes[] inodeAttrs, INode[] inodes, byte[][] pathByNameArr,
    int snapshotId, String path, int ancestorIndex, boolean doCheckOwner,
    FsAction ancestorAccess, FsAction parentAccess, FsAction access,
    FsAction subAccess, boolean ignoreEmptyDir) throws AccessControlException {
  AuthzStatus authzStatus = AuthzStatus.NOT_DETERMINED;
  ...
  // checkINodeAccess
  if(authzStatus == AuthzStatus.ALLOW && access != null && inode != null) {
    INodeAttributes inodeAttribs = inodeAttrs.length > 0 ? inodeAttrs[inodeAttrs.length - 1] : null;

    authzStatus = isAccessAllowed(inode, inodeAttribs, access, user, groups, plugin, auditHandler);
    if (authzStatus == AuthzStatus.NOT_DETERMINED) {
      authzStatus = checkDefaultEnforcer(fsOwner, superGroup, ugi, inodeAttrs, inodes,
          pathByNameArr, snapshotId, path, ancestorIndex, doCheckOwner,
          null, null, access, null, ignoreEmptyDir,
          isTraverseOnlyCheck, ancestor, parent, inode, auditHandler);
    }
  }
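checkDefaultEnforcer (not shown above) is essentially a guarded delegation to the wrapped FSPermissionChecker: it invokes defaultEnforcer.checkPermission with the same arguments and treats "no exception thrown" as ALLOW. A simplified sketch, with the parameter list trimmed; the real method also takes the traverse-only flag, the resolved ancestor/parent/inode, and the audit handler:

// Simplified sketch: fall back to the stock FSPermissionChecker.
private AuthzStatus checkDefaultEnforcer(String fsOwner, String superGroup,
    UserGroupInformation ugi, INodeAttributes[] inodeAttrs, INode[] inodes,
    byte[][] pathByNameArr, int snapshotId, String path, int ancestorIndex,
    boolean doCheckOwner, FsAction ancestorAccess, FsAction parentAccess,
    FsAction access, FsAction subAccess, boolean ignoreEmptyDir)
    throws AccessControlException {
  AuthzStatus authzStatus = AuthzStatus.NOT_DETERMINED;

  if (defaultEnforcer != null) {
    // throws AccessControlException on denial; reaching the next line
    // therefore means the default checker allowed the access
    defaultEnforcer.checkPermission(fsOwner, superGroup, ugi, inodeAttrs,
        inodes, pathByNameArr, snapshotId, path, ancestorIndex, doCheckOwner,
        ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);
    authzStatus = AuthzStatus.ALLOW;
  }
  return authzStatus;
}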
isAccessAllowed translates the requested FsAction into Ranger access types and asks the policy engine about each one:

private AuthzStatus isAccessAllowed(INode inode, INodeAttributes inodeAttribs, FsAction access, String user, Set<String> groups, RangerHdfsPlugin plugin, RangerHdfsAuditHandler auditHandler) {
  AuthzStatus ret = null;
  String path = inode != null ? inode.getFullPathName() : null;
  String pathOwner = inodeAttribs != null ? inodeAttribs.getUserName() : null;
  String clusterName = plugin.getClusterName();

  if(pathOwner == null && inode != null) {
    pathOwner = inode.getUserName();
  }

  if (RangerHadoopConstants.HDFS_ROOT_FOLDER_PATH_ALT.equals(path)) {
    path = RangerHadoopConstants.HDFS_ROOT_FOLDER_PATH;
  }

  Set<String> accessTypes = access2ActionListMapper.get(access);
  if(accessTypes == null) {
    LOG.warn("RangerAccessControlEnforcer.isAccessAllowed(" + path + ", " + access + ", " + user + "): no Ranger accessType found for " + access);
    accessTypes = access2ActionListMapper.get(FsAction.NONE);
  }

  for(String accessType : accessTypes) {
    RangerHdfsAccessRequest request = new RangerHdfsAccessRequest(inode, path, pathOwner, access, accessType, user, groups, clusterName);

    RangerAccessResult result = plugin.isAccessAllowed(request, auditHandler);

    if (result == null || !result.getIsAccessDetermined()) {
      ret = AuthzStatus.NOT_DETERMINED;
      // don't break yet; a subsequent accessType could be denied
    } else if(! result.getIsAllowed()) { // explicit deny
      ret = AuthzStatus.DENY;
      break;
    } else { // allowed
      if(!AuthzStatus.NOT_DETERMINED.equals(ret)) { // set to ALLOW only if there was no NOT_DETERMINED earlier
        ret = AuthzStatus.ALLOW;
      }
    }
  }

  if(ret == null) {
    ret = AuthzStatus.NOT_DETERMINED;
  }

  if(LOG.isDebugEnabled()) {
    LOG.debug("<== RangerAccessControlEnforcer.isAccessAllowed(" + path + ", " + access + ", " + user + "): " + ret);
  }

  return ret;
}
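Finally, the access2ActionListMapper used above maps each FsAction to the Ranger access types it implies. Conceptually it is populated like this (a sketch; consult the Ranger source for the exact contents):

// Sketch: FsAction -> Ranger access types (read/write/execute).
Map<FsAction, Set<String>> access2ActionListMapper = new HashMap<>();

access2ActionListMapper.put(FsAction.NONE,          new HashSet<String>());
access2ActionListMapper.put(FsAction.READ,          new HashSet<>(Arrays.asList("read")));
access2ActionListMapper.put(FsAction.WRITE,         new HashSet<>(Arrays.asList("write")));
access2ActionListMapper.put(FsAction.EXECUTE,       new HashSet<>(Arrays.asList("execute")));
access2ActionListMapper.put(FsAction.READ_WRITE,    new HashSet<>(Arrays.asList("read", "write")));
access2ActionListMapper.put(FsAction.READ_EXECUTE,  new HashSet<>(Arrays.asList("read", "execute")));
access2ActionListMapper.put(FsAction.WRITE_EXECUTE, new HashSet<>(Arrays.asList("write", "execute")));
access2ActionListMapper.put(FsAction.ALL,           new HashSet<>(Arrays.asList("read", "write", "execute")));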