Github user knusbaum commented on a diff in the pull request:

    https://github.com/apache/storm/pull/845#discussion_r44989851

--- Diff: external/storm-hdfs/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStore.java ---
@@ -0,0 +1,381 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.hdfs.blobstore;
+
+import backtype.storm.Config;
+import backtype.storm.blobstore.AtomicOutputStream;
+import backtype.storm.blobstore.BlobStore;
+import backtype.storm.blobstore.BlobStoreAclHandler;
+import backtype.storm.blobstore.BlobStoreFile;
+import backtype.storm.blobstore.InputStreamWithMeta;
+import backtype.storm.generated.AuthorizationException;
+import backtype.storm.generated.KeyNotFoundException;
+import backtype.storm.generated.KeyAlreadyExistsException;
+import backtype.storm.generated.ReadableBlobMeta;
+import backtype.storm.generated.SettableBlobMeta;
+import backtype.storm.nimbus.NimbusInfo;
+import backtype.storm.utils.NimbusClient;
+import backtype.storm.utils.Utils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.thrift7.TBase;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.security.auth.Subject;
+import java.io.ByteArrayOutputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import java.util.Iterator;
+import java.util.Map;
+
+import static backtype.storm.blobstore.BlobStoreAclHandler.ADMIN;
+import static backtype.storm.blobstore.BlobStoreAclHandler.READ;
+import static backtype.storm.blobstore.BlobStoreAclHandler.WRITE;
+
+/**
+ * Provides an HDFS file system backed blob store implementation.
+ * Note that this provides an API for having HDFS be the backing store for the blobstore;
+ * it is not a service/daemon.
+ */
+public class HdfsBlobStore extends BlobStore {
+    public static final Logger LOG = LoggerFactory.getLogger(HdfsBlobStore.class);
+    private static final String DATA_PREFIX = "data_";
+    private static final String META_PREFIX = "meta_";
+    private BlobStoreAclHandler _aclHandler;
+    private HdfsBlobStoreImpl _hbs;
+    private Subject _localSubject;
+    private Map conf;
+
+    /*
+     * Get the subject from Hadoop so we can use it to validate the ACLs. There is no direct
+     * interface from UserGroupInformation to get the subject, so do a doAs and get the context.
+     * We could probably run everything in the doAs but for now just grab the subject.
+     */
+    private Subject getHadoopUser() {
+        Subject subj;
+        try {
+            subj = UserGroupInformation.getCurrentUser().doAs(
+                    new PrivilegedAction<Subject>() {
+                        @Override
+                        public Subject run() {
+                            return Subject.getSubject(AccessController.getContext());
+                        }
+                    });
+        } catch (IOException e) {
+            throw new RuntimeException("Error creating subject and logging user in!", e);
+        }
+        return subj;
+    }
+
+    // If who is null then we want to use the user Hadoop says we are.
+    // Required for the supervisor to call these routines, as it's not
+    // logged in as anyone.
+    private Subject checkAndGetSubject(Subject who) {
+        if (who == null) {
+            return _localSubject;
+        }
+        return who;
+    }
+
+    @Override
+    public void prepare(Map conf, String overrideBase, NimbusInfo nimbusInfo) {
+        this.conf = conf;
+        prepareInternal(conf, overrideBase, null);
+    }
+
+    /*
+     * Allow a Hadoop Configuration to be passed for testing. If it's null then the Hadoop configs
+     * must be on your classpath.
+     */
--- End diff --

This comment should also be javadoc-style (`/** ... */`).
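As an aside for anyone reading along: the doAs trick in `getHadoopUser()` above can be exercised on its own. Here is a minimal, self-contained sketch of the same pattern, assuming Hadoop's `UserGroupInformation` is on the classpath (the class name `SubjectDemo` is just for illustration, not part of the patch):

```java
import org.apache.hadoop.security.UserGroupInformation;

import javax.security.auth.Subject;
import java.io.IOException;
import java.security.AccessController;
import java.security.PrivilegedAction;

// Illustrative only: recovers the JAAS Subject backing Hadoop's current UGI.
public class SubjectDemo {
    public static void main(String[] args) throws IOException {
        // UGI exposes no direct Subject accessor, so run a doAs and pull the
        // Subject off the AccessControlContext inside the privileged action.
        Subject subj = UserGroupInformation.getCurrentUser().doAs(
                new PrivilegedAction<Subject>() {
                    @Override
                    public Subject run() {
                        return Subject.getSubject(AccessController.getContext());
                    }
                });
        System.out.println("Current Hadoop subject: " + subj);
    }
}
```

That Subject is what the blob store falls back to in `checkAndGetSubject()` when the caller passes `who == null`, e.g. when the supervisor invokes these routines without being logged in as anyone.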