Github user ankitsinghal commented on a diff in the pull request: https://github.com/apache/phoenix/pull/153#discussion_r58187253 --- Diff: phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java --- @@ -3201,4 +3367,155 @@ private MetaDataMutationResult doDropFunction(long clientTimeStamp, List<byte[]> return new MetaDataMutationResult(MutationCode.FUNCTION_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null); } + + @Override + public void createSchema(RpcController controller, CreateSchemaRequest request, + RpcCallback<MetaDataResponse> done) { + MetaDataResponse.Builder builder = MetaDataResponse.newBuilder(); + String schemaName = null; + try { + List<Mutation> schemaMutations = ProtobufUtil.getMutations(request); + schemaName = request.getSchemaName(); + Mutation m = MetaDataUtil.getPutOnlyTableHeaderRow(schemaMutations); + + byte[] lockKey = m.getRow(); + Region region = env.getRegion(); + MetaDataMutationResult result = checkSchemaKeyInRegion(lockKey, region); + if (result != null) { + done.run(MetaDataMutationResult.toProto(result)); + return; + } + List<RowLock> locks = Lists.newArrayList(); + long clientTimeStamp = MetaDataUtil.getClientTimeStamp(schemaMutations); + try { + acquireLock(region, lockKey, locks); + // Get as of latest timestamp so we can detect if we have a + // newer function that already + // exists without making an additional query + ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(lockKey); + PSchema schema = loadSchema(env, lockKey, cacheKey, clientTimeStamp, clientTimeStamp); + if (schema != null) { + if (schema.getTimeStamp() < clientTimeStamp) { + builder.setReturnCode(MetaDataProtos.MutationCode.SCHEMA_ALREADY_EXISTS); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + builder.setSchema(PSchema.toProto(schema)); + done.run(builder.build()); + return; + } else { + builder.setReturnCode(MetaDataProtos.MutationCode.NEWER_SCHEMA_FOUND); + 
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + builder.setSchema(PSchema.toProto(schema)); + done.run(builder.build()); + return; + } + } + region.mutateRowsWithLocks(schemaMutations, Collections.<byte[]> emptySet(), HConstants.NO_NONCE, + HConstants.NO_NONCE); + + // Invalidate the cache - the next getTable call will add it + // TODO: consider loading the table that was just created here, + // patching up the parent table, and updating the cache + Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env) + .getMetaDataCache(); + if (cacheKey != null) { + metaDataCache.invalidate(cacheKey); + } + + // Get timeStamp from mutations - the above method sets it if + // it's unset + long currentTimeStamp = MetaDataUtil.getClientTimeStamp(schemaMutations); + builder.setReturnCode(MetaDataProtos.MutationCode.SCHEMA_NOT_FOUND); + builder.setMutationTime(currentTimeStamp); + done.run(builder.build()); + return; + } finally { + region.releaseRowLocks(locks); + } + } catch (Throwable t) { + logger.error("createFunction failed", t); + ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(schemaName, t)); + } + } + + @Override + public void dropSchema(RpcController controller, DropSchemaRequest request, RpcCallback<MetaDataResponse> done) { + String schemaName = null; + try { + List<Mutation> schemaMetaData = ProtobufUtil.getMutations(request); + schemaName = request.getSchemaName(); + byte[] lockKey = SchemaUtil.getSchemaKey(schemaName); + Region region = env.getRegion(); + MetaDataMutationResult result = checkSchemaKeyInRegion(lockKey, region); + if (result != null) { + done.run(MetaDataMutationResult.toProto(result)); + return; + } + List<RowLock> locks = Lists.newArrayList(); + long clientTimeStamp = MetaDataUtil.getClientTimeStamp(schemaMetaData); + try { + acquireLock(region, lockKey, locks); + List<ImmutableBytesPtr> invalidateList = new ArrayList<ImmutableBytesPtr>(1); + result = 
doDropSchema(clientTimeStamp, schemaName, lockKey, schemaMetaData, invalidateList); + if (result.getMutationCode() != MutationCode.SCHEMA_ALREADY_EXISTS) { + done.run(MetaDataMutationResult.toProto(result)); + return; + } + region.mutateRowsWithLocks(schemaMetaData, Collections.<byte[]> emptySet(), HConstants.NO_NONCE, + HConstants.NO_NONCE); + Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env) + .getMetaDataCache(); + long currentTime = MetaDataUtil.getClientTimeStamp(schemaMetaData); + for (ImmutableBytesPtr ptr : invalidateList) { + metaDataCache.invalidate(ptr); + metaDataCache.put(ptr, newDeletedSchemaMarker(currentTime)); + } + done.run(MetaDataMutationResult.toProto(result)); + return; + } finally { + region.releaseRowLocks(locks); + } + } catch (Throwable t) { + logger.error("drop schema failed:", t); + ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(schemaName, t)); + } + } + + private MetaDataMutationResult doDropSchema(long clientTimeStamp, String schemaName, byte[] key, + List<Mutation> schemaMutations, List<ImmutableBytesPtr> invalidateList) throws IOException, SQLException { + PSchema schema = loadSchema(env, key, new ImmutableBytesPtr(key), clientTimeStamp, clientTimeStamp); + boolean areTablesExists = false; + if (schema == null) { return new MetaDataMutationResult(MutationCode.SCHEMA_NOT_FOUND, + EnvironmentEdgeManager.currentTimeMillis(), null); } + if (schema.getTimeStamp() < clientTimeStamp) { + Region region = env.getRegion(); + Scan scan = MetaDataUtil.newTableRowsScan(SchemaUtil.getKeyForSchema(null, schemaName), MIN_TABLE_TIMESTAMP, + clientTimeStamp); + List<Cell> results = Lists.newArrayList(); + try (RegionScanner scanner = region.getScanner(scan);) { + scanner.next(results); + if (results.isEmpty()) { // Should not be possible + return new MetaDataMutationResult(MutationCode.SCHEMA_NOT_FOUND, + EnvironmentEdgeManager.currentTimeMillis(), null); + } + do { + Cell kv = 
results.get(0); + if (Bytes.compareTo(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), key, 0, + key.length) != 0) { + areTablesExists = true; + break; + } + results.clear(); + scanner.next(results); + } while (!results.isEmpty()); + } + if (areTablesExists) { return new MetaDataMutationResult(MutationCode.UNALLOWED_SCHEMA_MUTATION, schema, + EnvironmentEdgeManager.currentTimeMillis()); } + + return new MetaDataMutationResult(MutationCode.SCHEMA_ALREADY_EXISTS, schema, + EnvironmentEdgeManager.currentTimeMillis()); + } + return new MetaDataMutationResult(MutationCode.SCHEMA_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), --- End diff -- I think SCHEMA_ALREADY_EXISTS is needed in the if block, because there we have checked that the schema timestamp is less than the client timestamp (i.e., the schema exists as of the client's view); otherwise we return MutationCode.SCHEMA_NOT_FOUND.
--- If your project is set up for it, you can reply to this email and have your reply appear on GitHub as well. If your project does not have this feature enabled and wishes so, or if the feature is enabled but not working, please contact infrastructure at infrastruct...@apache.org or file a JIRA ticket with INFRA. ---