[
https://issues.apache.org/jira/browse/STORM-885?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15018772#comment-15018772
]
ASF GitHub Bot commented on STORM-885:
--------------------------------------
Github user d2r commented on a diff in the pull request:
https://github.com/apache/storm/pull/838#discussion_r45517771
--- Diff:
storm-core/src/jvm/org/apache/storm/pacemaker/codec/ThriftEncoder.java ---
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.pacemaker.codec;
+
+import org.jboss.netty.handler.codec.oneone.OneToOneEncoder;
+import org.jboss.netty.channel.ChannelHandlerContext;
+import org.jboss.netty.channel.Channel;
+import backtype.storm.generated.HBMessage;
+import backtype.storm.generated.HBMessageData;
+import backtype.storm.generated.HBServerMessageType;
+import org.jboss.netty.buffer.ChannelBuffers;
+import org.jboss.netty.buffer.ChannelBuffer;
+import backtype.storm.utils.Utils;
+import backtype.storm.messaging.netty.ControlMessage;
+import backtype.storm.messaging.netty.SaslMessageToken;
+import backtype.storm.messaging.netty.INettySerializable;
+import java.io.IOException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.thrift.TBase;
+
+public class ThriftEncoder extends OneToOneEncoder {
+
+ private static final Logger LOG = LoggerFactory
+ .getLogger(ThriftEncoder.class);
+
+ private HBMessage encodeNettySerializable(INettySerializable
netty_message,
+ HBServerMessageType mType) {
+
+ HBMessageData message_data = new HBMessageData();
+ HBMessage m = new HBMessage();
+ try {
+ ChannelBuffer cbuffer = netty_message.buffer();
+ if(cbuffer.hasArray()) {
+ message_data.set_message_blob(cbuffer.array());
+ }
+ else {
+ byte buff[] = new byte[netty_message.encodeLength()];
+ cbuffer.readBytes(buff, 0, netty_message.encodeLength());
+ message_data.set_message_blob(buff);
+ }
+ m.set_type(mType);
+ m.set_data(message_data);
+ return m;
+ }
+ catch( IOException e) {
+ LOG.error("Failed to encode NettySerializable: ", e);
+ throw new RuntimeException(e);
+ }
+ }
+
+ @Override
+ protected Object encode(ChannelHandlerContext ctx, Channel channel,
Object msg) {
+ if(msg == null) return null;
+
+ LOG.debug("Trying to encode: " + msg.getClass().toString() + " : "
+ msg.toString());
+
+ HBMessage m;
+ if(msg instanceof INettySerializable) {
+ INettySerializable nettyMsg = (INettySerializable)msg;
+
+ HBServerMessageType type;
+ if(msg instanceof ControlMessage) {
+ type = HBServerMessageType.CONTROL_MESSAGE;
+ }
+ else if(msg instanceof SaslMessageToken) {
+ type = HBServerMessageType.SASL_MESSAGE_TOKEN;
+ }
+ else {
+ LOG.error("Didn't recognise INettySerializable: " +
nettyMsg.toString());
+ throw new RuntimeException("Unrecognized
INettySerializable.");
+ }
+ m = encodeNettySerializable(nettyMsg, type);
+ }
+ else {
+ m = (HBMessage)msg;
+ }
+
+ try {
+ byte serialized[] = Utils.thriftSerialize((TBase)m);
--- End diff --
already a `TBase`
> Heartbeat Server (Pacemaker)
> ----------------------------
>
> Key: STORM-885
> URL: https://issues.apache.org/jira/browse/STORM-885
> Project: Apache Storm
> Issue Type: Improvement
> Components: storm-core
> Reporter: Robert Joseph Evans
> Assignee: Kyle Nusbaum
>
> Large highly connected topologies and large clusters write a lot of data into
> ZooKeeper. The heartbeats, that make up the majority of this data, do not
> need to be persisted to disk. Pacemaker is intended to be a secure
> replacement for storing the heartbeats without changing anything within the
> heartbeats. In the future as more metrics are added in, we may want to look
> into switching it over to look more like Heron, where a metrics server is
> running for each node/topology. And can be used to aggregate/pre-aggregate
> them in a more scalable manner.
--
This message was sent by Atlassian JIRA
(v6.3.4#6332)