Split qos0 and qos12 publish streams, add ISR #628

Merged 3 commits on Dec 9, 2023
@@ -14,6 +14,7 @@
*/
package io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream;

import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaAckMode.IN_SYNC_REPLICAS;
import static java.time.Instant.now;

import java.nio.ByteOrder;
@@ -278,6 +279,7 @@ private void onMqttBegin(
String topicName = mqttPublishBeginEx.topic().asString();
assert topicName != null;

final int qos = mqttPublishBeginEx.qos();

final String16FW clientId = mqttPublishBeginEx.clientId();
final MutableDirectBuffer clientIdBuffer = new UnsafeBuffer(new byte[clientId.sizeof() + 2]);
@@ -323,11 +325,11 @@ private void onMqttBegin(
.build();
}

messages.doKafkaBegin(traceId, authorization, affinity);
messages.doKafkaBegin(traceId, authorization, affinity, qos);
this.retainAvailable = (mqttPublishBeginEx.flags() & 1 << MqttPublishFlags.RETAIN.value()) != 0;
if (retainAvailable)
{
retained.doKafkaBegin(traceId, authorization, affinity);
retained.doKafkaBegin(traceId, authorization, affinity, qos);
}
}

@@ -802,15 +804,16 @@ private KafkaMessagesProxy(
private void doKafkaBegin(
long traceId,
long authorization,
long affinity)
long affinity,
int qos)
{
initialSeq = delegate.initialSeq;
initialAck = delegate.initialAck;
initialMax = delegate.initialMax;
state = MqttKafkaState.openingInitial(state);

kafka = newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax,
traceId, authorization, affinity, topic);
traceId, authorization, affinity, topic, qos);
}

private void doKafkaData(
@@ -1109,15 +1112,16 @@ private KafkaRetainedProxy(
private void doKafkaBegin(
long traceId,
long authorization,
long affinity)
long affinity,
int qos)
{
initialSeq = delegate.initialSeq;
initialAck = delegate.initialAck;
initialMax = delegate.initialMax;
state = MqttKafkaState.openingInitial(state);

kafka = newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax,
traceId, authorization, affinity, topic);
traceId, authorization, affinity, topic, qos);
}

private void doKafkaData(
@@ -1548,15 +1552,17 @@ private MessageConsumer newKafkaStream(
long traceId,
long authorization,
long affinity,
String16FW topic)
String16FW topic,
int qos)
{
final KafkaAckMode ackMode = qos > 0 ? IN_SYNC_REPLICAS : KAFKA_DEFAULT_ACK_MODE;
final KafkaBeginExFW kafkaBeginEx =
kafkaBeginExRW.wrap(writeBuffer, BeginFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity())
.typeId(kafkaTypeId)
.merged(m -> m.capabilities(c -> c.set(KafkaCapabilities.PRODUCE_ONLY))
.topic(topic)
.partitionsItem(p -> p.partitionId(-1).partitionOffset(-2L))
.ackMode(b -> b.set(KAFKA_DEFAULT_ACK_MODE)))
.ackMode(b -> b.set(ackMode)))
.build();


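For context on the ack-mode change above: QoS 0 publishes keep the binding's default Kafka ack mode, while QoS 1 and QoS 2 publishes now produce with in-sync-replica acknowledgement. A minimal standalone sketch of that selection, assuming the default resolves to LEADER_ONLY as the qos0 scenario script below suggests (the local enum stands in for the generated KafkaAckMode type):

// Sketch only: local stand-in for the generated KafkaAckMode type used in the diff.
enum KafkaAckMode { LEADER_ONLY, IN_SYNC_REPLICAS }

final class PublishAckModes
{
    // Assumed default; the QoS 0 scenario below produces with LEADER_ONLY.
    static final KafkaAckMode KAFKA_DEFAULT_ACK_MODE = KafkaAckMode.LEADER_ONLY;

    static KafkaAckMode ackModeFor(int qos)
    {
        // QoS 0: keep the default acks; QoS 1 and 2: wait for all in-sync replicas.
        return qos > 0 ? KafkaAckMode.IN_SYNC_REPLICAS : KAFKA_DEFAULT_ACK_MODE;
    }
}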
@@ -304,4 +304,15 @@ public void shouldSendMessageQos2() throws Exception
{
k3po.finish();
}

@Test
@Configuration("proxy.yaml")
@Configure(name = WILL_AVAILABLE_NAME, value = "false")
@Specification({
"${mqtt}/publish.mixture.qos/client",
"${kafka}/publish.mixture.qos/server"})
public void shouldSendMessageMixtureQos() throws Exception
{
k3po.finish();
}
}
@@ -577,10 +577,13 @@ public MessageConsumer newStream(
return newStream;
}

private int topicKey(
String topic)
private long topicKey(
String topic,
int qos)
{
return System.identityHashCode(topic.intern());
final int topicHashCode = System.identityHashCode(topic.intern());
final int qosKey = qos > 0 ? MqttQoS.EXACTLY_ONCE.value() : qos;
return ((long) topicHashCode << 32) | (qosKey & 0xFFFFFFFFL);
}

private MessageConsumer newStream(
@@ -1225,12 +1228,12 @@ private int decodePublishV4(
if (reasonCode == SUCCESS)
{
final String topic = mqttPublishHeader.topic;
final int topicKey = topicKey(topic);
final long topicKey = topicKey(topic, qos);
MqttServer.MqttPublishStream publisher = server.publishes.get(topicKey);

if (publisher == null)
{
publisher = server.resolvePublishStream(traceId, authorization, topic);
publisher = server.resolvePublishStream(traceId, authorization, topic, qos);
if (publisher == null)
{
server.decodePublisherKey = 0;
@@ -1359,12 +1362,12 @@ private int decodePublishV5(
if (reasonCode == SUCCESS)
{
final String topic = mqttPublishHeader.topic;
final int topicKey = topicKey(topic);
final long topicKey = topicKey(topic, qos);
MqttServer.MqttPublishStream publisher = server.publishes.get(topicKey);

if (publisher == null)
{
publisher = server.resolvePublishStream(traceId, authorization, topic);
publisher = server.resolvePublishStream(traceId, authorization, topic, qos);
if (publisher == null)
{
server.decodePublisherKey = 0;
@@ -2284,7 +2287,7 @@ private final class MqttServer
private final long replyId;
private final long encodeBudgetId;

private final Int2ObjectHashMap<MqttPublishStream> publishes;
private final Long2ObjectHashMap<MqttPublishStream> publishes;
private final Long2ObjectHashMap<Int2ObjectHashMap<MqttSubscribeStream>> subscribes;
private final Int2ObjectHashMap<String> topicAliases;
private final Int2IntHashMap subscribePacketIds;
@@ -2319,7 +2322,7 @@ private final class MqttServer
private long encodeSlotTraceId;

private MqttServerDecoder decoder;
private int decodePublisherKey;
private long decodePublisherKey;
private int decodeablePacketBytes;

private long connectTimeoutId = NO_CANCEL_ID;
@@ -2375,7 +2378,7 @@ private MqttServer(
this.replyId = replyId;
this.encodeBudgetId = budgetId;
this.decoder = decodeInitialType;
this.publishes = new Int2ObjectHashMap<>();
this.publishes = new Long2ObjectHashMap<>();
this.subscribes = new Long2ObjectHashMap<>();
this.topicAliases = new Int2ObjectHashMap<>();
this.subscribePacketIds = new Int2IntHashMap(-1);
@@ -2968,7 +2971,8 @@ private int onDecodeConnectWillMessage(
private MqttPublishStream resolvePublishStream(
long traceId,
long authorization,
String topic)
String topic,
int qos)
{
MqttPublishStream stream = null;

@@ -2979,9 +2983,9 @@ private MqttPublishStream resolvePublishStream(
if (resolved != null)
{
final long resolvedId = resolved.id;
final int topicKey = topicKey(topic);
final long topicKey = topicKey(topic, qos);

stream = publishes.computeIfAbsent(topicKey, s -> new MqttPublishStream(routedId, resolvedId, topic));
stream = publishes.computeIfAbsent(topicKey, s -> new MqttPublishStream(routedId, resolvedId, topic, qos));
stream.doPublishBegin(traceId, affinity);
}
else
@@ -3024,7 +3028,7 @@ else if (mqttPublishHeaderRO.retained && !retainAvailable(capabilities))
unreleasedPacketIds.add(mqttPublishHeaderRO.packetId);
}

final int topicKey = topicKey(mqttPublishHeaderRO.topic);
final long topicKey = topicKey(mqttPublishHeaderRO.topic, mqttPublishHeaderRO.qos);
MqttPublishStream stream = publishes.get(topicKey);

final MqttDataExFW.Builder builder = mqttPublishDataExRW.wrap(dataExtBuffer, 0, dataExtBuffer.capacity())
@@ -5258,7 +5262,8 @@ public List<Subscription> subscriptions()
private class MqttPublishStream
{
private MessageConsumer application;
private final int topicKey;
private final long topicKey;
private final int qos;
private final String topic;
private final long originId;
private final long routedId;
@@ -5286,14 +5291,16 @@ private class MqttPublishStream
MqttPublishStream(
long originId,
long routedId,
String topic)
String topic,
int qos)
{
this.originId = originId;
this.routedId = routedId;
this.initialId = supplyInitialId.applyAsLong(routedId);
this.replyId = supplyReplyId.applyAsLong(initialId);
this.topic = topic;
this.topicKey = topicKey(topic);
this.qos = qos;
this.topicKey = topicKey(topic, qos);
}

private void doPublishBegin(
@@ -5310,7 +5317,8 @@ private void doPublishBegin(
.publish(p ->
p.clientId(clientId)
.topic(topic)
.flags(retainAvailable(capabilities) ? 1 : 0))
.flags(retainAvailable(capabilities) ? 1 : 0)
.qos(qos))
.build();

application = newStream(this::onPublish, originId, routedId, initialId, initialSeq, initialAck, initialMax,
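On the MQTT server side, publish streams are now keyed by topic and QoS class instead of topic alone, so QoS 0 and QoS 1/2 publishes to the same topic resolve to separate MqttPublishStream instances (hence the switch from Int2ObjectHashMap to Long2ObjectHashMap). A self-contained sketch of the key packing in topicKey above, with QoS 1 and QoS 2 deliberately collapsing to a single value (2, the MQTT EXACTLY_ONCE value):

final class TopicKeys
{
    // High 32 bits: identity hash of the interned topic name.
    // Low 32 bits: QoS class — 0 for QoS 0, 2 (EXACTLY_ONCE) for QoS 1 and QoS 2.
    static long topicKey(String topic, int qos)
    {
        final int topicHashCode = System.identityHashCode(topic.intern());
        final int qosKey = qos > 0 ? 2 : qos;
        return ((long) topicHashCode << 32) | (qosKey & 0xFFFFFFFFL);
    }
}

With this key, decodePublishV4/V5 look up or create at most two publish streams per topic: one for QoS 0 and one shared by QoS 1 and QoS 2.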
@@ -250,6 +250,16 @@ public void shouldPublishQoS2Message() throws Exception
k3po.finish();
}

@Test
@Configuration("server.yaml")
@Specification({
"${net}/publish.mixture.qos/client",
"${app}/publish.mixture.qos/server"})
public void shouldPublishMixtureQos() throws Exception
{
k3po.finish();
}

@Before
public void setSubscriptionId()
{
@@ -423,4 +423,14 @@ public void shouldPublishQoS2MessageAckWithReasoncode() throws Exception
{
k3po.finish();
}

@Test
@Configuration("server.yaml")
@Specification({
"${net}/publish.mixture.qos/client",
"${app}/publish.mixture.qos/server"})
public void shouldPublishMixtureQos() throws Exception
{
k3po.finish();
}
}
@@ -0,0 +1,96 @@
#
# Copyright 2021-2023 Aklivity Inc
#
# Licensed under the Aklivity Community License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at
#
# https://www.aklivity.io/aklivity-community-license/
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
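
# First kafka stream: carries the QoS 0 publish, produced with the default LEADER_ONLY ack mode.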

connect "zilla://streams/kafka0"
option zilla:window 8192
option zilla:transmission "duplex"

write zilla:begin.ext ${kafka:beginEx()
.typeId(zilla:id("kafka"))
.merged()
.capabilities("PRODUCE_ONLY")
.topic("mqtt-messages")
.partition(-1, -2)
.ackMode("LEADER_ONLY")
.build()
.build()}

connected

write zilla:data.ext ${kafka:dataEx()
.typeId(zilla:id("kafka"))
.merged()
.produce()
.deferred(0)
.partition(-1, -1)
.key("sensor/one")
.header("zilla:filter", "sensor")
.header("zilla:filter", "one")
.header("zilla:local", "client")
.header("zilla:qos", "0")
.build()
.build()}
write "message1"
write flush
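
# Second kafka stream: carries the QoS 2 and QoS 1 publishes, produced with IN_SYNC_REPLICAS ack mode.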


connect "zilla://streams/kafka0"
option zilla:window 8192
option zilla:transmission "duplex"

write zilla:begin.ext ${kafka:beginEx()
.typeId(zilla:id("kafka"))
.merged()
.capabilities("PRODUCE_ONLY")
.topic("mqtt-messages")
.partition(-1, -2)
.ackMode("IN_SYNC_REPLICAS")
.build()
.build()}

connected

write zilla:data.ext ${kafka:dataEx()
.typeId(zilla:id("kafka"))
.merged()
.produce()
.deferred(0)
.partition(-1, -1)
.key("sensor/one")
.header("zilla:filter", "sensor")
.header("zilla:filter", "one")
.header("zilla:local", "client")
.header("zilla:qos", "2")
.build()
.build()}
write "message2"
write flush

write zilla:data.ext ${kafka:dataEx()
.typeId(zilla:id("kafka"))
.merged()
.produce()
.deferred(0)
.partition(-1, -1)
.key("sensor/one")
.header("zilla:filter", "sensor")
.header("zilla:filter", "one")
.header("zilla:local", "client")
.header("zilla:qos", "1")
.build()
.build()}
write "message3"
write flush
