import org.apache.kafka.common.security.scram.internals.ScramFormatter;
import org.apache.kafka.common.security.scram.internals.ScramMechanism;
import java.nio.charset.StandardCharsets;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
public class App {
public static String bytesToHex(byte[] in) {
final StringBuilder builder = new StringBuilder();
for(byte b : in) {
builder.append(String.format("0x%02x, ", b));
}
return builder.toString();
}
public static void main(String[] args) throws NoSuchAlgorithmException, InvalidKeyException {
int digestIterations = 4096;
String password = "hello";
byte[] salt = "world".getBytes(StandardCharsets.UTF_8);
byte[] saltedPassword = new ScramFormatter(ScramMechanism.SCRAM_SHA_256)
.saltedPassword(password, salt, digestIterations);
System.out.println(bytesToHex(saltedPassword));
}
}
*/
func TestScramSaltedPasswordSha512(t *testing.T) {
password := []byte("hello")
salt := []byte("world")
formatter := scramFormatter{mechanism: SCRAM_MECHANISM_SHA_512}
result, _ := formatter.saltedPassword(password, salt, 4096)
// calculated using ScramFormatter (see comment above)
expected := []byte{
0x35, 0x0c, 0x77, 0x84, 0x8a, 0x63, 0x06, 0x92, 0x00,
0x6e, 0xc6, 0x6a, 0x0c, 0x39, 0xeb, 0xb0, 0x00, 0xd3,
0xf8, 0x8a, 0x94, 0xae, 0x7f, 0x8c, 0xcd, 0x1d, 0x92,
0x52, 0x6c, 0x5b, 0x16, 0x15, 0x86, 0x3b, 0xde, 0xa1,
0x6c, 0x12, 0x9a, 0x7b, 0x09, 0xed, 0x0e, 0x38, 0xf2,
0x07, 0x4d, 0x2f, 0xe2, 0x9f, 0x0f, 0x41, 0xe1, 0xfb,
0x00, 0xc1, 0xd3, 0xbd, 0xd3, 0xfd, 0x51, 0x0b, 0xa9,
0x8f,
}
if !bytes.Equal(result, expected) {
t.Errorf("saltedPassword SHA-512 failed, expected: %v, result: %v", expected, result)
}
}
func TestScramSaltedPasswordSha256(t *testing.T) {
password := []byte("hello")
salt := []byte("world")
formatter := scramFormatter{mechanism: SCRAM_MECHANISM_SHA_256}
result, _ := formatter.saltedPassword(password, salt, 4096)
// calculated using ScramFormatter (see comment above)
expected := []byte{
0xc1, 0x55, 0x53, 0x03, 0xda, 0x30, 0x9f, 0x6b, 0x7d,
0x1e, 0x8f, 0xe4, 0x56, 0x36, 0xbf, 0xdd, 0xdc, 0x4b,
0xf5, 0x64, 0x05, 0xe7, 0xe9, 0x4e, 0x9d, 0x15, 0xf0,
0xe7, 0xb9, 0xcb, 0xd3, 0x80,
}
if !bytes.Equal(result, expected) {
t.Errorf("saltedPassword SHA-256 failed, expected: %v, result: %v", expected, result)
}
}
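As an editorial cross-check (not part of the test suite): RFC 5802 defines SaltedPassword as Hi(password, salt, i), which is PBKDF2 with HMAC as the pseudo-random function, so the SHA-256 vector above should also be reproducible with golang.org/x/crypto/pbkdf2 (an assumed extra dependency, not necessarily part of this module). A minimal standalone sketch:

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"

    "golang.org/x/crypto/pbkdf2"
)

func main() {
    // Hi("hello", "world", 4096) per RFC 5802 is PBKDF2-HMAC-SHA256 with a 32-byte output.
    salted := pbkdf2.Key([]byte("hello"), []byte("world"), 4096, sha256.Size, sha256.New)
    // Expected to print the SHA-256 test vector used above:
    // c1555303da309f6b7d1e8fe45636bfdddc4bf56405e7e94e9d15f0e7b9cbd380
    fmt.Println(hex.EncodeToString(salted))
}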
golang-github-ibm-sarama-1.46.2/server.properties
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This configuration file is intended for use in ZK-based mode, where Apache ZooKeeper is required.
# See kafka.server.KafkaConfig for additional details and defaults
#
############################# Server Basics #############################
# The id of the broker. This must be set to a unique integer for each broker.
broker.id=0
############################# Socket Server Settings #############################
# The address the socket server listens on. If not configured, the host name will be equal to the value of
# java.net.InetAddress.getCanonicalHostName(), with PLAINTEXT listener name, and port 9092.
# FORMAT:
# listeners = listener_name://host_name:port
# EXAMPLE:
# listeners = PLAINTEXT://your.host.name:9092
#listeners=PLAINTEXT://:9092
# Listener name, hostname and port the broker will advertise to clients.
# If not set, it uses the value for "listeners".
#advertised.listeners=PLAINTEXT://your.host.name:9092
# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
# The number of threads that the server uses for receiving requests from the network and sending responses to the network
num.network.threads=3
# The number of threads that the server uses for processing requests, which may include disk I/O
num.io.threads=8
# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=102400
# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=102400
# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600
############################# Log Basics #############################
# A comma separated list of directories under which to store log files
log.dirs=/tmp/kafka-logs
# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=1
# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# Increasing this value is recommended for installations with data dirs located in a RAID array.
num.recovery.threads.per.data.dir=1
############################# Internal Topic Settings #############################
# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
# For anything other than development testing, a value greater than 1 (such as 3) is recommended to ensure availability.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
############################# Log Flush Policy #############################
# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
# 1. Durability: Unflushed data may be lost if you are not using replication.
# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.
# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000
# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000
############################# Log Retention Policy #############################
# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria is met. Deletion always happens
# from the end of the log.
# The minimum age of a log file to be eligible for deletion due to age
log.retention.hours=168
# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
#log.retention.bytes=1073741824
# The maximum size of a log segment file. When this size is reached a new log segment will be created.
#log.segment.bytes=1073741824
# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=300000
############################# Zookeeper #############################
# Zookeeper connection string (see zookeeper docs for details).
# This is a comma-separated list of host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=localhost:2181
# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=18000
############################# Group Coordinator Settings #############################
# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
# The default value for this is 3 seconds.
# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
group.initial.rebalance.delay.ms=0
golang-github-ibm-sarama-1.46.2/sticky_assignor_user_data.go
package sarama
type topicPartitionAssignment struct {
Topic string
Partition int32
}
type StickyAssignorUserData interface {
partitions() []topicPartitionAssignment
hasGeneration() bool
generation() int
}
// StickyAssignorUserDataV0 holds topic partition information for an assignment
type StickyAssignorUserDataV0 struct {
Topics map[string][]int32
topicPartitions []topicPartitionAssignment
}
func (m *StickyAssignorUserDataV0) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(m.Topics)); err != nil {
return err
}
for topic, partitions := range m.Topics {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putInt32Array(partitions); err != nil {
return err
}
}
return nil
}
func (m *StickyAssignorUserDataV0) decode(pd packetDecoder) (err error) {
var topicLen int
if topicLen, err = pd.getArrayLength(); err != nil {
return
}
m.Topics = make(map[string][]int32, topicLen)
for i := 0; i < topicLen; i++ {
var topic string
if topic, err = pd.getString(); err != nil {
return
}
if m.Topics[topic], err = pd.getInt32Array(); err != nil {
return
}
}
m.topicPartitions = populateTopicPartitions(m.Topics)
return nil
}
func (m *StickyAssignorUserDataV0) partitions() []topicPartitionAssignment { return m.topicPartitions }
func (m *StickyAssignorUserDataV0) hasGeneration() bool { return false }
func (m *StickyAssignorUserDataV0) generation() int { return defaultGeneration }
// StickyAssignorUserDataV1 holds topic partition information for an assignment
type StickyAssignorUserDataV1 struct {
Topics map[string][]int32
Generation int32
topicPartitions []topicPartitionAssignment
}
func (m *StickyAssignorUserDataV1) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(m.Topics)); err != nil {
return err
}
for topic, partitions := range m.Topics {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putInt32Array(partitions); err != nil {
return err
}
}
pe.putInt32(m.Generation)
return nil
}
func (m *StickyAssignorUserDataV1) decode(pd packetDecoder) (err error) {
var topicLen int
if topicLen, err = pd.getArrayLength(); err != nil {
return
}
m.Topics = make(map[string][]int32, topicLen)
for i := 0; i < topicLen; i++ {
var topic string
if topic, err = pd.getString(); err != nil {
return
}
if m.Topics[topic], err = pd.getInt32Array(); err != nil {
return
}
}
m.Generation, err = pd.getInt32()
if err != nil {
return err
}
m.topicPartitions = populateTopicPartitions(m.Topics)
return nil
}
func (m *StickyAssignorUserDataV1) partitions() []topicPartitionAssignment { return m.topicPartitions }
func (m *StickyAssignorUserDataV1) hasGeneration() bool { return true }
func (m *StickyAssignorUserDataV1) generation() int { return int(m.Generation) }
func populateTopicPartitions(topics map[string][]int32) []topicPartitionAssignment {
topicPartitions := make([]topicPartitionAssignment, 0)
for topic, partitions := range topics {
for _, partition := range partitions {
topicPartitions = append(topicPartitions, topicPartitionAssignment{Topic: topic, Partition: partition})
}
}
return topicPartitions
}
golang-github-ibm-sarama-1.46.2/sticky_assignor_user_data_test.go
//go:build !functional
package sarama
import (
"encoding/base64"
"testing"
)
func TestStickyAssignorUserDataV0(t *testing.T) {
// Single topic with deterministic ordering across encode-decode
req := &StickyAssignorUserDataV0{}
data := decodeUserDataBytes(t, "AAAAAQADdDAzAAAAAQAAAAU=")
testDecodable(t, "", req, data)
testEncodable(t, "", req, data)
// Multiple partitions
req = &StickyAssignorUserDataV0{}
data = decodeUserDataBytes(t, "AAAAAQADdDE4AAAAEgAAAAAAAAABAAAAAgAAAAMAAAAEAAAABQAAAAYAAAAHAAAACAAAAAkAAAAKAAAACwAAAAwAAAANAAAADgAAAA8AAAAQAAAAEQ==")
testDecodable(t, "", req, data)
// Multiple topics and partitions
req = &StickyAssignorUserDataV0{}
data = decodeUserDataBytes(t, "AAAABQADdDEyAAAAAgAAAAIAAAAKAAN0MTEAAAABAAAABAADdDE0AAAAAQAAAAgAA3QxMwAAAAEAAAANAAN0MDkAAAABAAAABQ==")
testDecodable(t, "", req, data)
}
func TestStickyAssignorUserDataV1(t *testing.T) {
// Single topic with deterministic ordering across encode-decode
req := &StickyAssignorUserDataV1{}
data := decodeUserDataBytes(t, "AAAAAQADdDA2AAAAAgAAAAAAAAAE/////w==")
testDecodable(t, "", req, data)
testEncodable(t, "", req, data)
// Multiple topics and partitions
req = &StickyAssignorUserDataV1{}
data = decodeUserDataBytes(t, "AAAABgADdDEwAAAAAgAAAAIAAAAJAAN0MTIAAAACAAAAAwAAAAsAA3QxNAAAAAEAAAAEAAN0MTMAAAABAAAACwADdDE1AAAAAQAAAAwAA3QwOQAAAAEAAAAG/////w==")
testDecodable(t, "", req, data)
// Generation is populated
req = &StickyAssignorUserDataV1{}
data = decodeUserDataBytes(t, "AAAAAQAHdG9waWMwMQAAAAMAAAAAAAAAAQAAAAIAAAAB")
testDecodable(t, "", req, data)
}
func decodeUserDataBytes(t *testing.T, base64Data string) []byte {
data, err := base64.StdEncoding.DecodeString(base64Data)
if err != nil {
t.Errorf("Error decoding data: %v", err)
t.FailNow()
}
return data
}
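// Editorial sketch, not part of the test suite: a hypothetical companion to
// decodeUserDataBytes that regenerates a base64 fixture like the ones above,
// assuming the package-internal encode helper (as used by
// SyncGroupRequest.AddGroupAssignmentMember) is available here. Because Topics
// is a map, multi-topic fixtures may re-encode with a different topic order.
func encodeUserDataBytes(t *testing.T, m *StickyAssignorUserDataV1) string {
    bin, err := encode(m, nil)
    if err != nil {
        t.Errorf("Error encoding data: %v", err)
        t.FailNow()
    }
    return base64.StdEncoding.EncodeToString(bin)
}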
golang-github-ibm-sarama-1.46.2/sync_group_request.go
package sarama
type SyncGroupRequestAssignment struct {
// MemberId contains the ID of the member to assign.
MemberId string
// Assignment contains the member assignment.
Assignment []byte
}
func (a *SyncGroupRequestAssignment) encode(pe packetEncoder, version int16) (err error) {
if err := pe.putString(a.MemberId); err != nil {
return err
}
if err := pe.putBytes(a.Assignment); err != nil {
return err
}
pe.putEmptyTaggedFieldArray()
return nil
}
func (a *SyncGroupRequestAssignment) decode(pd packetDecoder, version int16) (err error) {
if a.MemberId, err = pd.getString(); err != nil {
return err
}
if a.Assignment, err = pd.getBytes(); err != nil {
return err
}
_, err = pd.getEmptyTaggedFieldArray()
return err
}
type SyncGroupRequest struct {
// Version defines the protocol version to use for encode and decode
Version int16
// GroupId contains the unique group identifier.
GroupId string
// GenerationId contains the generation of the group.
GenerationId int32
// MemberId contains the member ID assigned by the group.
MemberId string
// GroupInstanceId contains the unique identifier of the consumer instance provided by end user.
GroupInstanceId *string
// GroupAssignments contains each assignment.
GroupAssignments []SyncGroupRequestAssignment
}
func (s *SyncGroupRequest) setVersion(v int16) {
s.Version = v
}
func (s *SyncGroupRequest) encode(pe packetEncoder) (err error) {
if err := pe.putString(s.GroupId); err != nil {
return err
}
pe.putInt32(s.GenerationId)
if err := pe.putString(s.MemberId); err != nil {
return err
}
if s.Version >= 3 {
if err := pe.putNullableString(s.GroupInstanceId); err != nil {
return err
}
}
if err := pe.putArrayLength(len(s.GroupAssignments)); err != nil {
return err
}
for _, block := range s.GroupAssignments {
if err := block.encode(pe, s.Version); err != nil {
return err
}
}
pe.putEmptyTaggedFieldArray()
return nil
}
func (s *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) {
s.Version = version
if s.GroupId, err = pd.getString(); err != nil {
return err
}
if s.GenerationId, err = pd.getInt32(); err != nil {
return err
}
if s.MemberId, err = pd.getString(); err != nil {
return err
}
if s.Version >= 3 {
if s.GroupInstanceId, err = pd.getNullableString(); err != nil {
return err
}
}
if numAssignments, err := pd.getArrayLength(); err != nil {
return err
} else if numAssignments > 0 {
s.GroupAssignments = make([]SyncGroupRequestAssignment, numAssignments)
for i := 0; i < numAssignments; i++ {
var block SyncGroupRequestAssignment
if err := block.decode(pd, s.Version); err != nil {
return err
}
s.GroupAssignments[i] = block
}
}
_, err = pd.getEmptyTaggedFieldArray()
return err
}
func (r *SyncGroupRequest) key() int16 {
return apiKeySyncGroup
}
func (r *SyncGroupRequest) version() int16 {
return r.Version
}
func (r *SyncGroupRequest) headerVersion() int16 {
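// Flexible request versions (v4 and above) use the v2 request header, which adds tagged fields.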
if r.Version >= 4 {
return 2
}
return 1
}
func (r *SyncGroupRequest) isValidVersion() bool {
return r.Version >= 0 && r.Version <= 4
}
func (r *SyncGroupRequest) isFlexible() bool {
return r.isFlexibleVersion(r.Version)
}
func (r *SyncGroupRequest) isFlexibleVersion(version int16) bool {
return version >= 4
}
func (r *SyncGroupRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 4:
return V2_4_0_0
case 3:
return V2_3_0_0
case 2:
return V2_0_0_0
case 1:
return V0_11_0_0
case 0:
return V0_9_0_0
default:
return V2_3_0_0
}
}
func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) {
r.GroupAssignments = append(r.GroupAssignments, SyncGroupRequestAssignment{
MemberId: memberId,
Assignment: memberAssignment,
})
}
func (r *SyncGroupRequest) AddGroupAssignmentMember(
memberId string,
memberAssignment *ConsumerGroupMemberAssignment,
) error {
bin, err := encode(memberAssignment, nil)
if err != nil {
return err
}
r.AddGroupAssignment(memberId, bin)
return nil
}
golang-github-ibm-sarama-1.46.2/sync_group_request_test.go
//go:build !functional
package sarama
import (
"reflect"
"testing"
)
var (
emptySyncGroupRequest = []byte{
0, 3, 'f', 'o', 'o', // Group ID
0x00, 0x01, 0x02, 0x03, // Generation ID
0, 3, 'b', 'a', 'z', // Member ID
0, 0, 0, 0, // no assignments
}
populatedSyncGroupRequest = []byte{
0, 3, 'f', 'o', 'o', // Group ID
0x00, 0x01, 0x02, 0x03, // Generation ID
0, 3, 'b', 'a', 'z', // Member ID
0, 0, 0, 1, // one assignment
0, 3, 'b', 'a', 'z', // Member ID
0, 0, 0, 3, 'f', 'o', 'o', // Member assignment
}
)
func TestSyncGroupRequest(t *testing.T) {
var request *SyncGroupRequest
request = new(SyncGroupRequest)
request.GroupId = "foo"
request.GenerationId = 66051
request.MemberId = "baz"
testRequest(t, "empty", request, emptySyncGroupRequest)
request = new(SyncGroupRequest)
request.GroupId = "foo"
request.GenerationId = 66051
request.MemberId = "baz"
request.AddGroupAssignment("baz", []byte("foo"))
testRequest(t, "populated", request, populatedSyncGroupRequest)
}
var (
populatedSyncGroupRequestV3 = []byte{
0, 3, 'f', 'o', 'o', // Group ID
0x00, 0x01, 0x02, 0x03, // Generation ID
0, 3, 'b', 'a', 'z', // Member ID
0, 3, 'g', 'i', 'd', // GroupInstance ID
0, 0, 0, 1, // one assignment
0, 3, 'b', 'a', 'z', // Member ID
0, 0, 0, 3, 'f', 'o', 'o', // Member assignment
}
populatedSyncGroupRequestV4 = []byte{
4, 'f', 'o', 'o', // Group ID
0x00, 0x01, 0x02, 0x03, // Generation ID
4, 'b', 'a', 'z', // Member ID
4, 'g', 'i', 'd', // GroupInstance ID
2, // 1 + one assignment
4, 'b', 'a', 'z', // Member ID
4, 'f', 'o', 'o', // Member assignment
0, // empty tagged fields
0, // empty tagged fields
}
)
func TestSyncGroupRequestV3AndPlus(t *testing.T) {
groupInstanceId := "gid"
tests := []struct {
CaseName string
Version int16
MessageBytes []byte
Message *SyncGroupRequest
}{
{
"v3",
3,
populatedSyncGroupRequestV3,
&SyncGroupRequest{
Version: 3,
GroupId: "foo",
GenerationId: 0x00010203,
MemberId: "baz",
GroupInstanceId: &groupInstanceId,
GroupAssignments: []SyncGroupRequestAssignment{
{
MemberId: "baz",
Assignment: []byte("foo"),
},
},
},
},
{
"v4",
4,
populatedSyncGroupRequestV4,
&SyncGroupRequest{
Version: 4,
GroupId: "foo",
GenerationId: 0x00010203,
MemberId: "baz",
GroupInstanceId: &groupInstanceId,
GroupAssignments: []SyncGroupRequestAssignment{
{
MemberId: "baz",
Assignment: []byte("foo"),
},
},
},
},
}
for _, c := range tests {
request := new(SyncGroupRequest)
testVersionDecodable(t, c.CaseName, request, c.MessageBytes, c.Version)
if !reflect.DeepEqual(c.Message, request) {
t.Errorf("case %s decode failed, expected:%+v got %+v", c.CaseName, c.Message, request)
}
testEncodable(t, c.CaseName, c.Message, c.MessageBytes)
}
}
golang-github-ibm-sarama-1.46.2/sync_group_response.go
package sarama
import "time"
type SyncGroupResponse struct {
// Version defines the protocol version to use for encode and decode
Version int16
// ThrottleTime contains the duration in milliseconds for which the
// request was throttled due to a quota violation, or zero if the request
// did not violate any quota.
ThrottleTime int32
// Err contains the error code, or 0 if there was no error.
Err KError
// MemberAssignment contains the member assignment.
MemberAssignment []byte
}
func (r *SyncGroupResponse) setVersion(v int16) {
r.Version = v
}
func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
assignment := new(ConsumerGroupMemberAssignment)
err := decode(r.MemberAssignment, assignment, nil)
return assignment, err
}
func (r *SyncGroupResponse) encode(pe packetEncoder) error {
if r.Version >= 1 {
pe.putInt32(r.ThrottleTime)
}
pe.putKError(r.Err)
if err := pe.putBytes(r.MemberAssignment); err != nil {
return err
}
pe.putEmptyTaggedFieldArray()
return nil
}
func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if r.Version >= 1 {
if r.ThrottleTime, err = pd.getInt32(); err != nil {
return err
}
}
r.Err, err = pd.getKError()
if err != nil {
return err
}
r.MemberAssignment, err = pd.getBytes()
if err != nil {
return err
}
_, err = pd.getEmptyTaggedFieldArray()
return err
}
func (r *SyncGroupResponse) key() int16 {
return apiKeySyncGroup
}
func (r *SyncGroupResponse) version() int16 {
return r.Version
}
func (r *SyncGroupResponse) headerVersion() int16 {
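// Flexible response versions (v4 and above) use the v1 response header, which adds tagged fields.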
if r.Version >= 4 {
return 1
}
return 0
}
func (r *SyncGroupResponse) isValidVersion() bool {
return r.Version >= 0 && r.Version <= 4
}
func (r *SyncGroupResponse) isFlexible() bool {
return r.isFlexibleVersion(r.Version)
}
func (r *SyncGroupResponse) isFlexibleVersion(version int16) bool {
return version >= 4
}
func (r *SyncGroupResponse) requiredVersion() KafkaVersion {
switch r.Version {
case 4:
return V2_4_0_0
case 3:
return V2_3_0_0
case 2:
return V2_0_0_0
case 1:
return V0_11_0_0
case 0:
return V0_9_0_0
default:
return V2_3_0_0
}
}
func (r *SyncGroupResponse) throttleTime() time.Duration {
return time.Duration(r.ThrottleTime) * time.Millisecond
}
golang-github-ibm-sarama-1.46.2/sync_group_response_test.go
//go:build !functional
package sarama
import (
"reflect"
"testing"
)
var (
syncGroupResponseV0NoError = []byte{
0x00, 0x00, // No error
0, 0, 0, 3, 0x01, 0x02, 0x03, // Member assignment data
}
syncGroupResponseV0WithError = []byte{
0, 27, // ErrRebalanceInProgress
0, 0, 0, 0, // No member assignment data
}
syncGroupResponseV1NoError = []byte{
0, 0, 0, 100, // ThrottleTimeMs
0x00, 0x00, // No error
0, 0, 0, 3, 0x01, 0x02, 0x03, // Member assignment data
}
syncGroupResponseV4NoError = []byte{
0, 0, 0, 100, // ThrottleTimeMs
0x00, 0x00, // No error
4, 0x01, 0x02, 0x03, // Member assignment data
0, // empty tagged fields
}
)
func TestSyncGroupResponse(t *testing.T) {
tests := []struct {
CaseName string
Version int16
MessageBytes []byte
Message *SyncGroupResponse
}{
{
"v0-noErr",
0,
syncGroupResponseV0NoError,
&SyncGroupResponse{
Version: 0,
Err: ErrNoError,
MemberAssignment: []byte{1, 2, 3},
},
},
{
"v0-Err",
0,
syncGroupResponseV0WithError,
&SyncGroupResponse{
Version: 0,
Err: ErrRebalanceInProgress,
MemberAssignment: []byte{},
},
},
{
"v1-noErr",
1,
syncGroupResponseV1NoError,
&SyncGroupResponse{
ThrottleTime: 100,
Version: 1,
Err: ErrNoError,
MemberAssignment: []byte{1, 2, 3},
},
},
{
"v4-noErr",
4,
syncGroupResponseV4NoError,
&SyncGroupResponse{
ThrottleTime: 100,
Version: 4,
Err: ErrNoError,
MemberAssignment: []byte{1, 2, 3},
},
},
}
for _, c := range tests {
response := new(SyncGroupResponse)
testVersionDecodable(t, c.CaseName, response, c.MessageBytes, c.Version)
if !reflect.DeepEqual(c.Message, response) {
t.Errorf("case %s decode failed, expected:%+v got %+v", c.CaseName, c.Message, response)
}
testEncodable(t, c.CaseName, c.Message, c.MessageBytes)
}
}
golang-github-ibm-sarama-1.46.2/sync_producer.go
package sarama
import "sync"
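// expectationsPool recycles the single-use channels that SendMessage and SendMessages
// attach to each ProducerMessage (msg.expectation) while waiting for the acknowledgement
// delivered by handleSuccesses and handleErrors.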
var expectationsPool = sync.Pool{
New: func() interface{} {
return make(chan *ProducerError, 1)
},
}
// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct
// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer
// to avoid leaks; it may not be garbage-collected automatically when it passes out of scope.
//
// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual
// durability guarantee provided when a message is acknowledged depends on the configured value of `Producer.RequiredAcks`.
// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost.
//
// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to
// be set to true in its configuration.
type SyncProducer interface {
// SendMessage produces a given message, and returns only when it either has
// succeeded or failed to produce. It will return the partition and the offset
// of the produced message, or an error if the message failed to produce.
SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)
// SendMessages produces a given set of messages, and returns only when all
// messages in the set have either succeeded or failed. Note that messages
// can succeed and fail individually; if some succeed and some fail,
// SendMessages will return an error.
SendMessages(msgs []*ProducerMessage) error
// Close shuts down the producer; you must call this function before a producer
// object passes out of scope, as it may otherwise leak memory.
// You must call this before calling Close on the underlying client.
Close() error
// TxnStatus returns the current producer transaction status.
TxnStatus() ProducerTxnStatusFlag
// IsTransactional returns true when the current producer is transactional.
IsTransactional() bool
// BeginTxn marks the current transaction as ready.
BeginTxn() error
// CommitTxn commits the current transaction.
CommitTxn() error
// AbortTxn aborts the current transaction.
AbortTxn() error
// AddOffsetsToTxn adds the associated offsets to the current transaction.
AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error
// AddMessageToTxn adds the message offsets to the current transaction.
AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error
}
type syncProducer struct {
producer *asyncProducer
wg sync.WaitGroup
}
// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
if config == nil {
config = NewConfig()
config.Producer.Return.Successes = true
}
if err := verifyProducerConfig(config); err != nil {
return nil, err
}
p, err := NewAsyncProducer(addrs, config)
if err != nil {
return nil, err
}
return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
}
// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this producer.
func NewSyncProducerFromClient(client Client) (SyncProducer, error) {
if err := verifyProducerConfig(client.Config()); err != nil {
return nil, err
}
p, err := NewAsyncProducerFromClient(client)
if err != nil {
return nil, err
}
return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
}
func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer {
sp := &syncProducer{producer: p}
sp.wg.Add(2)
go withRecover(sp.handleSuccesses)
go withRecover(sp.handleErrors)
return sp
}
func verifyProducerConfig(config *Config) error {
if !config.Producer.Return.Errors {
return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer")
}
if !config.Producer.Return.Successes {
return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer")
}
return nil
}
func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) {
expectation := expectationsPool.Get().(chan *ProducerError)
msg.expectation = expectation
sp.producer.Input() <- msg
pErr := <-expectation
msg.expectation = nil
expectationsPool.Put(expectation)
if pErr != nil {
return -1, -1, pErr.Err
}
return msg.Partition, msg.Offset, nil
}
func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error {
indices := make(chan int, len(msgs))
go func() {
for i, msg := range msgs {
expectation := expectationsPool.Get().(chan *ProducerError)
msg.expectation = expectation
sp.producer.Input() <- msg
indices <- i
}
close(indices)
}()
var errors ProducerErrors
for i := range indices {
expectation := msgs[i].expectation
pErr := <-expectation
msgs[i].expectation = nil
expectationsPool.Put(expectation)
if pErr != nil {
errors = append(errors, pErr)
}
}
if len(errors) > 0 {
return errors
}
return nil
}
func (sp *syncProducer) handleSuccesses() {
defer sp.wg.Done()
for msg := range sp.producer.Successes() {
expectation := msg.expectation
expectation <- nil
}
}
func (sp *syncProducer) handleErrors() {
defer sp.wg.Done()
for err := range sp.producer.Errors() {
expectation := err.Msg.expectation
expectation <- err
}
}
func (sp *syncProducer) Close() error {
sp.producer.AsyncClose()
sp.wg.Wait()
return nil
}
func (sp *syncProducer) IsTransactional() bool {
return sp.producer.IsTransactional()
}
func (sp *syncProducer) BeginTxn() error {
return sp.producer.BeginTxn()
}
func (sp *syncProducer) CommitTxn() error {
return sp.producer.CommitTxn()
}
func (sp *syncProducer) AbortTxn() error {
return sp.producer.AbortTxn()
}
func (sp *syncProducer) AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error {
return sp.producer.AddOffsetsToTxn(offsets, groupId)
}
func (sp *syncProducer) AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error {
return sp.producer.AddMessageToTxn(msg, groupId, metadata)
}
func (sp *syncProducer) TxnStatus() ProducerTxnStatusFlag {
return sp.producer.TxnStatus()
}
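The SendMessages contract documented above (messages can succeed or fail individually, with the failures collected into a ProducerErrors value) deserves a short usage sketch. This is illustrative only, not part of the package; the broker address and topic name are placeholders.

package main

import (
    "log"

    "github.com/IBM/sarama"
)

func main() {
    // A nil config enables Producer.Return.Successes, which SyncProducer requires.
    producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, nil)
    if err != nil {
        log.Fatalln(err)
    }
    defer func() {
        if err := producer.Close(); err != nil {
            log.Fatalln(err)
        }
    }()

    msgs := []*sarama.ProducerMessage{
        {Topic: "my_topic", Value: sarama.StringEncoder("first")},
        {Topic: "my_topic", Value: sarama.StringEncoder("second")},
    }
    if err := producer.SendMessages(msgs); err != nil {
        // Partial failures come back as a ProducerErrors slice.
        if pErrs, ok := err.(sarama.ProducerErrors); ok {
            for _, pErr := range pErrs {
                log.Printf("failed to send to %s: %v", pErr.Msg.Topic, pErr.Err)
            }
        } else {
            log.Println(err)
        }
    }
}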
golang-github-ibm-sarama-1.46.2/sync_producer_test.go
//go:build !functional
package sarama
import (
"errors"
"log"
"sync"
"testing"
)
func TestSyncProducer(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
leader := NewMockBroker(t, 2)
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse)
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
for i := 0; i < 10; i++ {
leader.Returns(prodSuccess)
}
config := NewTestConfig()
config.Producer.Return.Successes = true
producer, err := NewSyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
msg := &ProducerMessage{
Topic: "my_topic",
Value: StringEncoder(TestMessage),
Metadata: "test",
}
partition, offset, err := producer.SendMessage(msg)
if partition != 0 || msg.Partition != partition {
t.Error("Unexpected partition")
}
if offset != 0 || msg.Offset != offset {
t.Error("Unexpected offset")
}
if str, ok := msg.Metadata.(string); !ok || str != "test" {
t.Error("Unexpected metadata")
}
if err != nil {
t.Error(err)
}
}
safeClose(t, producer)
leader.Close()
seedBroker.Close()
}
func TestSyncProducerTransactional(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
defer seedBroker.Close()
leader := NewMockBroker(t, 2)
defer leader.Close()
config := NewTestConfig()
config.Version = V0_11_0_0
config.Producer.RequiredAcks = WaitForAll
config.Producer.Return.Successes = true
config.Producer.Transaction.ID = "test"
config.Producer.Idempotent = true
config.Producer.Retry.Max = 5
config.Net.MaxOpenRequests = 1
metadataResponse := new(MetadataResponse)
metadataResponse.Version = 4
metadataResponse.ControllerID = leader.BrokerID()
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
metadataResponse.AddTopic("my_topic", ErrNoError)
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse)
client, err := NewClient([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
defer safeClose(t, client)
findCoordinatorResponse := new(FindCoordinatorResponse)
findCoordinatorResponse.Coordinator = client.Brokers()[0]
findCoordinatorResponse.Version = 1
leader.Returns(findCoordinatorResponse)
initProducerIdResponse := new(InitProducerIDResponse)
leader.Returns(initProducerIdResponse)
addPartitionToTxn := new(AddPartitionsToTxnResponse)
addPartitionToTxn.Errors = map[string][]*PartitionError{
"my_topic": {
{
Partition: 0,
},
},
}
leader.Returns(addPartitionToTxn)
prodSuccess := new(ProduceResponse)
prodSuccess.Version = 3
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
for i := 0; i < 10; i++ {
leader.Returns(prodSuccess)
}
endTxnResponse := &EndTxnResponse{}
leader.Returns(endTxnResponse)
producer, err := NewSyncProducerFromClient(client)
if err != nil {
t.Fatal(err)
}
if !producer.IsTransactional() {
t.Error("producer is not transactional")
}
err = producer.BeginTxn()
if err != nil {
t.Fatal(err)
}
if producer.TxnStatus()&ProducerTxnFlagInTransaction == 0 {
t.Error("transaction must be started")
}
for i := 0; i < 10; i++ {
msg := &ProducerMessage{
Topic: "my_topic",
Value: StringEncoder(TestMessage),
Metadata: "test",
}
partition, offset, err := producer.SendMessage(msg)
if partition != 0 || msg.Partition != partition {
t.Error("Unexpected partition")
}
if offset != 0 || msg.Offset != offset {
t.Error("Unexpected offset")
}
if str, ok := msg.Metadata.(string); !ok || str != "test" {
t.Error("Unexpected metadata")
}
if err != nil {
t.Error(err)
}
}
err = producer.CommitTxn()
if err != nil {
t.Fatal(err)
}
safeClose(t, producer)
}
func TestSyncProducerBatch(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
leader := NewMockBroker(t, 2)
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse)
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader.Returns(prodSuccess)
config := NewTestConfig()
config.Producer.Flush.Messages = 3
config.Producer.Return.Successes = true
producer, err := NewSyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
err = producer.SendMessages([]*ProducerMessage{
{
Topic: "my_topic",
Value: StringEncoder(TestMessage),
Metadata: "test",
},
{
Topic: "my_topic",
Value: StringEncoder(TestMessage),
Metadata: "test",
},
{
Topic: "my_topic",
Value: StringEncoder(TestMessage),
Metadata: "test",
},
})
if err != nil {
t.Error(err)
}
safeClose(t, producer)
leader.Close()
seedBroker.Close()
}
func TestConcurrentSyncProducer(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
leader := NewMockBroker(t, 2)
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse)
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader.Returns(prodSuccess)
config := NewTestConfig()
config.Producer.Flush.Messages = 100
config.Producer.Return.Successes = true
producer, err := NewSyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
wg := sync.WaitGroup{}
for i := 0; i < 100; i++ {
wg.Add(1)
go func() {
msg := &ProducerMessage{Topic: "my_topic", Value: StringEncoder(TestMessage)}
partition, _, err := producer.SendMessage(msg)
if partition != 0 {
t.Error("Unexpected partition")
}
if err != nil {
t.Error(err)
}
wg.Done()
}()
}
wg.Wait()
safeClose(t, producer)
leader.Close()
seedBroker.Close()
}
func TestSyncProducerToNonExistingTopic(t *testing.T) {
broker := NewMockBroker(t, 1)
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(broker.Addr(), broker.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, broker.BrokerID(), nil, nil, nil, ErrNoError)
broker.Returns(metadataResponse)
config := NewTestConfig()
config.Metadata.Retry.Max = 0
config.Producer.Retry.Max = 0
config.Producer.Return.Successes = true
producer, err := NewSyncProducer([]string{broker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
metadataResponse = new(MetadataResponse)
metadataResponse.AddBroker(broker.Addr(), broker.BrokerID())
metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition)
broker.Returns(metadataResponse)
_, _, err = producer.SendMessage(&ProducerMessage{Topic: "unknown"})
if !errors.Is(err, ErrUnknownTopicOrPartition) {
t.Error("Expected ErrUnknownTopicOrPartition, found:", err)
}
safeClose(t, producer)
broker.Close()
}
func TestSyncProducerRecoveryWithRetriesDisabled(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
leader1 := NewMockBroker(t, 2)
leader2 := NewMockBroker(t, 3)
metadataLeader1 := new(MetadataResponse)
metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID())
metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, nil, ErrNoError)
seedBroker.Returns(metadataLeader1)
config := NewTestConfig()
config.Producer.Retry.Max = 0 // disable!
config.Producer.Retry.Backoff = 0
config.Producer.Return.Successes = true
producer, err := NewSyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
seedBroker.Close()
prodNotLeader := new(ProduceResponse)
prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
leader1.Returns(prodNotLeader)
_, _, err = producer.SendMessage(&ProducerMessage{Topic: "my_topic", Value: StringEncoder(TestMessage)})
if !errors.Is(err, ErrNotLeaderForPartition) {
t.Fatal(err)
}
metadataLeader2 := new(MetadataResponse)
metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID())
metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, nil, ErrNoError)
leader1.Returns(metadataLeader2)
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader2.Returns(prodSuccess)
_, _, err = producer.SendMessage(&ProducerMessage{Topic: "my_topic", Value: StringEncoder(TestMessage)})
if err != nil {
t.Fatal(err)
}
leader1.Close()
leader2.Close()
safeClose(t, producer)
}
// This example shows the basic usage pattern of the SyncProducer.
func ExampleSyncProducer() {
producer, err := NewSyncProducer([]string{"localhost:9092"}, nil)
if err != nil {
log.Fatalln(err)
}
defer func() {
if err := producer.Close(); err != nil {
log.Fatalln(err)
}
}()
msg := &ProducerMessage{Topic: "my_topic", Value: StringEncoder("testing 123")}
partition, offset, err := producer.SendMessage(msg)
if err != nil {
log.Printf("FAILED to send message: %s\n", err)
} else {
log.Printf("> message sent to partition %d at offset %d\n", partition, offset)
}
}
golang-github-ibm-sarama-1.46.2/timestamp.go
package sarama
import (
"fmt"
"time"
)
type Timestamp struct {
*time.Time
}
func (t Timestamp) encode(pe packetEncoder) error {
timestamp := int64(-1)
if !t.Before(time.Unix(0, 0)) {
timestamp = t.UnixNano() / int64(time.Millisecond)
} else if !t.IsZero() {
return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", t)}
}
pe.putInt64(timestamp)
return nil
}
func (t Timestamp) decode(pd packetDecoder) error {
millis, err := pd.getInt64()
if err != nil {
return err
}
// negative timestamps are invalid, in these cases we should return
// a zero time
timestamp := time.Time{}
if millis >= 0 {
timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
}
*t.Time = timestamp
return nil
}
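The wire format handled above is just milliseconds since the Unix epoch, with -1 standing in for an absent timestamp. A small standalone sketch of the same round trip, without the packetEncoder/packetDecoder plumbing (the timestamp value is arbitrary):

package main

import (
    "fmt"
    "time"
)

func main() {
    ts := time.Date(2024, 1, 2, 3, 4, 5, 0, time.UTC)

    // What Timestamp.encode writes for a non-zero time: milliseconds since the epoch.
    millis := ts.UnixNano() / int64(time.Millisecond)

    // What Timestamp.decode reconstructs from a non-negative value.
    back := time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))

    fmt.Println(millis, back.Equal(ts)) // 1704164645000 true
}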
golang-github-ibm-sarama-1.46.2/tools/README.md
# Sarama tools
This folder contains applications that are useful for exploration of your Kafka cluster, or instrumentation.
Some of these tools mirror tools that ship with Kafka, but these tools won't require installing the JVM to function.
- [kafka-console-producer](./kafka-console-producer): a command line tool to produce a single message to your Kafka custer.
- [kafka-console-partitionconsumer](./kafka-console-partitionconsumer): (deprecated) a command line tool to consume a single partition of a topic on your Kafka cluster.
- [kafka-console-consumer](./kafka-console-consumer): a command line tool to consume arbitrary partitions of a topic on your Kafka cluster.
- [kafka-producer-performance](./kafka-producer-performance): a command line tool to performance test producers (sync and async) on your Kafka cluster.
To install all tools, run `go install github.com/IBM/sarama/tools/...@latest`
golang-github-ibm-sarama-1.46.2/tools/kafka-console-consumer/.gitignore
kafka-console-consumer
kafka-console-consumer.test
golang-github-ibm-sarama-1.46.2/tools/kafka-console-consumer/README.md
# kafka-console-consumer
A simple command line tool to consume partitions of a topic and print the
messages on the standard output.
### Installation
go install github.com/IBM/sarama/tools/kafka-console-consumer@latest
### Usage
# Minimum invocation
kafka-console-consumer -topic=test -brokers=kafka1:9092
# It will pick up a KAFKA_PEERS environment variable
export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092
kafka-console-consumer -topic=test
# You can specify the offset you want to start at. It can be either
# `oldest`, `newest`. The default is `newest`.
kafka-console-consumer -topic=test -offset=oldest
kafka-console-consumer -topic=test -offset=newest
# You can specify the partition(s) you want to consume as a comma-separated
# list. The default is `all`.
kafka-console-consumer -topic=test -partitions=1,2,3
# Display all command line options
kafka-console-consumer -help
golang-github-ibm-sarama-1.46.2/tools/kafka-console-consumer/kafka-console-consumer.go
package main
import (
"flag"
"fmt"
"log"
"os"
"os/signal"
"strconv"
"strings"
"sync"
"syscall"
"github.com/IBM/sarama"
"github.com/IBM/sarama/tools/tls"
)
var (
brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster")
topic = flag.String("topic", "", "REQUIRED: the topic to consume")
partitions = flag.String("partitions", "all", "The partitions to consume, can be 'all' or comma-separated numbers")
offset = flag.String("offset", "newest", "The offset to start with. Can be `oldest`, `newest`")
verbose = flag.Bool("verbose", false, "Whether to turn on sarama logging")
tlsEnabled = flag.Bool("tls-enabled", false, "Whether to enable TLS")
tlsSkipVerify = flag.Bool("tls-skip-verify", false, "Whether to skip TLS server cert verification")
tlsClientCert = flag.String("tls-client-cert", "", "Client cert for client authentication (use with -tls-enabled and -tls-client-key)")
tlsClientKey = flag.String("tls-client-key", "", "Client key for client authentication (use with -tls-enabled and -tls-client-cert)")
bufferSize = flag.Int("buffer-size", 256, "The buffer size of the message channel.")
logger = log.New(os.Stderr, "", log.LstdFlags)
)
func main() {
flag.Parse()
if *brokerList == "" {
printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.")
}
if *topic == "" {
printUsageErrorAndExit("-topic is required")
}
if *verbose {
sarama.Logger = logger
}
var initialOffset int64
switch *offset {
case "oldest":
initialOffset = sarama.OffsetOldest
case "newest":
initialOffset = sarama.OffsetNewest
default:
printUsageErrorAndExit("-offset should be `oldest` or `newest`")
}
config := sarama.NewConfig()
if *tlsEnabled {
tlsConfig, err := tls.NewConfig(*tlsClientCert, *tlsClientKey)
if err != nil {
printErrorAndExit(69, "Failed to create TLS config: %s", err)
}
config.Net.TLS.Enable = true
config.Net.TLS.Config = tlsConfig
config.Net.TLS.Config.InsecureSkipVerify = *tlsSkipVerify
}
c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), config)
if err != nil {
printErrorAndExit(69, "Failed to start consumer: %s", err)
}
partitionList, err := getPartitions(c)
if err != nil {
printErrorAndExit(69, "Failed to get the list of partitions: %s", err)
}
var (
messages = make(chan *sarama.ConsumerMessage, *bufferSize)
closing = make(chan struct{})
wg sync.WaitGroup
)
go func() {
signals := make(chan os.Signal, 1)
signal.Notify(signals, syscall.SIGTERM, os.Interrupt)
<-signals
logger.Println("Initiating shutdown of consumer...")
close(closing)
}()
for _, partition := range partitionList {
pc, err := c.ConsumePartition(*topic, partition, initialOffset)
if err != nil {
printErrorAndExit(69, "Failed to start consumer for partition %d: %s", partition, err)
}
go func(pc sarama.PartitionConsumer) {
<-closing
pc.AsyncClose()
}(pc)
wg.Add(1)
go func(pc sarama.PartitionConsumer) {
defer wg.Done()
for message := range pc.Messages() {
messages <- message
}
}(pc)
}
go func() {
for msg := range messages {
fmt.Printf("Partition:\t%d\n", msg.Partition)
fmt.Printf("Offset:\t%d\n", msg.Offset)
fmt.Printf("Key:\t%s\n", string(msg.Key))
fmt.Printf("Value:\t%s\n", string(msg.Value))
fmt.Println()
}
}()
wg.Wait()
logger.Println("Done consuming topic", *topic)
close(messages)
if err := c.Close(); err != nil {
logger.Println("Failed to close consumer: ", err)
}
}
func getPartitions(c sarama.Consumer) ([]int32, error) {
if *partitions == "all" {
return c.Partitions(*topic)
}
tmp := strings.Split(*partitions, ",")
var pList []int32
for i := range tmp {
val, err := strconv.ParseInt(tmp[i], 10, 32)
if err != nil {
return nil, err
}
pList = append(pList, int32(val))
}
return pList, nil
}
func printErrorAndExit(code int, format string, values ...interface{}) {
fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
fmt.Fprintln(os.Stderr)
os.Exit(code)
}
func printUsageErrorAndExit(format string, values ...interface{}) {
fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
fmt.Fprintln(os.Stderr)
fmt.Fprintln(os.Stderr, "Available command line options:")
flag.PrintDefaults()
os.Exit(64)
}
golang-github-ibm-sarama-1.46.2/tools/kafka-console-partitionconsumer/.gitignore
kafka-console-partitionconsumer
kafka-console-partitionconsumer.test
golang-github-ibm-sarama-1.46.2/tools/kafka-console-partitionconsumer/README.md
# kafka-console-partitionconsumer
NOTE: this tool is deprecated in favour of the more general and more powerful
`kafka-console-consumer`.
A simple command line tool to consume a partition of a topic and print the messages
on the standard output.
### Installation
go install github.com/IBM/sarama/tools/kafka-console-partitionconsumer@latest
### Usage
# Minimum invocation
kafka-console-partitionconsumer -topic=test -partition=4 -brokers=kafka1:9092
# It will pick up a KAFKA_PEERS environment variable
export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092
kafka-console-partitionconsumer -topic=test -partition=4
# You can specify the offset you want to start at. It can be either
# `oldest`, `newest`, or a specific offset number
kafka-console-partitionconsumer -topic=test -partition=3 -offset=oldest
kafka-console-partitionconsumer -topic=test -partition=2 -offset=1337
# Display all command line options
kafka-console-partitionconsumer -help
golang-github-ibm-sarama-1.46.2/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go
package main
import (
"flag"
"fmt"
"log"
"os"
"os/signal"
"strconv"
"strings"
"syscall"
"github.com/IBM/sarama"
)
var (
brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster")
topic = flag.String("topic", "", "REQUIRED: the topic to consume")
partition = flag.Int("partition", -1, "REQUIRED: the partition to consume")
offset = flag.String("offset", "newest", "The offset to start with. Can be `oldest`, `newest`, or an actual offset")
verbose = flag.Bool("verbose", false, "Whether to turn on sarama logging")
logger = log.New(os.Stderr, "", log.LstdFlags)
)
func main() {
flag.Parse()
if *brokerList == "" {
printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.")
}
if *topic == "" {
printUsageErrorAndExit("-topic is required")
}
if *partition == -1 {
printUsageErrorAndExit("-partition is required")
}
if *verbose {
sarama.Logger = logger
}
var (
initialOffset int64
offsetError error
)
switch *offset {
case "oldest":
initialOffset = sarama.OffsetOldest
case "newest":
initialOffset = sarama.OffsetNewest
default:
initialOffset, offsetError = strconv.ParseInt(*offset, 10, 64)
}
if offsetError != nil {
printUsageErrorAndExit("Invalid initial offset: %s", *offset)
}
c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), nil)
if err != nil {
printErrorAndExit(69, "Failed to start consumer: %s", err)
}
pc, err := c.ConsumePartition(*topic, int32(*partition), initialOffset)
if err != nil {
printErrorAndExit(69, "Failed to start partition consumer: %s", err)
}
go func() {
signals := make(chan os.Signal, 1)
signal.Notify(signals, syscall.SIGTERM, os.Interrupt)
<-signals
pc.AsyncClose()
}()
for msg := range pc.Messages() {
fmt.Printf("Offset:\t%d\n", msg.Offset)
fmt.Printf("Key:\t%s\n", string(msg.Key))
fmt.Printf("Value:\t%s\n", string(msg.Value))
fmt.Println()
}
if err := c.Close(); err != nil {
logger.Println("Failed to close consumer: ", err)
}
}
func printErrorAndExit(code int, format string, values ...interface{}) {
fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
fmt.Fprintln(os.Stderr)
os.Exit(code)
}
func printUsageErrorAndExit(format string, values ...interface{}) {
fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
fmt.Fprintln(os.Stderr)
fmt.Fprintln(os.Stderr, "Available command line options:")
flag.PrintDefaults()
os.Exit(64)
}
golang-github-ibm-sarama-1.46.2/tools/kafka-console-producer/.gitignore
kafka-console-producer
kafka-console-producer.test
golang-github-ibm-sarama-1.46.2/tools/kafka-console-producer/README.md
# kafka-console-producer
A simple command line tool to produce a single message to Kafka.
### Installation
go install github.com/IBM/sarama/tools/kafka-console-producer@latest
### Usage
# Minimum invocation
kafka-console-producer -topic=test -value=value -brokers=kafka1:9092
# It will pick up a KAFKA_PEERS environment variable
export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092
kafka-console-producer -topic=test -value=value
# It will read the value from stdin by using pipes
echo "hello world" | kafka-console-producer -topic=test
# Specify a key:
echo "hello world" | kafka-console-producer -topic=test -key=key
# Partitioning: by default, kafka-console-producer will partition as follows:
# - manual partitioning if a -partition is provided
# - hash partitioning by key if a -key is provided
# - random partitioning otherwise.
#
# You can override this using the -partitioner argument:
echo "hello world" | kafka-console-producer -topic=test -key=key -partitioner=random
# Display all command line options
kafka-console-producer -help
golang-github-ibm-sarama-1.46.2/tools/kafka-console-producer/kafka-console-producer.go
package main
import (
"flag"
"fmt"
"io"
"log"
"os"
"strings"
"github.com/rcrowley/go-metrics"
"github.com/IBM/sarama"
"github.com/IBM/sarama/tools/tls"
)
var (
brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster. You can also set the KAFKA_PEERS environment variable")
headers = flag.String("headers", "", "The headers of the message to produce. Example: -headers=foo:bar,bar:foo")
topic = flag.String("topic", "", "REQUIRED: the topic to produce to")
key = flag.String("key", "", "The key of the message to produce. Can be empty.")
value = flag.String("value", "", "REQUIRED: the value of the message to produce. You can also provide the value on stdin.")
partitioner = flag.String("partitioner", "", "The partitioning scheme to use. Can be `hash`, `manual`, or `random`")
partition = flag.Int("partition", -1, "The partition to produce to.")
verbose = flag.Bool("verbose", false, "Turn on sarama logging to stderr")
showMetrics = flag.Bool("metrics", false, "Output metrics on successful publish to stderr")
silent = flag.Bool("silent", false, "Turn off printing the message's topic, partition, and offset to stdout")
tlsEnabled = flag.Bool("tls-enabled", false, "Whether to enable TLS")
tlsSkipVerify = flag.Bool("tls-skip-verify", false, "Whether to skip TLS server cert verification")
tlsClientCert = flag.String("tls-client-cert", "", "Client cert for client authentication (use with -tls-enabled and -tls-client-key)")
tlsClientKey = flag.String("tls-client-key", "", "Client key for client authentication (use with -tls-enabled and -tls-client-cert)")
logger = log.New(os.Stderr, "", log.LstdFlags)
)
func main() {
flag.Parse()
if *brokerList == "" {
printUsageErrorAndExit("no -brokers specified. Alternatively, set the KAFKA_PEERS environment variable")
}
if *topic == "" {
printUsageErrorAndExit("no -topic specified")
}
if *verbose {
sarama.Logger = logger
}
config := sarama.NewConfig()
config.Producer.RequiredAcks = sarama.WaitForAll
config.Producer.Return.Successes = true
if *tlsEnabled {
tlsConfig, err := tls.NewConfig(*tlsClientCert, *tlsClientKey)
if err != nil {
printErrorAndExit(69, "Failed to create TLS config: %s", err)
}
config.Net.TLS.Enable = true
config.Net.TLS.Config = tlsConfig
config.Net.TLS.Config.InsecureSkipVerify = *tlsSkipVerify
}
switch *partitioner {
case "":
if *partition >= 0 {
config.Producer.Partitioner = sarama.NewManualPartitioner
} else {
config.Producer.Partitioner = sarama.NewHashPartitioner
}
case "hash":
config.Producer.Partitioner = sarama.NewHashPartitioner
case "random":
config.Producer.Partitioner = sarama.NewRandomPartitioner
case "manual":
config.Producer.Partitioner = sarama.NewManualPartitioner
if *partition == -1 {
printUsageErrorAndExit("-partition is required when partitioning manually")
}
default:
printUsageErrorAndExit(fmt.Sprintf("Partitioner %s not supported.", *partitioner))
}
message := &sarama.ProducerMessage{Topic: *topic, Partition: int32(*partition)}
if *key != "" {
message.Key = sarama.StringEncoder(*key)
}
if *value != "" {
message.Value = sarama.StringEncoder(*value)
} else if stdinAvailable() {
bytes, err := io.ReadAll(os.Stdin)
if err != nil {
printErrorAndExit(66, "Failed to read data from the standard input: %s", err)
}
message.Value = sarama.ByteEncoder(bytes)
} else {
printUsageErrorAndExit("-value is required, or you have to provide the value on stdin")
}
if *headers != "" {
var hdrs []sarama.RecordHeader
for h := range strings.SplitSeq(*headers, ",") {
if header := strings.Split(h, ":"); len(header) != 2 {
printUsageErrorAndExit("-header should be key:value. Example: -headers=foo:bar,bar:foo")
} else {
hdrs = append(hdrs, sarama.RecordHeader{
Key: []byte(header[0]),
Value: []byte(header[1]),
})
}
}
if len(hdrs) != 0 {
message.Headers = hdrs
}
}
producer, err := sarama.NewSyncProducer(strings.Split(*brokerList, ","), config)
if err != nil {
printErrorAndExit(69, "Failed to open Kafka producer: %s", err)
}
defer func() {
if err := producer.Close(); err != nil {
logger.Println("Failed to close Kafka producer cleanly:", err)
}
}()
partition, offset, err := producer.SendMessage(message)
if err != nil {
printErrorAndExit(69, "Failed to produce message: %s", err)
} else if !*silent {
fmt.Printf("topic=%s\tpartition=%d\toffset=%d\n", *topic, partition, offset)
}
if *showMetrics {
metrics.WriteOnce(config.MetricRegistry, os.Stderr)
}
}
func printErrorAndExit(code int, format string, values ...interface{}) {
fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
fmt.Fprintln(os.Stderr)
os.Exit(code)
}
func printUsageErrorAndExit(message string) {
fmt.Fprintln(os.Stderr, "ERROR:", message)
fmt.Fprintln(os.Stderr)
fmt.Fprintln(os.Stderr, "Available command line options:")
flag.PrintDefaults()
os.Exit(64)
}
func stdinAvailable() bool {
stat, _ := os.Stdin.Stat()
return (stat.Mode() & os.ModeCharDevice) == 0
}
golang-github-ibm-sarama-1.46.2/tools/kafka-producer-performance/ 0000775 0000000 0000000 00000000000 15072577001 0024703 5 ustar 00root root 0000000 0000000 golang-github-ibm-sarama-1.46.2/tools/kafka-producer-performance/README.md 0000664 0000000 0000000 00000000623 15072577001 0026163 0 ustar 00root root 0000000 0000000 # kafka-producer-performance
A command line tool to test producer performance.
### Installation
go get github.com/IBM/sarama/tools/kafka-producer-performance
### Usage
# Display all command line options
kafka-producer-performance -help
# Minimum invocation
kafka-producer-performance \
-brokers=kafka:9092 \
-message-load=50000 \
-message-size=100 \
-topic=producer_test
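# A fuller invocation, sketched here for illustration only; every flag used below
# is defined in main.go, and the broker address, CA path, and values are placeholders
kafka-producer-performance \
-brokers=kafka:9093 \
-security-protocol=SSL \
-tls-ca-certs=/path/to/ca.pem \
-message-load=50000 \
-message-size=100 \
-throughput=1000 \
-required-acks=-1 \
-compression=snappy \
-topic=producer_test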
golang-github-ibm-sarama-1.46.2/tools/kafka-producer-performance/main.go 0000664 0000000 0000000 00000031305 15072577001 0026160 0 ustar 00root root 0000000 0000000 package main
import (
"context"
"crypto/rand"
"crypto/x509"
"flag"
"fmt"
"io"
"log"
"os"
"strings"
gosync "sync"
"time"
"github.com/rcrowley/go-metrics"
"github.com/IBM/sarama"
"github.com/IBM/sarama/tools/tls"
)
var (
sync = flag.Bool(
"sync",
false,
"Use a synchronous producer.",
)
messageLoad = flag.Int(
"message-load",
0,
"REQUIRED: The number of messages to produce to -topic.",
)
messageSize = flag.Int(
"message-size",
0,
"REQUIRED: The approximate size (in bytes) of each message to produce to -topic.",
)
brokers = flag.String(
"brokers",
"",
"REQUIRED: A comma separated list of broker addresses.",
)
securityProtocol = flag.String(
"security-protocol",
"PLAINTEXT",
"The name of the security protocol to talk to Kafka (PLAINTEXT, SSL).",
)
tlsRootCACerts = flag.String(
"tls-ca-certs",
"",
"The path to a file that contains a set of root certificate authorities in PEM format "+
"to trust when verifying broker certificates when -security-protocol=SSL "+
"(leave empty to use the host's root CA set).",
)
tlsClientCert = flag.String(
"tls-client-cert",
"",
"The path to a file that contains the client certificate to send to the broker "+
"in PEM format if client authentication is required when -security-protocol=SSL "+
"(leave empty to disable client authentication).",
)
tlsClientKey = flag.String(
"tls-client-key",
"",
"The path to a file that contains the client private key linked to the client certificate "+
"in PEM format when -security-protocol=SSL (REQUIRED if tls-client-cert is provided).",
)
topic = flag.String(
"topic",
"",
"REQUIRED: The topic to run the performance test on.",
)
partition = flag.Int(
"partition",
-1,
"The partition of -topic to run the performance test on.",
)
throughput = flag.Int(
"throughput",
0,
"The maximum number of messages to send per second (0 for no limit).",
)
maxOpenRequests = flag.Int(
"max-open-requests",
5,
"The maximum number of unacknowledged requests the client will send on a single connection before blocking.",
)
maxMessageBytes = flag.Int(
"max-message-bytes",
1000000,
"The max permitted size of a message.",
)
requiredAcks = flag.Int(
"required-acks",
1,
"The required number of acks needed from the broker (-1: all, 0: none, 1: local).",
)
timeout = flag.Duration(
"timeout",
10*time.Second,
"The duration the producer will wait to receive -required-acks.",
)
partitioner = flag.String(
"partitioner",
"roundrobin",
"The partitioning scheme to use (hash, manual, random, roundrobin).",
)
compression = flag.String(
"compression",
"none",
"The compression method to use (none, gzip, snappy, lz4).",
)
flushFrequency = flag.Duration(
"flush-frequency",
0,
"The best-effort frequency of flushes.",
)
flushBytes = flag.Int(
"flush-bytes",
0,
"The best-effort number of bytes needed to trigger a flush.",
)
flushMessages = flag.Int(
"flush-messages",
0,
"The best-effort number of messages needed to trigger a flush.",
)
flushMaxMessages = flag.Int(
"flush-max-messages",
0,
"The maximum number of messages the producer will send in a single request.",
)
clientID = flag.String(
"client-id",
"sarama",
"The client ID sent with every request to the brokers.",
)
channelBufferSize = flag.Int(
"channel-buffer-size",
256,
"The number of events to buffer in internal and external channels.",
)
routines = flag.Int(
"routines",
1,
"The number of routines to send the messages from (-sync only).",
)
version = flag.String(
"version",
"0.8.2.0",
"The assumed version of Kafka.",
)
verbose = flag.Bool(
"verbose",
false,
"Turn on sarama logging to stderr",
)
)
func parseCompression(scheme string) sarama.CompressionCodec {
switch scheme {
case "none":
return sarama.CompressionNone
case "gzip":
return sarama.CompressionGZIP
case "snappy":
return sarama.CompressionSnappy
case "lz4":
return sarama.CompressionLZ4
default:
printUsageErrorAndExit(fmt.Sprintf("Unknown -compression: %s", scheme))
}
panic("should not happen")
}
func parsePartitioner(scheme string, partition int) sarama.PartitionerConstructor {
if partition < 0 && scheme == "manual" {
printUsageErrorAndExit("-partition must not be -1 for -partitioning=manual")
}
switch scheme {
case "manual":
return sarama.NewManualPartitioner
case "hash":
return sarama.NewHashPartitioner
case "random":
return sarama.NewRandomPartitioner
case "roundrobin":
return sarama.NewRoundRobinPartitioner
default:
printUsageErrorAndExit(fmt.Sprintf("Unknown -partitioning: %s", scheme))
}
panic("should not happen")
}
func parseVersion(version string) sarama.KafkaVersion {
result, err := sarama.ParseKafkaVersion(version)
if err != nil {
printUsageErrorAndExit(fmt.Sprintf("unknown -version: %s", version))
}
return result
}
func generateMessages(topic string, partition, messageLoad, messageSize int) []*sarama.ProducerMessage {
messages := make([]*sarama.ProducerMessage, messageLoad)
for i := 0; i < messageLoad; i++ {
payload := make([]byte, messageSize)
if _, err := rand.Read(payload); err != nil {
printErrorAndExit(69, "Failed to generate message payload: %s", err)
}
messages[i] = &sarama.ProducerMessage{
Topic: topic,
Partition: int32(partition),
Value: sarama.ByteEncoder(payload),
}
}
return messages
}
func main() {
flag.Parse()
if *brokers == "" {
printUsageErrorAndExit("-brokers is required")
}
if *topic == "" {
printUsageErrorAndExit("-topic is required")
}
if *messageLoad <= 0 {
printUsageErrorAndExit("-message-load must be greater than 0")
}
if *messageSize <= 0 {
printUsageErrorAndExit("-message-size must be greater than 0")
}
if *routines < 1 || *routines > *messageLoad {
printUsageErrorAndExit("-routines must be greater than 0 and less than or equal to -message-load")
}
if *securityProtocol != "PLAINTEXT" && *securityProtocol != "SSL" {
printUsageErrorAndExit(fmt.Sprintf("-security-protocol %q is not supported", *securityProtocol))
}
if *verbose {
sarama.Logger = log.New(os.Stderr, "", log.LstdFlags)
}
config := sarama.NewConfig()
config.Net.MaxOpenRequests = *maxOpenRequests
config.Producer.MaxMessageBytes = *maxMessageBytes
config.Producer.RequiredAcks = sarama.RequiredAcks(*requiredAcks)
config.Producer.Timeout = *timeout
config.Producer.Partitioner = parsePartitioner(*partitioner, *partition)
config.Producer.Compression = parseCompression(*compression)
config.Producer.Flush.Frequency = *flushFrequency
config.Producer.Flush.Bytes = *flushBytes
config.Producer.Flush.Messages = *flushMessages
config.Producer.Flush.MaxMessages = *flushMaxMessages
config.Producer.Return.Successes = true
config.ClientID = *clientID
config.ChannelBufferSize = *channelBufferSize
config.Version = parseVersion(*version)
if *securityProtocol == "SSL" {
tlsConfig, err := tls.NewConfig(*tlsClientCert, *tlsClientKey)
if err != nil {
printErrorAndExit(69, "failed to load client certificate from: %s and private key from: %s: %v",
*tlsClientCert, *tlsClientKey, err)
}
if *tlsRootCACerts != "" {
rootCAsBytes, err := os.ReadFile(*tlsRootCACerts)
if err != nil {
printErrorAndExit(69, "failed to read root CA certificates: %v", err)
}
certPool := x509.NewCertPool()
if !certPool.AppendCertsFromPEM(rootCAsBytes) {
printErrorAndExit(69, "failed to load root CA certificates from file: %s", *tlsRootCACerts)
}
// Use specific root CA set vs the host's set
tlsConfig.RootCAs = certPool
}
config.Net.TLS.Enable = true
config.Net.TLS.Config = tlsConfig
}
if err := config.Validate(); err != nil {
printErrorAndExit(69, "Invalid configuration: %s", err)
}
// Print out metrics periodically.
done := make(chan struct{})
ctx, cancel := context.WithCancel(context.Background())
go func(ctx context.Context) {
defer close(done)
t := time.Tick(5 * time.Second)
for {
select {
case <-t:
printMetrics(os.Stdout, config.MetricRegistry)
case <-ctx.Done():
return
}
}
}(ctx)
brokers := strings.Split(*brokers, ",")
if *sync {
runSyncProducer(*topic, *partition, *messageLoad, *messageSize, *routines,
config, brokers, *throughput)
} else {
runAsyncProducer(*topic, *partition, *messageLoad, *messageSize,
config, brokers, *throughput)
}
cancel()
<-done
}
func runAsyncProducer(topic string, partition, messageLoad, messageSize int,
config *sarama.Config, brokers []string, throughput int) {
producer, err := sarama.NewAsyncProducer(brokers, config)
if err != nil {
printErrorAndExit(69, "Failed to create producer: %s", err)
}
defer func() {
// Print final metrics.
printMetrics(os.Stdout, config.MetricRegistry)
if err := producer.Close(); err != nil {
printErrorAndExit(69, "Failed to close producer: %s", err)
}
}()
messages := generateMessages(topic, partition, messageLoad, messageSize)
messagesDone := make(chan struct{})
go func() {
for i := 0; i < messageLoad; i++ {
select {
case <-producer.Successes():
case err = <-producer.Errors():
printErrorAndExit(69, "%s", err)
}
}
messagesDone <- struct{}{}
}()
if throughput > 0 {
ticker := time.NewTicker(time.Second)
for idx, message := range messages {
producer.Input() <- message
if (idx+1)%throughput == 0 {
<-ticker.C
}
}
ticker.Stop()
} else {
for _, message := range messages {
producer.Input() <- message
}
}
<-messagesDone
close(messagesDone)
}
func runSyncProducer(topic string, partition, messageLoad, messageSize, routines int,
config *sarama.Config, brokers []string, throughput int) {
producer, err := sarama.NewSyncProducer(brokers, config)
if err != nil {
printErrorAndExit(69, "Failed to create producer: %s", err)
}
defer func() {
// Print final metrics.
printMetrics(os.Stdout, config.MetricRegistry)
if err := producer.Close(); err != nil {
printErrorAndExit(69, "Failed to close producer: %s", err)
}
}()
messages := make([][]*sarama.ProducerMessage, routines)
for i := 0; i < routines; i++ {
if i == routines-1 {
messages[i] = generateMessages(topic, partition, messageLoad/routines+messageLoad%routines, messageSize)
} else {
messages[i] = generateMessages(topic, partition, messageLoad/routines, messageSize)
}
}
var wg gosync.WaitGroup
if throughput > 0 {
for _, messages := range messages {
wg.Add(1)
go func() {
ticker := time.NewTicker(time.Second)
for _, message := range messages {
for i := 0; i < throughput; i++ {
_, _, err = producer.SendMessage(message)
if err != nil {
printErrorAndExit(69, "Failed to send message: %s", err)
}
}
<-ticker.C
}
ticker.Stop()
wg.Done()
}()
}
} else {
for _, messages := range messages {
wg.Add(1)
go func() {
for _, message := range messages {
_, _, err = producer.SendMessage(message)
if err != nil {
printErrorAndExit(69, "Failed to send message: %s", err)
}
}
wg.Done()
}()
}
}
wg.Wait()
}
func printMetrics(w io.Writer, r metrics.Registry) {
recordSendRateMetric := r.Get("record-send-rate")
requestLatencyMetric := r.Get("request-latency-in-ms")
outgoingByteRateMetric := r.Get("outgoing-byte-rate")
requestsInFlightMetric := r.Get("requests-in-flight")
if recordSendRateMetric == nil || requestLatencyMetric == nil || outgoingByteRateMetric == nil ||
requestsInFlightMetric == nil {
return
}
recordSendRate := recordSendRateMetric.(metrics.Meter).Snapshot()
requestLatency := requestLatencyMetric.(metrics.Histogram).Snapshot()
requestLatencyPercentiles := requestLatency.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
outgoingByteRate := outgoingByteRateMetric.(metrics.Meter).Snapshot()
requestsInFlight := requestsInFlightMetric.(metrics.Counter).Count()
fmt.Fprintf(w, "%d records sent, %.1f records/sec (%.2f MiB/sec ingress, %.2f MiB/sec egress), "+
"%.1f ms avg latency, %.1f ms stddev, %.1f ms 50th, %.1f ms 75th, "+
"%.1f ms 95th, %.1f ms 99th, %.1f ms 99.9th, %d total req. in flight\n",
recordSendRate.Count(),
recordSendRate.RateMean(),
recordSendRate.RateMean()*float64(*messageSize)/1024/1024,
outgoingByteRate.RateMean()/1024/1024,
requestLatency.Mean(),
requestLatency.StdDev(),
requestLatencyPercentiles[0],
requestLatencyPercentiles[1],
requestLatencyPercentiles[2],
requestLatencyPercentiles[3],
requestLatencyPercentiles[4],
requestsInFlight,
)
}
func printUsageErrorAndExit(message string) {
fmt.Fprintln(os.Stderr, "ERROR:", message)
fmt.Fprintln(os.Stderr)
fmt.Fprintln(os.Stderr, "Available command line options:")
flag.PrintDefaults()
os.Exit(64)
}
func printErrorAndExit(code int, format string, values ...interface{}) {
fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
fmt.Fprintln(os.Stderr)
os.Exit(code)
}
golang-github-ibm-sarama-1.46.2/tools/tls/ 0000775 0000000 0000000 00000000000 15072577001 0020310 5 ustar 00root root 0000000 0000000 golang-github-ibm-sarama-1.46.2/tools/tls/config.go 0000664 0000000 0000000 00000000611 15072577001 0022102 0 ustar 00root root 0000000 0000000 package tls
import "crypto/tls"
func NewConfig(clientCert, clientKey string) (*tls.Config, error) {
tlsConfig := tls.Config{
MinVersion: tls.VersionTLS12,
}
if clientCert != "" && clientKey != "" {
cert, err := tls.LoadX509KeyPair(clientCert, clientKey)
if err != nil {
return &tlsConfig, err
}
tlsConfig.Certificates = []tls.Certificate{cert}
}
return &tlsConfig, nil
}
golang-github-ibm-sarama-1.46.2/transaction_manager.go 0000664 0000000 0000000 00000072356 15072577001 0022731 0 ustar 00root root 0000000 0000000 package sarama
import (
"errors"
"fmt"
"strings"
"sync"
"time"
)
// ProducerTxnStatusFlag marks the current transaction status.
type ProducerTxnStatusFlag int16
const (
// ProducerTxnFlagUninitialized when txnmgr is created
ProducerTxnFlagUninitialized ProducerTxnStatusFlag = 1 << iota
// ProducerTxnFlagInitializing when txnmgr is initializing
ProducerTxnFlagInitializing
// ProducerTxnFlagReady when is ready to receive transaction
ProducerTxnFlagReady
// ProducerTxnFlagInTransaction when transaction is started
ProducerTxnFlagInTransaction
// ProducerTxnFlagEndTransaction when transaction will be committed
ProducerTxnFlagEndTransaction
// ProducerTxnFlagInError when having abortable or fatal error
ProducerTxnFlagInError
// ProducerTxnFlagCommittingTransaction when committing txn
ProducerTxnFlagCommittingTransaction
// ProducerTxnFlagAbortingTransaction when aborting txn
ProducerTxnFlagAbortingTransaction
// ProducerTxnFlagAbortableError when the producer encounters an abortable error
// Must call AbortTxn in this case.
ProducerTxnFlagAbortableError
// ProducerTxnFlagFatalError when the producer encounters a fatal error
// Must Close and recreate it.
ProducerTxnFlagFatalError
)
func (s ProducerTxnStatusFlag) String() string {
status := make([]string, 0)
if s&ProducerTxnFlagUninitialized != 0 {
status = append(status, "ProducerTxnStateUninitialized")
}
if s&ProducerTxnFlagInitializing != 0 {
status = append(status, "ProducerTxnStateInitializing")
}
if s&ProducerTxnFlagReady != 0 {
status = append(status, "ProducerTxnStateReady")
}
if s&ProducerTxnFlagInTransaction != 0 {
status = append(status, "ProducerTxnStateInTransaction")
}
if s&ProducerTxnFlagEndTransaction != 0 {
status = append(status, "ProducerTxnStateEndTransaction")
}
if s&ProducerTxnFlagInError != 0 {
status = append(status, "ProducerTxnStateInError")
}
if s&ProducerTxnFlagCommittingTransaction != 0 {
status = append(status, "ProducerTxnStateCommittingTransaction")
}
if s&ProducerTxnFlagAbortingTransaction != 0 {
status = append(status, "ProducerTxnStateAbortingTransaction")
}
if s&ProducerTxnFlagAbortableError != 0 {
status = append(status, "ProducerTxnStateAbortableError")
}
if s&ProducerTxnFlagFatalError != 0 {
status = append(status, "ProducerTxnStateFatalError")
}
return strings.Join(status, "|")
}
// transactionManager keeps the state necessary to ensure idempotent production
type transactionManager struct {
producerID int64
producerEpoch int16
sequenceNumbers map[string]int32
mutex sync.Mutex
transactionalID string
transactionTimeout time.Duration
client Client
// True when the Kafka cluster is at least 2.5.0;
// used to recover when the producer has failed.
coordinatorSupportsBumpingEpoch bool
// Whether the producer needs to bump its epoch.
epochBumpRequired bool
// Record last seen error.
lastError error
// Ensure that status is never accessed with a race-condition.
statusLock sync.RWMutex
status ProducerTxnStatusFlag
// Ensure that only one goroutine will update partitions in current transaction.
partitionInTxnLock sync.Mutex
pendingPartitionsInCurrentTxn topicPartitionSet
partitionsInCurrentTxn topicPartitionSet
// Offsets to add to transaction.
offsetsInCurrentTxn map[string]topicPartitionOffsets
}
const (
noProducerID = -1
noProducerEpoch = -1
// see publishTxnPartitions comment.
addPartitionsRetryBackoff = 20 * time.Millisecond
)
// txnmngr allowed transitions.
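// For illustration, one valid path through this table (also exercised by
// TestTransitions) is Uninitialized -> Ready -> InTransaction ->
// EndTransaction|CommittingTransaction -> Ready: isTransitionValid accepts a
// step whenever the target shares at least one flag with an allowed transition
// of a flag set in the current status.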
var producerTxnTransitions = map[ProducerTxnStatusFlag][]ProducerTxnStatusFlag{
ProducerTxnFlagUninitialized: {
ProducerTxnFlagReady,
ProducerTxnFlagInError,
},
// When we are initializing
ProducerTxnFlagInitializing: {
ProducerTxnFlagInitializing,
ProducerTxnFlagReady,
ProducerTxnFlagInError,
},
// When the transactional producer has been initialized
ProducerTxnFlagReady: {
ProducerTxnFlagInTransaction,
},
// When beginTxn has been called
ProducerTxnFlagInTransaction: {
// When calling commit or abort
ProducerTxnFlagEndTransaction,
// When got an error
ProducerTxnFlagInError,
},
ProducerTxnFlagEndTransaction: {
// When epoch bump
ProducerTxnFlagInitializing,
// When commit is good
ProducerTxnFlagReady,
// When got an error
ProducerTxnFlagInError,
},
// Need to abort transaction
ProducerTxnFlagAbortableError: {
// Call AbortTxn
ProducerTxnFlagAbortingTransaction,
// When got an error
ProducerTxnFlagInError,
},
// Need to close producer
ProducerTxnFlagFatalError: {
ProducerTxnFlagFatalError,
},
}
type topicPartition struct {
topic string
partition int32
}
// to ensure that we don't do a full scan every time a partition or an offset is added.
type (
topicPartitionSet map[topicPartition]struct{}
topicPartitionOffsets map[topicPartition]*PartitionOffsetMetadata
)
func (s topicPartitionSet) mapToRequest() map[string][]int32 {
result := make(map[string][]int32, len(s))
for tp := range s {
result[tp.topic] = append(result[tp.topic], tp.partition)
}
return result
}
func (s topicPartitionOffsets) mapToRequest() map[string][]*PartitionOffsetMetadata {
result := make(map[string][]*PartitionOffsetMetadata, len(s))
for tp, offset := range s {
result[tp.topic] = append(result[tp.topic], offset)
}
return result
}
// Return true if current transition is allowed.
func (t *transactionManager) isTransitionValid(target ProducerTxnStatusFlag) bool {
for status, allowedTransitions := range producerTxnTransitions {
if status&t.status != 0 {
for _, allowedTransition := range allowedTransitions {
if allowedTransition&target != 0 {
return true
}
}
}
}
return false
}
// Get current transaction status.
func (t *transactionManager) currentTxnStatus() ProducerTxnStatusFlag {
t.statusLock.RLock()
defer t.statusLock.RUnlock()
return t.status
}
// Try to transition to a valid status and return an error otherwise.
func (t *transactionManager) transitionTo(target ProducerTxnStatusFlag, err error) error {
t.statusLock.Lock()
defer t.statusLock.Unlock()
if !t.isTransitionValid(target) {
return ErrTransitionNotAllowed
}
if target&ProducerTxnFlagInError != 0 {
if err == nil {
return ErrCannotTransitionNilError
}
t.lastError = err
} else {
t.lastError = nil
}
DebugLogger.Printf("txnmgr/transition [%s] transition from %s to %s\n", t.transactionalID, t.status, target)
t.status = target
return err
}
func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) (int32, int16) {
key := fmt.Sprintf("%s-%d", topic, partition)
t.mutex.Lock()
defer t.mutex.Unlock()
sequence := t.sequenceNumbers[key]
t.sequenceNumbers[key] = sequence + 1
return sequence, t.producerEpoch
}
func (t *transactionManager) bumpEpoch() {
t.mutex.Lock()
defer t.mutex.Unlock()
t.producerEpoch++
for k := range t.sequenceNumbers {
t.sequenceNumbers[k] = 0
}
}
func (t *transactionManager) getProducerID() (int64, int16) {
t.mutex.Lock()
defer t.mutex.Unlock()
return t.producerID, t.producerEpoch
}
// Compute the retry backoff based on how many attempts remain.
func (t *transactionManager) computeBackoff(attemptsRemaining int) time.Duration {
if t.client.Config().Producer.Transaction.Retry.BackoffFunc != nil {
maxRetries := t.client.Config().Producer.Transaction.Retry.Max
retries := maxRetries - attemptsRemaining
return t.client.Config().Producer.Transaction.Retry.BackoffFunc(retries, maxRetries)
}
return t.client.Config().Producer.Transaction.Retry.Backoff
}
// Returns true if the txnmngr is transactional.
func (t *transactionManager) isTransactional() bool {
return t.transactionalID != ""
}
// add specified offsets to current transaction.
func (t *transactionManager) addOffsetsToTxn(offsetsToAdd map[string][]*PartitionOffsetMetadata, groupId string) error {
t.mutex.Lock()
defer t.mutex.Unlock()
if t.currentTxnStatus()&ProducerTxnFlagInTransaction == 0 {
return ErrTransactionNotReady
}
if t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 {
return t.lastError
}
if _, ok := t.offsetsInCurrentTxn[groupId]; !ok {
t.offsetsInCurrentTxn[groupId] = topicPartitionOffsets{}
}
for topic, offsets := range offsetsToAdd {
for _, offset := range offsets {
tp := topicPartition{topic: topic, partition: offset.Partition}
t.offsetsInCurrentTxn[groupId][tp] = offset
}
}
return nil
}
// Send the txnmgr's saved offsets to the transaction coordinator and commit them with the consumer group coordinator.
func (t *transactionManager) publishOffsetsToTxn(offsets topicPartitionOffsets, groupId string) (topicPartitionOffsets, error) {
// First AddOffsetsToTxn
attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max
exec := func(run func() (bool, error), err error) error {
for attemptsRemaining >= 0 {
var retry bool
retry, err = run()
if !retry {
return err
}
backoff := t.computeBackoff(attemptsRemaining)
Logger.Printf("txnmgr/add-offset-to-txn [%s] retrying after %dms... (%d attempts remaining) (%s)\n",
t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err)
time.Sleep(backoff)
attemptsRemaining--
}
return err
}
lastError := exec(func() (bool, error) {
coordinator, err := t.client.TransactionCoordinator(t.transactionalID)
if err != nil {
return true, err
}
request := &AddOffsetsToTxnRequest{
TransactionalID: t.transactionalID,
ProducerEpoch: t.producerEpoch,
ProducerID: t.producerID,
GroupID: groupId,
}
if t.client.Config().Version.IsAtLeast(V2_7_0_0) {
// Version 2 adds support for the new error code PRODUCER_FENCED.
request.Version = 2
} else if t.client.Config().Version.IsAtLeast(V2_0_0_0) {
// Version 1 is the same as version 0.
request.Version = 1
}
response, err := coordinator.AddOffsetsToTxn(request)
if err != nil {
// If an error occurred try to refresh current transaction coordinator.
_ = coordinator.Close()
_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
return true, err
}
if response == nil {
// If no response is returned just retry.
return true, ErrTxnUnableToParseResponse
}
if response.Err == ErrNoError {
DebugLogger.Printf("txnmgr/add-offset-to-txn [%s] successful add-offset-to-txn with group %s %+v\n",
t.transactionalID, groupId, response)
// If no error, just exit.
return false, nil
}
switch response.Err {
case ErrConsumerCoordinatorNotAvailable:
fallthrough
case ErrNotCoordinatorForConsumer:
_ = coordinator.Close()
_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
fallthrough
case ErrOffsetsLoadInProgress:
fallthrough
case ErrConcurrentTransactions:
// Retry
case ErrUnknownProducerID:
fallthrough
case ErrInvalidProducerIDMapping:
return false, t.abortableErrorIfPossible(response.Err)
case ErrGroupAuthorizationFailed:
return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, response.Err)
default:
// Others are fatal
return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err)
}
return true, response.Err
}, nil)
if lastError != nil {
return offsets, lastError
}
resultOffsets := offsets
// Then TxnOffsetCommit
// note the result is not completed until the TxnOffsetCommit returns
attemptsRemaining = t.client.Config().Producer.Transaction.Retry.Max
execTxnOffsetCommit := func(run func() (topicPartitionOffsets, bool, error), err error) (topicPartitionOffsets, error) {
var r topicPartitionOffsets
for attemptsRemaining >= 0 {
var retry bool
r, retry, err = run()
if !retry {
return r, err
}
backoff := t.computeBackoff(attemptsRemaining)
Logger.Printf("txnmgr/txn-offset-commit [%s] retrying after %dms... (%d attempts remaining) (%s)\n",
t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err)
time.Sleep(backoff)
attemptsRemaining--
}
return r, err
}
return execTxnOffsetCommit(func() (topicPartitionOffsets, bool, error) {
consumerGroupCoordinator, err := t.client.Coordinator(groupId)
if err != nil {
return resultOffsets, true, err
}
request := &TxnOffsetCommitRequest{
TransactionalID: t.transactionalID,
ProducerEpoch: t.producerEpoch,
ProducerID: t.producerID,
GroupID: groupId,
Topics: offsets.mapToRequest(),
}
if t.client.Config().Version.IsAtLeast(V2_1_0_0) {
// Version 2 adds the committed leader epoch.
request.Version = 2
} else if t.client.Config().Version.IsAtLeast(V2_0_0_0) {
// Version 1 is the same as version 0.
request.Version = 1
}
responses, err := consumerGroupCoordinator.TxnOffsetCommit(request)
if err != nil {
_ = consumerGroupCoordinator.Close()
_ = t.client.RefreshCoordinator(groupId)
return resultOffsets, true, err
}
if responses == nil {
return resultOffsets, true, ErrTxnUnableToParseResponse
}
var responseErrors []error
failedTxn := topicPartitionOffsets{}
for topic, partitionErrors := range responses.Topics {
for _, partitionError := range partitionErrors {
switch partitionError.Err {
case ErrNoError:
continue
// If the topic is unknown or the coordinator is loading, retry with the current coordinator
case ErrRequestTimedOut:
fallthrough
case ErrConsumerCoordinatorNotAvailable:
fallthrough
case ErrNotCoordinatorForConsumer:
_ = consumerGroupCoordinator.Close()
_ = t.client.RefreshCoordinator(groupId)
fallthrough
case ErrUnknownTopicOrPartition:
fallthrough
case ErrOffsetsLoadInProgress:
// Do nothing just retry
case ErrIllegalGeneration:
fallthrough
case ErrUnknownMemberId:
fallthrough
case ErrFencedInstancedId:
fallthrough
case ErrGroupAuthorizationFailed:
return resultOffsets, false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, partitionError.Err)
default:
// Others are fatal
return resultOffsets, false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, partitionError.Err)
}
tp := topicPartition{topic: topic, partition: partitionError.Partition}
failedTxn[tp] = offsets[tp]
responseErrors = append(responseErrors, partitionError.Err)
}
}
resultOffsets = failedTxn
if len(resultOffsets) == 0 {
DebugLogger.Printf("txnmgr/txn-offset-commit [%s] successful txn-offset-commit with group %s\n",
t.transactionalID, groupId)
return resultOffsets, false, nil
}
return resultOffsets, true, Wrap(ErrTxnOffsetCommit, responseErrors...)
}, nil)
}
func (t *transactionManager) initProducerId() (int64, int16, error) {
isEpochBump := false
req := &InitProducerIDRequest{}
if t.isTransactional() {
req.TransactionalID = &t.transactionalID
req.TransactionTimeout = t.transactionTimeout
}
if t.client.Config().Version.IsAtLeast(V2_5_0_0) {
if t.client.Config().Version.IsAtLeast(V2_7_0_0) {
// Version 4 adds support for the new error code PRODUCER_FENCED.
req.Version = 4
} else {
// Version 3 adds ProducerId and ProducerEpoch, allowing producers to try
// to resume after an INVALID_PRODUCER_EPOCH error
req.Version = 3
}
isEpochBump = t.producerID != noProducerID && t.producerEpoch != noProducerEpoch
t.coordinatorSupportsBumpingEpoch = true
req.ProducerID = t.producerID
req.ProducerEpoch = t.producerEpoch
} else if t.client.Config().Version.IsAtLeast(V2_4_0_0) {
// Version 2 is the first flexible version.
req.Version = 2
} else if t.client.Config().Version.IsAtLeast(V2_0_0_0) {
// Version 1 is the same as version 0.
req.Version = 1
}
if isEpochBump {
err := t.transitionTo(ProducerTxnFlagInitializing, nil)
if err != nil {
return -1, -1, err
}
DebugLogger.Printf("txnmgr/init-producer-id [%s] invoking InitProducerId for the first time in order to acquire a producer ID\n",
t.transactionalID)
} else {
DebugLogger.Printf("txnmgr/init-producer-id [%s] invoking InitProducerId with current producer ID %d and epoch %d in order to bump the epoch\n",
t.transactionalID, t.producerID, t.producerEpoch)
}
attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max
exec := func(run func() (int64, int16, bool, error), err error) (int64, int16, error) {
pid := int64(-1)
pepoch := int16(-1)
for attemptsRemaining >= 0 {
var retry bool
pid, pepoch, retry, err = run()
if !retry {
return pid, pepoch, err
}
backoff := t.computeBackoff(attemptsRemaining)
Logger.Printf("txnmgr/init-producer-id [%s] retrying after %dms... (%d attempts remaining) (%s)\n",
t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err)
time.Sleep(backoff)
attemptsRemaining--
}
return -1, -1, err
}
return exec(func() (int64, int16, bool, error) {
var err error
var coordinator *Broker
if t.isTransactional() {
coordinator, err = t.client.TransactionCoordinator(t.transactionalID)
} else {
coordinator = t.client.LeastLoadedBroker()
}
if err != nil {
return -1, -1, true, err
}
response, err := coordinator.InitProducerID(req)
if err != nil {
if t.isTransactional() {
_ = coordinator.Close()
_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
}
return -1, -1, true, err
}
if response == nil {
return -1, -1, true, ErrTxnUnableToParseResponse
}
if response.Err == ErrNoError {
if isEpochBump {
t.sequenceNumbers = make(map[string]int32)
}
err := t.transitionTo(ProducerTxnFlagReady, nil)
if err != nil {
return -1, -1, true, err
}
DebugLogger.Printf("txnmgr/init-producer-id [%s] successful init producer id %+v\n",
t.transactionalID, response)
return response.ProducerID, response.ProducerEpoch, false, nil
}
switch response.Err {
// Retriable errors
case ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer, ErrOffsetsLoadInProgress:
if t.isTransactional() {
_ = coordinator.Close()
_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
}
// Fatal errors
default:
return -1, -1, false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err)
}
return -1, -1, true, response.Err
}, nil)
}
// If the Kafka cluster is at least 2.5.0, mark the txnmngr for an epoch bump; otherwise mark the error as fatal.
func (t *transactionManager) abortableErrorIfPossible(err error) error {
if t.coordinatorSupportsBumpingEpoch {
t.epochBumpRequired = true
return t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, err)
}
return t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, err)
}
// End current transaction.
func (t *transactionManager) completeTransaction() error {
if t.epochBumpRequired {
err := t.transitionTo(ProducerTxnFlagInitializing, nil)
if err != nil {
return err
}
} else {
err := t.transitionTo(ProducerTxnFlagReady, nil)
if err != nil {
return err
}
}
t.lastError = nil
t.epochBumpRequired = false
t.partitionsInCurrentTxn = topicPartitionSet{}
t.pendingPartitionsInCurrentTxn = topicPartitionSet{}
t.offsetsInCurrentTxn = map[string]topicPartitionOffsets{}
return nil
}
// Send an EndTxn request with the commit flag (true when committing, false when aborting).
func (t *transactionManager) endTxn(commit bool) error {
attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max
exec := func(run func() (bool, error), err error) error {
for attemptsRemaining >= 0 {
var retry bool
retry, err = run()
if !retry {
return err
}
backoff := t.computeBackoff(attemptsRemaining)
Logger.Printf("txnmgr/endtxn [%s] retrying after %dms... (%d attempts remaining) (%s)\n",
t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err)
time.Sleep(backoff)
attemptsRemaining--
}
return err
}
return exec(func() (bool, error) {
coordinator, err := t.client.TransactionCoordinator(t.transactionalID)
if err != nil {
return true, err
}
request := &EndTxnRequest{
TransactionalID: t.transactionalID,
ProducerEpoch: t.producerEpoch,
ProducerID: t.producerID,
TransactionResult: commit,
}
if t.client.Config().Version.IsAtLeast(V2_7_0_0) {
// Version 2 adds support for the new error code PRODUCER_FENCED.
request.Version = 2
} else if t.client.Config().Version.IsAtLeast(V2_0_0_0) {
// Version 1 is the same as version 0.
request.Version = 1
}
response, err := coordinator.EndTxn(request)
if err != nil {
// Always retry on network error
_ = coordinator.Close()
_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
return true, err
}
if response == nil {
return true, ErrTxnUnableToParseResponse
}
if response.Err == ErrNoError {
DebugLogger.Printf("txnmgr/endtxn [%s] successful to end txn %+v\n",
t.transactionalID, response)
return false, t.completeTransaction()
}
switch response.Err {
// Need to refresh coordinator
case ErrConsumerCoordinatorNotAvailable:
fallthrough
case ErrNotCoordinatorForConsumer:
_ = coordinator.Close()
_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
fallthrough
case ErrOffsetsLoadInProgress:
fallthrough
case ErrConcurrentTransactions:
// Just retry
case ErrUnknownProducerID:
fallthrough
case ErrInvalidProducerIDMapping:
return false, t.abortableErrorIfPossible(response.Err)
// Fatal errors
default:
return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err)
}
return true, response.Err
}, nil)
}
// Try to publish the associated offsets for each group,
// then send an EndTxn request to mark the transaction as finished.
func (t *transactionManager) finishTransaction(commit bool) error {
t.mutex.Lock()
defer t.mutex.Unlock()
// Ensure no error when committing or aborting
if commit && t.currentTxnStatus()&ProducerTxnFlagInError != 0 {
return t.lastError
} else if !commit && t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 {
return t.lastError
}
// If no records have been sent, don't do anything.
if len(t.partitionsInCurrentTxn) == 0 {
return t.completeTransaction()
}
epochBump := t.epochBumpRequired
// If we're aborting the transaction, there is no need to add offsets.
if commit && len(t.offsetsInCurrentTxn) > 0 {
for group, offsets := range t.offsetsInCurrentTxn {
newOffsets, err := t.publishOffsetsToTxn(offsets, group)
if err != nil {
t.offsetsInCurrentTxn[group] = newOffsets
return err
}
delete(t.offsetsInCurrentTxn, group)
}
}
if t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 {
return t.lastError
}
if !errors.Is(t.lastError, ErrInvalidProducerIDMapping) {
err := t.endTxn(commit)
if err != nil {
return err
}
if !epochBump {
return nil
}
}
// reset pid and epoch if needed.
return t.initializeTransactions()
}
// Called before sending any transactional record.
// Does nothing if the current topic-partition has already been added to the transaction.
func (t *transactionManager) maybeAddPartitionToCurrentTxn(topic string, partition int32) {
if t.currentTxnStatus()&ProducerTxnFlagInError != 0 {
return
}
tp := topicPartition{topic: topic, partition: partition}
t.partitionInTxnLock.Lock()
defer t.partitionInTxnLock.Unlock()
if _, ok := t.partitionsInCurrentTxn[tp]; ok {
// partition is already added
return
}
t.pendingPartitionsInCurrentTxn[tp] = struct{}{}
}
// Makes a request to Kafka to add a list of partitions to the current transaction.
func (t *transactionManager) publishTxnPartitions() error {
t.partitionInTxnLock.Lock()
defer t.partitionInTxnLock.Unlock()
if t.currentTxnStatus()&ProducerTxnFlagInError != 0 {
return t.lastError
}
if len(t.pendingPartitionsInCurrentTxn) == 0 {
return nil
}
// Remove the partitions from the pending set regardless of the result. We use the presence
// of partitions in the pending set to know when it is not safe to send batches. However, if
// the partitions failed to be added and we enter an error state, we expect the batches to be
// aborted anyway. In this case, we must be able to continue sending the batches which are in
// retry for partitions that were successfully added.
removeAllPartitionsOnFatalOrAbortedError := func() {
t.pendingPartitionsInCurrentTxn = topicPartitionSet{}
}
// We only want to reduce the backoff when retrying the first AddPartition which errored out due to a
// CONCURRENT_TRANSACTIONS error since this means that the previous transaction is still completing and
// we don't want to wait too long before trying to start the new one.
//
// This is only a temporary fix, the long term solution is being tracked in
// https://issues.apache.org/jira/browse/KAFKA-5482
retryBackoff := t.client.Config().Producer.Transaction.Retry.Backoff
computeBackoff := func(attemptsRemaining int) time.Duration {
if t.client.Config().Producer.Transaction.Retry.BackoffFunc != nil {
maxRetries := t.client.Config().Producer.Transaction.Retry.Max
retries := maxRetries - attemptsRemaining
return t.client.Config().Producer.Transaction.Retry.BackoffFunc(retries, maxRetries)
}
return retryBackoff
}
attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max
exec := func(run func() (bool, error), err error) error {
for attemptsRemaining >= 0 {
var retry bool
retry, err = run()
if !retry {
return err
}
backoff := computeBackoff(attemptsRemaining)
Logger.Printf("txnmgr/add-partition-to-txn retrying after %dms... (%d attempts remaining) (%s)\n", backoff/time.Millisecond, attemptsRemaining, err)
time.Sleep(backoff)
attemptsRemaining--
}
return err
}
return exec(func() (bool, error) {
coordinator, err := t.client.TransactionCoordinator(t.transactionalID)
if err != nil {
return true, err
}
request := &AddPartitionsToTxnRequest{
TransactionalID: t.transactionalID,
ProducerID: t.producerID,
ProducerEpoch: t.producerEpoch,
TopicPartitions: t.pendingPartitionsInCurrentTxn.mapToRequest(),
}
if t.client.Config().Version.IsAtLeast(V2_7_0_0) {
// Version 2 adds support for the new error code PRODUCER_FENCED.
request.Version = 2
} else if t.client.Config().Version.IsAtLeast(V2_0_0_0) {
// Version 1 is the same as version 0.
request.Version = 1
}
addPartResponse, err := coordinator.AddPartitionsToTxn(request)
if err != nil {
_ = coordinator.Close()
_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
return true, err
}
if addPartResponse == nil {
return true, ErrTxnUnableToParseResponse
}
// Remove partitions that were successfully added from the pending set.
var responseErrors []error
for topic, results := range addPartResponse.Errors {
for _, response := range results {
tp := topicPartition{topic: topic, partition: response.Partition}
switch response.Err {
case ErrNoError:
// Mark partition as added to transaction
t.partitionsInCurrentTxn[tp] = struct{}{}
delete(t.pendingPartitionsInCurrentTxn, tp)
continue
case ErrConsumerCoordinatorNotAvailable:
fallthrough
case ErrNotCoordinatorForConsumer:
_ = coordinator.Close()
_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
fallthrough
case ErrUnknownTopicOrPartition:
fallthrough
case ErrOffsetsLoadInProgress:
// Retry topicPartition
case ErrConcurrentTransactions:
if len(t.partitionsInCurrentTxn) == 0 && retryBackoff > addPartitionsRetryBackoff {
retryBackoff = addPartitionsRetryBackoff
}
case ErrOperationNotAttempted:
fallthrough
case ErrTopicAuthorizationFailed:
removeAllPartitionsOnFatalOrAbortedError()
return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, response.Err)
case ErrUnknownProducerID:
fallthrough
case ErrInvalidProducerIDMapping:
removeAllPartitionsOnFatalOrAbortedError()
return false, t.abortableErrorIfPossible(response.Err)
// Fatal errors
default:
removeAllPartitionsOnFatalOrAbortedError()
return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err)
}
responseErrors = append(responseErrors, response.Err)
}
}
// If nothing remains pending, every partition was added successfully; otherwise retry.
if len(t.pendingPartitionsInCurrentTxn) == 0 {
DebugLogger.Printf("txnmgr/add-partition-to-txn [%s] successful to add partitions txn %+v\n",
t.transactionalID, addPartResponse)
return false, nil
}
return true, Wrap(ErrAddPartitionsToTxn, responseErrors...)
}, nil)
}
// Build a new transaction manager sharing producer client.
func newTransactionManager(conf *Config, client Client) (*transactionManager, error) {
txnmgr := &transactionManager{
producerID: noProducerID,
producerEpoch: noProducerEpoch,
client: client,
pendingPartitionsInCurrentTxn: topicPartitionSet{},
partitionsInCurrentTxn: topicPartitionSet{},
offsetsInCurrentTxn: make(map[string]topicPartitionOffsets),
status: ProducerTxnFlagUninitialized,
}
if conf.Producer.Idempotent {
txnmgr.transactionalID = conf.Producer.Transaction.ID
txnmgr.transactionTimeout = conf.Producer.Transaction.Timeout
txnmgr.sequenceNumbers = make(map[string]int32)
txnmgr.mutex = sync.Mutex{}
var err error
txnmgr.producerID, txnmgr.producerEpoch, err = txnmgr.initProducerId()
if err != nil {
return nil, err
}
Logger.Printf("txnmgr/init-producer-id [%s] obtained a ProducerId: %d and ProducerEpoch: %d\n",
txnmgr.transactionalID, txnmgr.producerID, txnmgr.producerEpoch)
}
return txnmgr, nil
}
// re-init producer-id and producer-epoch if needed.
func (t *transactionManager) initializeTransactions() (err error) {
t.producerID, t.producerEpoch, err = t.initProducerId()
return
}
golang-github-ibm-sarama-1.46.2/transaction_manager_test.go 0000664 0000000 0000000 00000074013 15072577001 0023760 0 ustar 00root root 0000000 0000000 //go:build !functional
package sarama
import (
"errors"
"testing"
"github.com/stretchr/testify/require"
)
func TestTransitions(t *testing.T) {
testError := errors.New("test")
type testCase struct {
transitions []ProducerTxnStatusFlag
expectedError error
}
testCases := []testCase{
{
transitions: []ProducerTxnStatusFlag{
ProducerTxnFlagUninitialized,
ProducerTxnFlagReady,
ProducerTxnFlagInTransaction,
ProducerTxnFlagEndTransaction | ProducerTxnFlagCommittingTransaction,
ProducerTxnFlagReady,
},
expectedError: nil,
},
{
transitions: []ProducerTxnStatusFlag{
ProducerTxnFlagUninitialized,
ProducerTxnFlagReady,
ProducerTxnFlagInTransaction,
ProducerTxnFlagEndTransaction | ProducerTxnFlagAbortingTransaction,
ProducerTxnFlagReady,
},
expectedError: nil,
},
{
transitions: []ProducerTxnStatusFlag{
ProducerTxnFlagUninitialized,
ProducerTxnFlagReady,
ProducerTxnFlagInTransaction,
ProducerTxnFlagEndTransaction,
ProducerTxnFlagInError | ProducerTxnFlagAbortableError,
},
expectedError: testError,
},
{
transitions: []ProducerTxnStatusFlag{
ProducerTxnFlagInError | ProducerTxnFlagAbortableError,
ProducerTxnFlagEndTransaction | ProducerTxnFlagAbortingTransaction,
ProducerTxnFlagReady,
},
expectedError: nil,
},
{
transitions: []ProducerTxnStatusFlag{
ProducerTxnFlagInError | ProducerTxnFlagAbortableError,
ProducerTxnFlagEndTransaction | ProducerTxnFlagCommittingTransaction,
},
expectedError: ErrTransitionNotAllowed,
},
{
transitions: []ProducerTxnStatusFlag{
ProducerTxnFlagInError | ProducerTxnFlagFatalError,
ProducerTxnFlagEndTransaction | ProducerTxnFlagAbortingTransaction,
},
expectedError: ErrTransitionNotAllowed,
},
}
for _, tc := range testCases {
txnmgr := transactionManager{}
txnmgr.status = tc.transitions[0]
var lastError error
for i := 1; i < len(tc.transitions); i++ {
var baseErr error
if tc.transitions[i]&ProducerTxnFlagInError != 0 {
baseErr = testError
}
lastError = txnmgr.transitionTo(tc.transitions[i], baseErr)
}
require.Equal(t, tc.expectedError, lastError, tc)
}
}
func TestTxnmgrInitProducerIdTxn(t *testing.T) {
broker := NewMockBroker(t, 1)
defer broker.Close()
metadataLeader := new(MetadataResponse)
metadataLeader.Version = 4
metadataLeader.ControllerID = broker.brokerID
metadataLeader.AddBroker(broker.Addr(), broker.BrokerID())
broker.Returns(metadataLeader)
config := NewTestConfig()
config.Producer.Idempotent = true
config.Producer.Transaction.ID = "test"
config.Version = V0_11_0_0
config.Producer.RequiredAcks = WaitForAll
config.Net.MaxOpenRequests = 1
client, err := NewClient([]string{broker.Addr()}, config)
require.NoError(t, err)
defer client.Close()
findCoordinatorResponse := FindCoordinatorResponse{
Coordinator: client.Brokers()[0],
Err: ErrNoError,
Version: 1,
}
broker.Returns(&findCoordinatorResponse)
producerIdResponse := &InitProducerIDResponse{
Err: ErrNoError,
ProducerID: 1,
ProducerEpoch: 0,
}
broker.Returns(producerIdResponse)
txmng, err := newTransactionManager(config, client)
require.NoError(t, err)
require.Equal(t, int64(1), txmng.producerID)
require.Equal(t, int16(0), txmng.producerEpoch)
require.Equal(t, ProducerTxnFlagReady, txmng.status)
}
// TestTxnmgrInitProducerIdTxnCoordinatorLoading ensure we retry initProducerId when either FindCoordinator or InitProducerID returns ErrOffsetsLoadInProgress
func TestTxnmgrInitProducerIdTxnCoordinatorLoading(t *testing.T) {
config := NewTestConfig()
config.Producer.Idempotent = true
config.Producer.Transaction.ID = "txid-group"
config.Version = V0_11_0_0
config.Producer.RequiredAcks = WaitForAll
config.Net.MaxOpenRequests = 1
broker := NewMockBroker(t, 1)
defer broker.Close()
broker.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetController(broker.BrokerID()).
SetBroker(broker.Addr(), broker.BrokerID()),
"FindCoordinatorRequest": NewMockSequence(
NewMockFindCoordinatorResponse(t).
SetError(CoordinatorTransaction, "txid-group", ErrOffsetsLoadInProgress),
NewMockFindCoordinatorResponse(t).
SetError(CoordinatorTransaction, "txid-group", ErrOffsetsLoadInProgress),
NewMockFindCoordinatorResponse(t).
SetCoordinator(CoordinatorTransaction, "txid-group", broker),
),
"InitProducerIDRequest": NewMockSequence(
NewMockInitProducerIDResponse(t).
SetError(ErrOffsetsLoadInProgress),
NewMockInitProducerIDResponse(t).
SetError(ErrOffsetsLoadInProgress),
NewMockInitProducerIDResponse(t).
SetProducerID(1).
SetProducerEpoch(0),
),
})
client, err := NewClient([]string{broker.Addr()}, config)
require.NoError(t, err)
defer client.Close()
txmng, err := newTransactionManager(config, client)
require.NoError(t, err)
require.Equal(t, int64(1), txmng.producerID)
require.Equal(t, int16(0), txmng.producerEpoch)
require.Equal(t, ProducerTxnFlagReady, txmng.status)
}
func TestMaybeAddPartitionToCurrentTxn(t *testing.T) {
type testCase struct {
initialFlags ProducerTxnStatusFlag
initialPartitionsInCurrentTxn topicPartitionSet
initialPendingPartitionsInCurrentTxn topicPartitionSet
tpToAdd map[string][]int32
expectedPendingPartitions topicPartitionSet
expectedPartitionsInTxn topicPartitionSet
}
testCases := []testCase{
{
initialFlags: ProducerTxnFlagInTransaction,
initialPartitionsInCurrentTxn: topicPartitionSet{
{topic: "test-topic", partition: 0}: struct{}{},
},
initialPendingPartitionsInCurrentTxn: topicPartitionSet{},
tpToAdd: map[string][]int32{
"test-topic": {
0,
},
},
expectedPendingPartitions: topicPartitionSet{},
expectedPartitionsInTxn: topicPartitionSet{
{topic: "test-topic", partition: 0}: struct{}{},
},
},
{
initialFlags: ProducerTxnFlagInTransaction,
initialPartitionsInCurrentTxn: topicPartitionSet{},
initialPendingPartitionsInCurrentTxn: topicPartitionSet{},
tpToAdd: map[string][]int32{
"test-topic": {
0,
},
},
expectedPendingPartitions: topicPartitionSet{
{topic: "test-topic", partition: 0}: struct{}{},
},
expectedPartitionsInTxn: topicPartitionSet{},
},
{
initialFlags: ProducerTxnFlagInTransaction,
initialPartitionsInCurrentTxn: topicPartitionSet{
{topic: "test-topic", partition: 0}: struct{}{},
},
initialPendingPartitionsInCurrentTxn: topicPartitionSet{},
tpToAdd: map[string][]int32{
"test-topic": {
0,
},
},
expectedPendingPartitions: topicPartitionSet{},
expectedPartitionsInTxn: topicPartitionSet{
{topic: "test-topic", partition: 0}: struct{}{},
},
},
{
initialFlags: ProducerTxnFlagInTransaction,
initialPartitionsInCurrentTxn: topicPartitionSet{},
initialPendingPartitionsInCurrentTxn: topicPartitionSet{
{topic: "test-topic", partition: 0}: struct{}{},
},
tpToAdd: map[string][]int32{
"test-topic": {
0,
},
},
expectedPendingPartitions: topicPartitionSet{
{topic: "test-topic", partition: 0}: struct{}{},
},
expectedPartitionsInTxn: topicPartitionSet{},
},
{
initialFlags: ProducerTxnFlagInError,
initialPartitionsInCurrentTxn: topicPartitionSet{},
initialPendingPartitionsInCurrentTxn: topicPartitionSet{},
tpToAdd: map[string][]int32{
"test-topic": {
0,
},
},
expectedPendingPartitions: topicPartitionSet{},
expectedPartitionsInTxn: topicPartitionSet{},
},
}
broker := NewMockBroker(t, 1)
defer broker.Close()
metadataLeader := new(MetadataResponse)
metadataLeader.Version = 4
metadataLeader.ControllerID = broker.brokerID
metadataLeader.AddBroker(broker.Addr(), broker.BrokerID())
metadataLeader.AddTopic("test-topic", ErrNoError)
metadataLeader.AddTopicPartition("test-topic", 0, broker.BrokerID(), nil, nil, nil, ErrNoError)
config := NewTestConfig()
config.Producer.Idempotent = true
config.Producer.Transaction.ID = "test"
config.Version = V0_11_0_0
config.Producer.RequiredAcks = WaitForAll
config.Net.MaxOpenRequests = 1
config.Producer.Transaction.Retry.Max = 0
config.Producer.Transaction.Retry.Backoff = 0
for _, tc := range testCases {
func() {
broker.Returns(metadataLeader)
client, err := NewClient([]string{broker.Addr()}, config)
require.NoError(t, err)
defer client.Close()
findCoordinatorResponse := FindCoordinatorResponse{
Coordinator: client.Brokers()[0],
Err: ErrNoError,
Version: 1,
}
broker.Returns(&findCoordinatorResponse)
producerIdResponse := &InitProducerIDResponse{
Err: ErrNoError,
ProducerID: 1,
ProducerEpoch: 0,
}
broker.Returns(producerIdResponse)
txmng, err := newTransactionManager(config, client)
require.NoError(t, err)
txmng.partitionsInCurrentTxn = tc.initialPartitionsInCurrentTxn
txmng.pendingPartitionsInCurrentTxn = tc.initialPendingPartitionsInCurrentTxn
txmng.status = tc.initialFlags
for topic, partitions := range tc.tpToAdd {
for _, partition := range partitions {
txmng.maybeAddPartitionToCurrentTxn(topic, partition)
}
}
require.Equal(t, tc.expectedPartitionsInTxn, txmng.partitionsInCurrentTxn, tc)
require.Equal(t, tc.expectedPendingPartitions, txmng.pendingPartitionsInCurrentTxn, tc)
}()
}
}
func TestAddOffsetsToTxn(t *testing.T) {
type testCase struct {
brokerErr KError
initialFlags ProducerTxnStatusFlag
expectedFlags ProducerTxnStatusFlag
expectedError error
newOffsets topicPartitionOffsets
}
originalOffsets := topicPartitionOffsets{
topicPartition{topic: "test-topic", partition: 0}: {
Partition: 0,
Offset: 0,
},
}
testCases := []testCase{
{
brokerErr: ErrNoError,
initialFlags: ProducerTxnFlagInTransaction,
expectedFlags: ProducerTxnFlagInTransaction,
expectedError: nil,
newOffsets: topicPartitionOffsets{},
},
{
brokerErr: ErrConsumerCoordinatorNotAvailable,
initialFlags: ProducerTxnFlagInTransaction,
expectedFlags: ProducerTxnFlagInTransaction,
expectedError: ErrConsumerCoordinatorNotAvailable,
newOffsets: originalOffsets,
},
{
brokerErr: ErrNotCoordinatorForConsumer,
initialFlags: ProducerTxnFlagInTransaction,
expectedFlags: ProducerTxnFlagInTransaction,
expectedError: ErrNotCoordinatorForConsumer,
newOffsets: originalOffsets,
},
{
brokerErr: ErrOffsetsLoadInProgress,
initialFlags: ProducerTxnFlagInTransaction,
expectedFlags: ProducerTxnFlagInTransaction,
expectedError: ErrOffsetsLoadInProgress,
newOffsets: originalOffsets,
},
{
brokerErr: ErrConcurrentTransactions,
initialFlags: ProducerTxnFlagInTransaction,
expectedFlags: ProducerTxnFlagInTransaction,
expectedError: ErrConcurrentTransactions,
newOffsets: originalOffsets,
},
{
brokerErr: ErrUnknownProducerID,
initialFlags: ProducerTxnFlagInTransaction,
expectedFlags: ProducerTxnFlagFatalError,
expectedError: ErrUnknownProducerID,
newOffsets: originalOffsets,
},
{
brokerErr: ErrInvalidProducerIDMapping,
initialFlags: ProducerTxnFlagInTransaction,
expectedFlags: ProducerTxnFlagFatalError,
expectedError: ErrInvalidProducerIDMapping,
newOffsets: originalOffsets,
},
{
brokerErr: ErrGroupAuthorizationFailed,
initialFlags: ProducerTxnFlagInTransaction,
expectedFlags: ProducerTxnFlagAbortableError,
expectedError: ErrGroupAuthorizationFailed,
newOffsets: originalOffsets,
},
}
broker := NewMockBroker(t, 1)
defer broker.Close()
metadataLeader := new(MetadataResponse)
metadataLeader.Version = 4
metadataLeader.ControllerID = broker.brokerID
metadataLeader.AddBroker(broker.Addr(), broker.BrokerID())
metadataLeader.AddTopic("test-topic", ErrNoError)
metadataLeader.AddTopicPartition("test-topic", 0, broker.BrokerID(), nil, nil, nil, ErrNoError)
config := NewTestConfig()
config.Producer.Idempotent = true
config.Producer.Transaction.ID = "test"
config.Version = V0_11_0_0
config.Producer.RequiredAcks = WaitForAll
config.Net.MaxOpenRequests = 1
config.Producer.Transaction.Retry.Max = 0
config.Producer.Transaction.Retry.Backoff = 0
offsets := topicPartitionOffsets{
topicPartition{topic: "test-topic", partition: 0}: {
Partition: 0,
Offset: 0,
},
}
for _, tc := range testCases {
func() {
broker.Returns(metadataLeader)
client, err := NewClient([]string{broker.Addr()}, config)
require.NoError(t, err)
defer client.Close()
findCoordinatorResponse := FindCoordinatorResponse{
Coordinator: client.Brokers()[0],
Err: ErrNoError,
Version: 1,
}
broker.Returns(&findCoordinatorResponse)
producerIdResponse := &InitProducerIDResponse{
Err: ErrNoError,
ProducerID: 1,
ProducerEpoch: 0,
}
broker.Returns(producerIdResponse)
txmng, err := newTransactionManager(config, client)
require.NoError(t, err)
txmng.status = tc.initialFlags
broker.Returns(&AddOffsetsToTxnResponse{
Err: tc.brokerErr,
})
if errors.Is(tc.brokerErr, ErrRequestTimedOut) ||
errors.Is(tc.brokerErr, ErrConsumerCoordinatorNotAvailable) ||
errors.Is(tc.brokerErr, ErrNotCoordinatorForConsumer) {
broker.Returns(&FindCoordinatorResponse{
Coordinator: client.Brokers()[0],
Err: ErrNoError,
Version: 1,
})
}
if tc.brokerErr == ErrNoError {
broker.Returns(&FindCoordinatorResponse{
Coordinator: client.Brokers()[0],
Err: ErrNoError,
Version: 1,
})
broker.Returns(&TxnOffsetCommitResponse{
Topics: map[string][]*PartitionError{
"test-topic": {
{
Partition: 0,
Err: ErrNoError,
},
},
},
})
}
newOffsets, err := txmng.publishOffsetsToTxn(offsets, "test-group")
if tc.expectedError != nil {
require.Equal(t, tc.expectedError.Error(), err.Error())
} else {
require.Equal(t, tc.expectedError, err)
}
require.Equal(t, tc.newOffsets, newOffsets)
require.True(t, tc.expectedFlags&txmng.status != 0)
}()
}
}
func TestTxnOffsetsCommit(t *testing.T) {
type testCase struct {
brokerErr KError
initialFlags ProducerTxnStatusFlag
initialOffsets topicPartitionOffsets
expectedFlags ProducerTxnStatusFlag
expectedError error
expectedOffsets topicPartitionOffsets
}
originalOffsets := topicPartitionOffsets{
topicPartition{topic: "test-topic", partition: 0}: {
Partition: 0,
Offset: 0,
},
}
testCases := []testCase{
{
brokerErr: ErrConsumerCoordinatorNotAvailable,
initialFlags: ProducerTxnFlagInTransaction,
initialOffsets: topicPartitionOffsets{
topicPartition{topic: "test-topic", partition: 0}: {
Partition: 0,
Offset: 0,
},
},
expectedFlags: ProducerTxnFlagInTransaction,
expectedError: Wrap(ErrTxnOffsetCommit, ErrConsumerCoordinatorNotAvailable),
expectedOffsets: originalOffsets,
},
{
brokerErr: ErrNotCoordinatorForConsumer,
initialFlags: ProducerTxnFlagInTransaction,
initialOffsets: topicPartitionOffsets{
topicPartition{topic: "test-topic", partition: 0}: {
Partition: 0,
Offset: 0,
},
},
expectedFlags: ProducerTxnFlagInTransaction,
expectedError: Wrap(ErrTxnOffsetCommit, ErrNotCoordinatorForConsumer),
expectedOffsets: originalOffsets,
},
{
brokerErr: ErrNoError,
initialFlags: ProducerTxnFlagInTransaction,
initialOffsets: topicPartitionOffsets{
topicPartition{topic: "test-topic", partition: 0}: {
Partition: 0,
Offset: 0,
},
},
expectedFlags: ProducerTxnFlagInTransaction,
expectedError: nil,
expectedOffsets: topicPartitionOffsets{},
},
{
brokerErr: ErrUnknownTopicOrPartition,
initialFlags: ProducerTxnFlagInTransaction,
initialOffsets: topicPartitionOffsets{
topicPartition{topic: "test-topic", partition: 0}: {
Partition: 0,
Offset: 0,
},
},
expectedFlags: ProducerTxnFlagInTransaction,
expectedError: Wrap(ErrTxnOffsetCommit, ErrUnknownTopicOrPartition),
expectedOffsets: originalOffsets,
},
{
brokerErr: ErrOffsetsLoadInProgress,
initialFlags: ProducerTxnFlagInTransaction,
initialOffsets: topicPartitionOffsets{
topicPartition{topic: "test-topic", partition: 0}: {
Partition: 0,
Offset: 0,
},
},
expectedFlags: ProducerTxnFlagInTransaction,
expectedError: Wrap(ErrTxnOffsetCommit, ErrOffsetsLoadInProgress),
expectedOffsets: originalOffsets,
},
{
brokerErr: ErrIllegalGeneration,
initialFlags: ProducerTxnFlagInTransaction,
initialOffsets: topicPartitionOffsets{
topicPartition{topic: "test-topic", partition: 0}: {
Partition: 0,
Offset: 0,
},
},
expectedFlags: ProducerTxnFlagAbortableError,
expectedError: ErrIllegalGeneration,
expectedOffsets: originalOffsets,
},
{
brokerErr: ErrUnknownMemberId,
initialFlags: ProducerTxnFlagInTransaction,
initialOffsets: topicPartitionOffsets{
topicPartition{topic: "test-topic", partition: 0}: {
Partition: 0,
Offset: 0,
},
},
expectedFlags: ProducerTxnFlagAbortableError,
expectedError: ErrUnknownMemberId,
expectedOffsets: originalOffsets,
},
{
brokerErr: ErrFencedInstancedId,
initialFlags: ProducerTxnFlagInTransaction,
initialOffsets: topicPartitionOffsets{
topicPartition{topic: "test-topic", partition: 0}: {
Partition: 0,
Offset: 0,
},
},
expectedFlags: ProducerTxnFlagAbortableError,
expectedError: ErrFencedInstancedId,
expectedOffsets: originalOffsets,
},
{
brokerErr: ErrGroupAuthorizationFailed,
initialFlags: ProducerTxnFlagInTransaction,
initialOffsets: topicPartitionOffsets{
topicPartition{topic: "test-topic", partition: 0}: {
Partition: 0,
Offset: 0,
},
},
expectedFlags: ProducerTxnFlagAbortableError,
expectedError: ErrGroupAuthorizationFailed,
expectedOffsets: originalOffsets,
},
{
brokerErr: ErrKafkaStorageError,
initialFlags: ProducerTxnFlagInTransaction,
initialOffsets: topicPartitionOffsets{
topicPartition{topic: "test-topic", partition: 0}: {
Partition: 0,
Offset: 0,
},
},
expectedFlags: ProducerTxnFlagFatalError,
expectedError: ErrKafkaStorageError,
expectedOffsets: originalOffsets,
},
}
broker := NewMockBroker(t, 1)
defer broker.Close()
config := NewTestConfig()
config.Producer.Idempotent = true
config.Producer.Transaction.ID = "test"
config.Version = V0_11_0_0
config.Producer.RequiredAcks = WaitForAll
config.Net.MaxOpenRequests = 1
config.Producer.Transaction.Retry.Max = 0
config.Producer.Transaction.Retry.Backoff = 0
metadataLeader := new(MetadataResponse)
metadataLeader.Version = 4
metadataLeader.ControllerID = broker.brokerID
metadataLeader.AddBroker(broker.Addr(), broker.BrokerID())
metadataLeader.AddTopic("test-topic", ErrNoError)
metadataLeader.AddTopicPartition("test-topic", 0, broker.BrokerID(), nil, nil, nil, ErrNoError)
for _, tc := range testCases {
func() {
broker.Returns(metadataLeader)
client, err := NewClient([]string{broker.Addr()}, config)
require.NoError(t, err)
defer client.Close()
findCoordinatorResponse := FindCoordinatorResponse{
Coordinator: client.Brokers()[0],
Err: ErrNoError,
Version: 1,
}
broker.Returns(&findCoordinatorResponse)
producerIdResponse := &InitProducerIDResponse{
Err: ErrNoError,
ProducerID: 1,
ProducerEpoch: 0,
}
broker.Returns(producerIdResponse)
txmng, err := newTransactionManager(config, client)
require.NoError(t, err)
txmng.status = tc.initialFlags
broker.Returns(&AddOffsetsToTxnResponse{
Err: ErrNoError,
})
broker.Returns(&FindCoordinatorResponse{
Coordinator: client.Brokers()[0],
Err: ErrNoError,
Version: 1,
})
broker.Returns(&TxnOffsetCommitResponse{
Topics: map[string][]*PartitionError{
"test-topic": {
{
Partition: 0,
Err: tc.brokerErr,
},
},
},
})
if errors.Is(tc.brokerErr, ErrRequestTimedOut) ||
errors.Is(tc.brokerErr, ErrConsumerCoordinatorNotAvailable) ||
errors.Is(tc.brokerErr, ErrNotCoordinatorForConsumer) {
broker.Returns(&FindCoordinatorResponse{
Coordinator: client.Brokers()[0],
Err: ErrNoError,
Version: 1,
})
}
newOffsets, err := txmng.publishOffsetsToTxn(tc.initialOffsets, "test-group")
if tc.expectedError != nil {
require.Equal(t, tc.expectedError.Error(), err.Error())
} else {
require.Equal(t, tc.expectedError, err)
}
require.Equal(t, tc.expectedOffsets, newOffsets)
require.True(t, tc.expectedFlags&txmng.status != 0)
}()
}
}
func TestEndTxn(t *testing.T) {
type testCase struct {
brokerErr KError
commit bool
expectedFlags ProducerTxnStatusFlag
expectedError error
}
testCases := []testCase{
{
brokerErr: ErrNoError,
commit: true,
expectedFlags: ProducerTxnFlagReady,
expectedError: nil,
},
{
brokerErr: ErrConsumerCoordinatorNotAvailable,
commit: true,
expectedFlags: ProducerTxnFlagEndTransaction,
expectedError: ErrConsumerCoordinatorNotAvailable,
},
{
brokerErr: ErrNotCoordinatorForConsumer,
commit: true,
expectedFlags: ProducerTxnFlagEndTransaction,
expectedError: ErrNotCoordinatorForConsumer,
},
{
brokerErr: ErrOffsetsLoadInProgress,
commit: true,
expectedFlags: ProducerTxnFlagEndTransaction,
expectedError: ErrOffsetsLoadInProgress,
},
{
brokerErr: ErrConcurrentTransactions,
commit: true,
expectedFlags: ProducerTxnFlagEndTransaction,
expectedError: ErrConcurrentTransactions,
},
{
brokerErr: ErrUnknownProducerID,
commit: true,
expectedFlags: ProducerTxnFlagFatalError,
expectedError: ErrUnknownProducerID,
},
{
brokerErr: ErrInvalidProducerIDMapping,
commit: true,
expectedFlags: ProducerTxnFlagFatalError,
expectedError: ErrInvalidProducerIDMapping,
},
}
broker := NewMockBroker(t, 1)
defer broker.Close()
metadataLeader := new(MetadataResponse)
metadataLeader.Version = 4
metadataLeader.ControllerID = broker.brokerID
metadataLeader.AddBroker(broker.Addr(), broker.BrokerID())
metadataLeader.AddTopic("test-topic", ErrNoError)
metadataLeader.AddTopicPartition("test-topic", 0, broker.BrokerID(), nil, nil, nil, ErrNoError)
config := NewTestConfig()
config.Producer.Idempotent = true
config.Producer.Transaction.ID = "test"
config.Version = V0_11_0_0
config.Producer.RequiredAcks = WaitForAll
config.Net.MaxOpenRequests = 1
config.Producer.Transaction.Retry.Max = 0
config.Producer.Transaction.Retry.Backoff = 0
for _, tc := range testCases {
func() {
broker.Returns(metadataLeader)
client, err := NewClient([]string{broker.Addr()}, config)
require.NoError(t, err)
defer client.Close()
findCoordinatorResponse := FindCoordinatorResponse{
Coordinator: client.Brokers()[0],
Err: ErrNoError,
Version: 1,
}
broker.Returns(&findCoordinatorResponse)
producerIdResponse := &InitProducerIDResponse{
Err: ErrNoError,
ProducerID: 1,
ProducerEpoch: 0,
}
broker.Returns(producerIdResponse)
txmng, err := newTransactionManager(config, client)
require.NoError(t, err)
txmng.status = ProducerTxnFlagEndTransaction
endTxnResponse := &EndTxnResponse{
Err: tc.brokerErr,
ThrottleTime: 0,
}
broker.Returns(endTxnResponse)
if errors.Is(tc.brokerErr, ErrRequestTimedOut) ||
errors.Is(tc.brokerErr, ErrConsumerCoordinatorNotAvailable) ||
errors.Is(tc.brokerErr, ErrNotCoordinatorForConsumer) {
broker.Returns(&FindCoordinatorResponse{
Coordinator: client.Brokers()[0],
Err: ErrNoError,
Version: 1,
})
}
err = txmng.endTxn(tc.commit)
require.Equal(t, tc.expectedError, err)
require.True(t, txmng.currentTxnStatus()&tc.expectedFlags != 0)
}()
}
}
func TestPublishPartitionToTxn(t *testing.T) {
type testCase struct {
brokerErr KError
expectedFlags ProducerTxnStatusFlag
expectedError error
expectedPendingPartitions topicPartitionSet
expectedPartitionsInTxn topicPartitionSet
}
initialPendingTopicPartitionSet := topicPartitionSet{
{
topic: "test-topic",
partition: 0,
}: struct{}{},
}
testCases := []testCase{
{
brokerErr: ErrNoError,
expectedFlags: ProducerTxnFlagInTransaction,
expectedError: nil,
expectedPendingPartitions: topicPartitionSet{},
expectedPartitionsInTxn: initialPendingTopicPartitionSet,
},
{
brokerErr: ErrConsumerCoordinatorNotAvailable,
expectedFlags: ProducerTxnFlagInTransaction,
expectedError: Wrap(ErrAddPartitionsToTxn, ErrConsumerCoordinatorNotAvailable),
expectedPartitionsInTxn: topicPartitionSet{},
expectedPendingPartitions: initialPendingTopicPartitionSet,
},
{
brokerErr: ErrNotCoordinatorForConsumer,
expectedFlags: ProducerTxnFlagInTransaction,
expectedError: Wrap(ErrAddPartitionsToTxn, ErrNotCoordinatorForConsumer),
expectedPartitionsInTxn: topicPartitionSet{},
expectedPendingPartitions: initialPendingTopicPartitionSet,
},
{
brokerErr: ErrUnknownTopicOrPartition,
expectedFlags: ProducerTxnFlagInTransaction,
expectedError: Wrap(ErrAddPartitionsToTxn, ErrUnknownTopicOrPartition),
expectedPartitionsInTxn: topicPartitionSet{},
expectedPendingPartitions: initialPendingTopicPartitionSet,
},
{
brokerErr: ErrOffsetsLoadInProgress,
expectedFlags: ProducerTxnFlagInTransaction,
expectedError: Wrap(ErrAddPartitionsToTxn, ErrOffsetsLoadInProgress),
expectedPartitionsInTxn: topicPartitionSet{},
expectedPendingPartitions: initialPendingTopicPartitionSet,
},
{
brokerErr: ErrConcurrentTransactions,
expectedFlags: ProducerTxnFlagInTransaction,
expectedError: Wrap(ErrAddPartitionsToTxn, ErrConcurrentTransactions),
expectedPartitionsInTxn: topicPartitionSet{},
expectedPendingPartitions: initialPendingTopicPartitionSet,
},
{
brokerErr: ErrOperationNotAttempted,
expectedFlags: ProducerTxnFlagAbortableError,
expectedError: ErrOperationNotAttempted,
expectedPartitionsInTxn: topicPartitionSet{},
expectedPendingPartitions: topicPartitionSet{},
},
{
brokerErr: ErrTopicAuthorizationFailed,
expectedFlags: ProducerTxnFlagAbortableError,
expectedError: ErrTopicAuthorizationFailed,
expectedPartitionsInTxn: topicPartitionSet{},
expectedPendingPartitions: topicPartitionSet{},
},
{
brokerErr: ErrUnknownProducerID,
expectedFlags: ProducerTxnFlagFatalError,
expectedError: ErrUnknownProducerID,
expectedPartitionsInTxn: topicPartitionSet{},
expectedPendingPartitions: topicPartitionSet{},
},
{
brokerErr: ErrInvalidProducerIDMapping,
expectedFlags: ProducerTxnFlagFatalError,
expectedError: ErrInvalidProducerIDMapping,
expectedPartitionsInTxn: topicPartitionSet{},
expectedPendingPartitions: topicPartitionSet{},
},
{
brokerErr: ErrKafkaStorageError,
expectedFlags: ProducerTxnFlagFatalError,
expectedError: ErrKafkaStorageError,
expectedPartitionsInTxn: topicPartitionSet{},
expectedPendingPartitions: topicPartitionSet{},
},
}
broker := NewMockBroker(t, 1)
defer broker.Close()
metadataLeader := new(MetadataResponse)
metadataLeader.Version = 4
metadataLeader.ControllerID = broker.brokerID
metadataLeader.AddBroker(broker.Addr(), broker.BrokerID())
metadataLeader.AddTopic("test-topic", ErrNoError)
metadataLeader.AddTopicPartition("test-topic", 0, broker.BrokerID(), nil, nil, nil, ErrNoError)
config := NewTestConfig()
config.Producer.Idempotent = true
config.Producer.Transaction.ID = "test"
config.Version = V0_11_0_0
config.Producer.RequiredAcks = WaitForAll
config.Net.MaxOpenRequests = 1
config.Producer.Transaction.Retry.Max = 0
config.Producer.Transaction.Retry.Backoff = 0
for _, tc := range testCases {
func() {
broker.Returns(metadataLeader)
client, err := NewClient([]string{broker.Addr()}, config)
require.NoError(t, err)
defer client.Close()
findCoordinatorResponse := FindCoordinatorResponse{
Coordinator: client.Brokers()[0],
Err: ErrNoError,
Version: 1,
}
broker.Returns(&findCoordinatorResponse)
producerIdResponse := &InitProducerIDResponse{
Err: ErrNoError,
ProducerID: 1,
ProducerEpoch: 0,
}
broker.Returns(producerIdResponse)
txmng, err := newTransactionManager(config, client)
require.NoError(t, err)
txmng.status = ProducerTxnFlagInTransaction
txmng.pendingPartitionsInCurrentTxn = topicPartitionSet{
{
topic: "test-topic",
partition: 0,
}: struct{}{},
}
broker.Returns(&AddPartitionsToTxnResponse{
Errors: map[string][]*PartitionError{
"test-topic": {
{
Partition: 0,
Err: tc.brokerErr,
},
},
},
})
if errors.Is(tc.brokerErr, ErrRequestTimedOut) ||
errors.Is(tc.brokerErr, ErrConsumerCoordinatorNotAvailable) ||
errors.Is(tc.brokerErr, ErrNotCoordinatorForConsumer) {
broker.Returns(&FindCoordinatorResponse{
Coordinator: client.Brokers()[0],
Err: ErrNoError,
Version: 1,
})
}
err = txmng.publishTxnPartitions()
if tc.expectedError != nil {
require.Equal(t, tc.expectedError.Error(), err.Error(), tc)
} else {
require.Equal(t, tc.expectedError, err, tc)
}
require.True(t, txmng.status&tc.expectedFlags != 0, tc)
require.Equal(t, tc.expectedPartitionsInTxn, txmng.partitionsInCurrentTxn, tc)
require.Equal(t, tc.expectedPendingPartitions, txmng.pendingPartitionsInCurrentTxn, tc)
}()
}
}
golang-github-ibm-sarama-1.46.2/txn_offset_commit_request.go
package sarama
type TxnOffsetCommitRequest struct {
Version int16
TransactionalID string
GroupID string
ProducerID int64
ProducerEpoch int16
Topics map[string][]*PartitionOffsetMetadata
}
func (t *TxnOffsetCommitRequest) setVersion(v int16) {
t.Version = v
}
func (t *TxnOffsetCommitRequest) encode(pe packetEncoder) error {
if err := pe.putString(t.TransactionalID); err != nil {
return err
}
if err := pe.putString(t.GroupID); err != nil {
return err
}
pe.putInt64(t.ProducerID)
pe.putInt16(t.ProducerEpoch)
if err := pe.putArrayLength(len(t.Topics)); err != nil {
return err
}
for topic, partitions := range t.Topics {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putArrayLength(len(partitions)); err != nil {
return err
}
for _, partition := range partitions {
if err := partition.encode(pe, t.Version); err != nil {
return err
}
}
}
return nil
}
func (t *TxnOffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
t.Version = version
if t.TransactionalID, err = pd.getString(); err != nil {
return err
}
if t.GroupID, err = pd.getString(); err != nil {
return err
}
if t.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if t.ProducerEpoch, err = pd.getInt16(); err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
t.Topics = make(map[string][]*PartitionOffsetMetadata)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
m, err := pd.getArrayLength()
if err != nil {
return err
}
t.Topics[topic] = make([]*PartitionOffsetMetadata, m)
for j := 0; j < m; j++ {
partitionOffsetMetadata := new(PartitionOffsetMetadata)
if err := partitionOffsetMetadata.decode(pd, version); err != nil {
return err
}
t.Topics[topic][j] = partitionOffsetMetadata
}
}
return nil
}
func (a *TxnOffsetCommitRequest) key() int16 {
return apiKeyTxnOffsetCommit
}
func (a *TxnOffsetCommitRequest) version() int16 {
return a.Version
}
func (a *TxnOffsetCommitRequest) headerVersion() int16 {
return 1
}
func (a *TxnOffsetCommitRequest) isValidVersion() bool {
return a.Version >= 0 && a.Version <= 2
}
func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion {
switch a.Version {
case 2:
return V2_1_0_0
case 1:
return V2_0_0_0
case 0:
return V0_11_0_0
default:
return V2_1_0_0
}
}
type PartitionOffsetMetadata struct {
// Partition contains the index of the partition within the topic.
Partition int32
// Offset contains the message offset to be committed.
Offset int64
// LeaderEpoch contains the leader epoch of the last consumed record.
LeaderEpoch int32
// Metadata contains any associated metadata the client wants to keep.
Metadata *string
}
func (p *PartitionOffsetMetadata) encode(pe packetEncoder, version int16) error {
pe.putInt32(p.Partition)
pe.putInt64(p.Offset)
if version >= 2 {
pe.putInt32(p.LeaderEpoch)
}
if err := pe.putNullableString(p.Metadata); err != nil {
return err
}
return nil
}
func (p *PartitionOffsetMetadata) decode(pd packetDecoder, version int16) (err error) {
if p.Partition, err = pd.getInt32(); err != nil {
return err
}
if p.Offset, err = pd.getInt64(); err != nil {
return err
}
if version >= 2 {
if p.LeaderEpoch, err = pd.getInt32(); err != nil {
return err
}
}
if p.Metadata, err = pd.getNullableString(); err != nil {
return err
}
return nil
}
golang-github-ibm-sarama-1.46.2/txn_offset_commit_request_test.go
//go:build !functional
package sarama
import "testing"
var (
txnOffsetCommitRequest = []byte{
0, 3, 't', 'x', 'n',
0, 7, 'g', 'r', 'o', 'u', 'p', 'i', 'd',
0, 0, 0, 0, 0, 0, 31, 64, // producer ID
0, 1, // producer epoch
0, 0, 0, 1, // 1 topic
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0, 0, 1, // 1 partition
0, 0, 0, 2, // partition no 2
0, 0, 0, 0, 0, 0, 0, 123,
255, 255, // no metadata
}
txnOffsetCommitRequestV2 = []byte{
0, 3, 't', 'x', 'n',
0, 7, 'g', 'r', 'o', 'u', 'p', 'i', 'd',
0, 0, 0, 0, 0, 0, 31, 64, // producer ID
0, 1, // producer epoch
0, 0, 0, 1, // 1 topic
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0, 0, 1, // 1 partition
0, 0, 0, 2, // partition no 2
0, 0, 0, 0, 0, 0, 0, 123,
0, 0, 0, 9, // leader epoch
255, 255, // no metadata
}
)
func TestTxnOffsetCommitRequest(t *testing.T) {
req := &TxnOffsetCommitRequest{
TransactionalID: "txn",
GroupID: "groupid",
ProducerID: 8000,
ProducerEpoch: 1,
Topics: map[string][]*PartitionOffsetMetadata{
"topic": {{
Offset: 123,
Partition: 2,
}},
},
}
testRequest(t, "V0", req, txnOffsetCommitRequest)
}
func TestTxnOffsetCommitRequestV2(t *testing.T) {
req := &TxnOffsetCommitRequest{
Version: 2,
TransactionalID: "txn",
GroupID: "groupid",
ProducerID: 8000,
ProducerEpoch: 1,
Topics: map[string][]*PartitionOffsetMetadata{
"topic": {{
Offset: 123,
Partition: 2,
LeaderEpoch: 9,
}},
},
}
testRequest(t, "V2", req, txnOffsetCommitRequestV2)
}
golang-github-ibm-sarama-1.46.2/txn_offset_commit_response.go
package sarama
import (
"time"
)
type TxnOffsetCommitResponse struct {
Version int16
ThrottleTime time.Duration
Topics map[string][]*PartitionError
}
func (t *TxnOffsetCommitResponse) setVersion(v int16) {
t.Version = v
}
func (t *TxnOffsetCommitResponse) encode(pe packetEncoder) error {
pe.putDurationMs(t.ThrottleTime)
if err := pe.putArrayLength(len(t.Topics)); err != nil {
return err
}
for topic, e := range t.Topics {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putArrayLength(len(e)); err != nil {
return err
}
for _, partitionError := range e {
if err := partitionError.encode(pe); err != nil {
return err
}
}
}
return nil
}
func (t *TxnOffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
t.Version = version
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
t.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
t.Topics = make(map[string][]*PartitionError)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
m, err := pd.getArrayLength()
if err != nil {
return err
}
t.Topics[topic] = make([]*PartitionError, m)
for j := 0; j < m; j++ {
t.Topics[topic][j] = new(PartitionError)
if err := t.Topics[topic][j].decode(pd, version); err != nil {
return err
}
}
}
return nil
}
func (a *TxnOffsetCommitResponse) key() int16 {
return apiKeyTxnOffsetCommit
}
func (a *TxnOffsetCommitResponse) version() int16 {
return a.Version
}
func (a *TxnOffsetCommitResponse) headerVersion() int16 {
return 0
}
func (a *TxnOffsetCommitResponse) isValidVersion() bool {
return a.Version >= 0 && a.Version <= 2
}
func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion {
switch a.Version {
case 2:
return V2_1_0_0
case 1:
return V2_0_0_0
case 0:
return V0_11_0_0
default:
return V2_1_0_0
}
}
func (r *TxnOffsetCommitResponse) throttleTime() time.Duration {
return r.ThrottleTime
}
golang-github-ibm-sarama-1.46.2/txn_offset_commit_response_test.go
//go:build !functional
package sarama
import (
"testing"
"time"
)
var txnOffsetCommitResponse = []byte{
0, 0, 0, 100,
0, 0, 0, 1, // 1 topic
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0, 0, 1, // 1 partition response
0, 0, 0, 2, // partition number 2
0, 47, // err
}
func TestTxnOffsetCommitResponse(t *testing.T) {
resp := &TxnOffsetCommitResponse{
ThrottleTime: 100 * time.Millisecond,
Topics: map[string][]*PartitionError{
"topic": {{
Partition: 2,
Err: ErrInvalidProducerEpoch,
}},
},
}
testResponse(t, "", resp, txnOffsetCommitResponse)
}
golang-github-ibm-sarama-1.46.2/utils.go
package sarama
import (
"bufio"
"fmt"
"math/rand"
"net"
"regexp"
"time"
)
const (
defaultRetryBackoff = 100 * time.Millisecond
defaultRetryMaxBackoff = 1000 * time.Millisecond
)
type none struct{}
// make []int32 sortable so we can sort partition numbers
type int32Slice []int32
func (slice int32Slice) Len() int {
return len(slice)
}
func (slice int32Slice) Less(i, j int) bool {
return slice[i] < slice[j]
}
func (slice int32Slice) Swap(i, j int) {
slice[i], slice[j] = slice[j], slice[i]
}
func dupInt32Slice(input []int32) []int32 {
ret := make([]int32, 0, len(input))
ret = append(ret, input...)
return ret
}
func withRecover(fn func()) {
defer func() {
handler := PanicHandler
if handler != nil {
if err := recover(); err != nil {
handler(err)
}
}
}()
fn()
}
func safeAsyncClose(b *Broker) {
go withRecover(func() {
if connected, _ := b.Connected(); connected {
if err := b.Close(); err != nil {
Logger.Println("Error closing broker", b.ID(), ":", err)
}
}
})
}
// Encoder is a simple interface for any type that can be encoded as an array of bytes
// in order to be sent as the key or value of a Kafka message. Length() is provided as an
// optimization, and must return the same as len() on the result of Encode().
type Encoder interface {
Encode() ([]byte, error)
Length() int
}
// make strings and byte slices encodable for convenience so they can be used as keys
// and/or values in kafka messages
// StringEncoder implements the Encoder interface for Go strings so that they can be used
// as the Key or Value in a ProducerMessage.
type StringEncoder string
func (s StringEncoder) Encode() ([]byte, error) {
return []byte(s), nil
}
func (s StringEncoder) Length() int {
return len(s)
}
// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used
// as the Key or Value in a ProducerMessage.
type ByteEncoder []byte
func (b ByteEncoder) Encode() ([]byte, error) {
return b, nil
}
func (b ByteEncoder) Length() int {
return len(b)
}
// bufConn wraps a net.Conn with a buffer for reads to reduce the number of
// reads that trigger syscalls.
type bufConn struct {
net.Conn
buf *bufio.Reader
}
func newBufConn(conn net.Conn) *bufConn {
return &bufConn{
Conn: conn,
buf: bufio.NewReader(conn),
}
}
func (bc *bufConn) Read(b []byte) (n int, err error) {
return bc.buf.Read(b)
}
// KafkaVersion instances represent versions of the upstream Kafka broker.
type KafkaVersion struct {
// it's a struct rather than just typing the array directly to make it opaque and stop people
// generating their own arbitrary versions
version [4]uint
}
func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion {
return KafkaVersion{
version: [4]uint{major, minor, veryMinor, patch},
}
}
// IsAtLeast returns true if and only if the version it is called on is
// greater than or equal to the version passed in:
//
// V1.IsAtLeast(V2) // false
// V2.IsAtLeast(V1) // true
func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {
for i := range v.version {
if v.version[i] > other.version[i] {
return true
} else if v.version[i] < other.version[i] {
return false
}
}
return true
}
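// Illustrative sketch, not part of the original file: feature gating on the
// configured broker version with IsAtLeast (Config.Version is the sarama
// configuration field assumed here).
func exampleVersionGate(cfg *Config) bool {
	// for instance, only enable a feature once the cluster is at least 2.1.0
	return cfg.Version.IsAtLeast(V2_1_0_0)
}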
// Effective constants defining the supported Kafka versions.
var (
V0_8_2_0 = newKafkaVersion(0, 8, 2, 0)
V0_8_2_1 = newKafkaVersion(0, 8, 2, 1)
V0_8_2_2 = newKafkaVersion(0, 8, 2, 2)
V0_9_0_0 = newKafkaVersion(0, 9, 0, 0)
V0_9_0_1 = newKafkaVersion(0, 9, 0, 1)
V0_10_0_0 = newKafkaVersion(0, 10, 0, 0)
V0_10_0_1 = newKafkaVersion(0, 10, 0, 1)
V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
V0_10_1_1 = newKafkaVersion(0, 10, 1, 1)
V0_10_2_0 = newKafkaVersion(0, 10, 2, 0)
V0_10_2_1 = newKafkaVersion(0, 10, 2, 1)
V0_10_2_2 = newKafkaVersion(0, 10, 2, 2)
V0_11_0_0 = newKafkaVersion(0, 11, 0, 0)
V0_11_0_1 = newKafkaVersion(0, 11, 0, 1)
V0_11_0_2 = newKafkaVersion(0, 11, 0, 2)
V1_0_0_0 = newKafkaVersion(1, 0, 0, 0)
V1_0_1_0 = newKafkaVersion(1, 0, 1, 0)
V1_0_2_0 = newKafkaVersion(1, 0, 2, 0)
V1_1_0_0 = newKafkaVersion(1, 1, 0, 0)
V1_1_1_0 = newKafkaVersion(1, 1, 1, 0)
V2_0_0_0 = newKafkaVersion(2, 0, 0, 0)
V2_0_1_0 = newKafkaVersion(2, 0, 1, 0)
V2_1_0_0 = newKafkaVersion(2, 1, 0, 0)
V2_1_1_0 = newKafkaVersion(2, 1, 1, 0)
V2_2_0_0 = newKafkaVersion(2, 2, 0, 0)
V2_2_1_0 = newKafkaVersion(2, 2, 1, 0)
V2_2_2_0 = newKafkaVersion(2, 2, 2, 0)
V2_3_0_0 = newKafkaVersion(2, 3, 0, 0)
V2_3_1_0 = newKafkaVersion(2, 3, 1, 0)
V2_4_0_0 = newKafkaVersion(2, 4, 0, 0)
V2_4_1_0 = newKafkaVersion(2, 4, 1, 0)
V2_5_0_0 = newKafkaVersion(2, 5, 0, 0)
V2_5_1_0 = newKafkaVersion(2, 5, 1, 0)
V2_6_0_0 = newKafkaVersion(2, 6, 0, 0)
V2_6_1_0 = newKafkaVersion(2, 6, 1, 0)
V2_6_2_0 = newKafkaVersion(2, 6, 2, 0)
V2_6_3_0 = newKafkaVersion(2, 6, 3, 0)
V2_7_0_0 = newKafkaVersion(2, 7, 0, 0)
V2_7_1_0 = newKafkaVersion(2, 7, 1, 0)
V2_7_2_0 = newKafkaVersion(2, 7, 2, 0)
V2_8_0_0 = newKafkaVersion(2, 8, 0, 0)
V2_8_1_0 = newKafkaVersion(2, 8, 1, 0)
V2_8_2_0 = newKafkaVersion(2, 8, 2, 0)
V3_0_0_0 = newKafkaVersion(3, 0, 0, 0)
V3_0_1_0 = newKafkaVersion(3, 0, 1, 0)
V3_0_2_0 = newKafkaVersion(3, 0, 2, 0)
V3_1_0_0 = newKafkaVersion(3, 1, 0, 0)
V3_1_1_0 = newKafkaVersion(3, 1, 1, 0)
V3_1_2_0 = newKafkaVersion(3, 1, 2, 0)
V3_2_0_0 = newKafkaVersion(3, 2, 0, 0)
V3_2_1_0 = newKafkaVersion(3, 2, 1, 0)
V3_2_2_0 = newKafkaVersion(3, 2, 2, 0)
V3_2_3_0 = newKafkaVersion(3, 2, 3, 0)
V3_3_0_0 = newKafkaVersion(3, 3, 0, 0)
V3_3_1_0 = newKafkaVersion(3, 3, 1, 0)
V3_3_2_0 = newKafkaVersion(3, 3, 2, 0)
V3_4_0_0 = newKafkaVersion(3, 4, 0, 0)
V3_4_1_0 = newKafkaVersion(3, 4, 1, 0)
V3_5_0_0 = newKafkaVersion(3, 5, 0, 0)
V3_5_1_0 = newKafkaVersion(3, 5, 1, 0)
V3_5_2_0 = newKafkaVersion(3, 5, 2, 0)
V3_6_0_0 = newKafkaVersion(3, 6, 0, 0)
V3_6_1_0 = newKafkaVersion(3, 6, 1, 0)
V3_6_2_0 = newKafkaVersion(3, 6, 2, 0)
V3_7_0_0 = newKafkaVersion(3, 7, 0, 0)
V3_7_1_0 = newKafkaVersion(3, 7, 1, 0)
V3_7_2_0 = newKafkaVersion(3, 7, 2, 0)
V3_8_0_0 = newKafkaVersion(3, 8, 0, 0)
V3_8_1_0 = newKafkaVersion(3, 8, 1, 0)
V3_9_0_0 = newKafkaVersion(3, 9, 0, 0)
V3_9_1_0 = newKafkaVersion(3, 9, 1, 0)
V4_0_0_0 = newKafkaVersion(4, 0, 0, 0)
V4_1_0_0 = newKafkaVersion(4, 1, 0, 0)
SupportedVersions = []KafkaVersion{
V0_8_2_0,
V0_8_2_1,
V0_8_2_2,
V0_9_0_0,
V0_9_0_1,
V0_10_0_0,
V0_10_0_1,
V0_10_1_0,
V0_10_1_1,
V0_10_2_0,
V0_10_2_1,
V0_10_2_2,
V0_11_0_0,
V0_11_0_1,
V0_11_0_2,
V1_0_0_0,
V1_0_1_0,
V1_0_2_0,
V1_1_0_0,
V1_1_1_0,
V2_0_0_0,
V2_0_1_0,
V2_1_0_0,
V2_1_1_0,
V2_2_0_0,
V2_2_1_0,
V2_2_2_0,
V2_3_0_0,
V2_3_1_0,
V2_4_0_0,
V2_4_1_0,
V2_5_0_0,
V2_5_1_0,
V2_6_0_0,
V2_6_1_0,
V2_6_2_0,
V2_6_3_0,
V2_7_0_0,
V2_7_1_0,
V2_7_2_0,
V2_8_0_0,
V2_8_1_0,
V2_8_2_0,
V3_0_0_0,
V3_0_1_0,
V3_0_2_0,
V3_1_0_0,
V3_1_1_0,
V3_1_2_0,
V3_2_0_0,
V3_2_1_0,
V3_2_2_0,
V3_2_3_0,
V3_3_0_0,
V3_3_1_0,
V3_3_2_0,
V3_4_0_0,
V3_4_1_0,
V3_5_0_0,
V3_5_1_0,
V3_5_2_0,
V3_6_0_0,
V3_6_1_0,
V3_6_2_0,
V3_7_0_0,
V3_7_1_0,
V3_7_2_0,
V3_8_0_0,
V3_8_1_0,
V3_9_0_0,
V3_9_1_0,
V4_0_0_0,
V4_1_0_0,
}
MinVersion = V0_8_2_0
MaxVersion = V4_1_0_0
DefaultVersion = V2_1_0_0
// reduced set of protocol versions used for matrix testing
fvtRangeVersions = []KafkaVersion{
V0_8_2_2,
V0_10_2_2,
V1_0_2_0,
V1_1_1_0,
V2_0_1_0,
V2_2_2_0,
V2_4_1_0,
V2_6_3_0,
V2_8_2_0,
V3_1_2_0,
V3_3_2_0,
V3_6_2_0,
}
)
var (
// This regex validates that a string complies with the pre kafka 1.0.0 format for version strings, for example 0.11.0.3
validPreKafka1Version = regexp.MustCompile(`^0\.\d+\.\d+\.\d+$`)
// This regex validates that a string complies with the post Kafka 1.0.0 format, for example 1.0.0
validPostKafka1Version = regexp.MustCompile(`^\d+\.\d+\.\d+$`)
)
// ParseKafkaVersion parses a Kafka version from a string and returns it, or an error if the string is not a valid version
func ParseKafkaVersion(s string) (KafkaVersion, error) {
if len(s) < 5 {
return DefaultVersion, fmt.Errorf("invalid version `%s`", s)
}
var major, minor, veryMinor, patch uint
var err error
if s[0] == '0' {
err = scanKafkaVersion(s, validPreKafka1Version, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch})
} else {
err = scanKafkaVersion(s, validPostKafka1Version, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor})
}
if err != nil {
return DefaultVersion, err
}
return newKafkaVersion(major, minor, veryMinor, patch), nil
}
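// Illustrative sketch, not part of the original file: parsing a user-supplied
// version string and applying it to a client configuration (NewConfig and
// Config.Version are the sarama APIs assumed here).
func exampleConfigureVersion(versionString string) (*Config, error) {
	v, err := ParseKafkaVersion(versionString) // accepts "0.11.0.2" or "2.6.0" style strings
	if err != nil {
		return nil, err
	}
	cfg := NewConfig()
	cfg.Version = v
	return cfg, nil
}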
func scanKafkaVersion(s string, pattern *regexp.Regexp, format string, v [3]*uint) error {
if !pattern.MatchString(s) {
return fmt.Errorf("invalid version `%s`", s)
}
_, err := fmt.Sscanf(s, format, v[0], v[1], v[2])
return err
}
func (v KafkaVersion) String() string {
if v.version[0] == 0 {
return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], v.version[3])
}
return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2])
}
// NewExponentialBackoff returns a function that implements an exponential backoff strategy with jitter.
// It follows KIP-580, implementing the formula:
// MIN(retry.backoff.max.ms, (retry.backoff.ms * 2**(failures - 1)) * random(0.8, 1.2))
// This ensures retries start with `backoff` and exponentially increase until `maxBackoff`, with added jitter.
// The behavior when `failures = 0` is not explicitly defined in KIP-580 and is left to implementation discretion.
//
// Example usage:
//
// backoffFunc := sarama.NewExponentialBackoff(config.Producer.Retry.Backoff, 2*time.Second)
// config.Producer.Retry.BackoffFunc = backoffFunc
func NewExponentialBackoff(backoff time.Duration, maxBackoff time.Duration) func(retries, maxRetries int) time.Duration {
if backoff <= 0 {
backoff = defaultRetryBackoff
}
if maxBackoff <= 0 {
maxBackoff = defaultRetryMaxBackoff
}
if backoff > maxBackoff {
Logger.Println("Warning: backoff is greater than maxBackoff, using maxBackoff instead.")
backoff = maxBackoff
}
return func(retries, maxRetries int) time.Duration {
if retries <= 0 {
return backoff
}
calculatedBackoff := backoff * time.Duration(1<<(retries-1))
jitter := 0.8 + 0.4*rand.Float64()
calculatedBackoff = time.Duration(float64(calculatedBackoff) * jitter)
return min(calculatedBackoff, maxBackoff)
}
}
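// Illustrative sketch, not part of the original file: with backoff=100ms and
// maxBackoff=2s the returned function yields roughly 100ms, 200ms, 400ms, ...
// (each with +/-20% jitter) until it is capped at 2s.
func exampleExponentialBackoff() {
	backoffFunc := NewExponentialBackoff(100*time.Millisecond, 2*time.Second)
	for retries := 1; retries <= 6; retries++ {
		_ = backoffFunc(retries, 6) // e.g. retries=3 falls in the ~320ms-480ms range
	}
}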
golang-github-ibm-sarama-1.46.2/utils_test.go
//go:build !functional
package sarama
import (
"testing"
"time"
)
func TestVersionCompare(t *testing.T) {
if V0_8_2_0.IsAtLeast(V0_8_2_1) {
t.Error("0.8.2.0 >= 0.8.2.1")
}
if !V0_8_2_1.IsAtLeast(V0_8_2_0) {
t.Error("! 0.8.2.1 >= 0.8.2.0")
}
if !V0_8_2_0.IsAtLeast(V0_8_2_0) {
t.Error("! 0.8.2.0 >= 0.8.2.0")
}
if !V0_9_0_0.IsAtLeast(V0_8_2_1) {
t.Error("! 0.9.0.0 >= 0.8.2.1")
}
if V0_8_2_1.IsAtLeast(V0_10_0_0) {
t.Error("0.8.2.1 >= 0.10.0.0")
}
if !V1_0_0_0.IsAtLeast(V0_9_0_0) {
t.Error("! 1.0.0.0 >= 0.9.0.0")
}
if V0_9_0_0.IsAtLeast(V1_0_0_0) {
t.Error("0.9.0.0 >= 1.0.0.0")
}
}
func TestVersionParsing(t *testing.T) {
validVersions := []string{
"0.8.2.0",
"0.8.2.1",
"0.8.2.2",
"0.9.0.0",
"0.9.0.1",
"0.10.0.0",
"0.10.0.1",
"0.10.1.0",
"0.10.1.1",
"0.10.2.0",
"0.10.2.1",
"0.10.2.2",
"0.11.0.0",
"0.11.0.1",
"0.11.0.2",
"1.0.0",
"1.0.1",
"1.0.2",
"1.1.0",
"1.1.1",
"2.0.0",
"2.0.1",
"2.1.0",
"2.1.1",
"2.2.0",
"2.2.1",
"2.2.2",
"2.3.0",
"2.3.1",
"2.4.0",
"2.4.1",
"2.5.0",
"2.5.1",
"2.6.0",
"2.6.1",
"2.6.2",
"2.6.3",
"2.7.0",
"2.7.1",
"2.7.2",
"2.8.0",
"2.8.1",
"3.0.0",
"3.0.1",
"3.1.0",
"3.1.1",
"3.2.0",
}
for _, s := range validVersions {
v, err := ParseKafkaVersion(s)
if err != nil {
t.Errorf("could not parse valid version %s: %s", s, err)
}
if v.String() != s {
t.Errorf("version %s != %s", v.String(), s)
}
}
invalidVersions := []string{"0.8.2-4", "0.8.20", "1.19.0.0", "1.0.x"}
for _, s := range invalidVersions {
if _, err := ParseKafkaVersion(s); err == nil {
t.Errorf("invalid version %s parsed without error", s)
}
}
}
func TestExponentialBackoffValidCases(t *testing.T) {
testCases := []struct {
retries int
maxRetries int
minBackoff time.Duration
maxBackoffExpected time.Duration
}{
{1, 5, 80 * time.Millisecond, 120 * time.Millisecond},
{3, 5, 320 * time.Millisecond, 480 * time.Millisecond},
{5, 5, 1280 * time.Millisecond, 1920 * time.Millisecond},
}
for _, tc := range testCases {
backoffFunc := NewExponentialBackoff(100*time.Millisecond, 2*time.Second)
backoff := backoffFunc(tc.retries, tc.maxRetries)
if backoff < tc.minBackoff || backoff > tc.maxBackoffExpected {
t.Errorf("backoff(%d, %d): expected between %v and %v, got %v", tc.retries, tc.maxRetries, tc.minBackoff, tc.maxBackoffExpected, backoff)
}
}
}
func TestExponentialBackoffDefaults(t *testing.T) {
testCases := []struct {
backoff time.Duration
maxBackoff time.Duration
}{
{-100 * time.Millisecond, 2 * time.Second},
{100 * time.Millisecond, -2 * time.Second},
{-100 * time.Millisecond, -2 * time.Second},
{0 * time.Millisecond, 2 * time.Second},
{100 * time.Millisecond, 0 * time.Second},
{0 * time.Millisecond, 0 * time.Second},
}
for _, tc := range testCases {
backoffFunc := NewExponentialBackoff(tc.backoff, tc.maxBackoff)
backoff := backoffFunc(2, 5)
if backoff < defaultRetryBackoff || backoff > defaultRetryMaxBackoff {
t.Errorf("backoff(%v, %v): expected between %v and %v, got %v",
tc.backoff, tc.maxBackoff, defaultRetryBackoff, defaultRetryMaxBackoff, backoff)
}
}
}
golang-github-ibm-sarama-1.46.2/version.go
package sarama
import (
"runtime/debug"
"sync"
)
var (
v string
vOnce sync.Once
)
func version() string {
vOnce.Do(func() {
bi, ok := debug.ReadBuildInfo()
if ok {
v = bi.Main.Version
}
if v == "" || v == "(devel)" {
// if we can't read a go module version then they're using a git
// clone or vendored module so all we can do is report "dev" for
// the version to make a valid ApiVersions request
v = "dev"
}
})
return v
}
golang-github-ibm-sarama-1.46.2/zstd.go
package sarama
import (
"sync"
"github.com/klauspost/compress/zstd"
)
// zstdMaxBufferedEncoders is the maximum number of idle (not-in-use) zstd encoders kept for reuse.
// If the pool of encoders is exhausted then new encoders will be created on the fly
const zstdMaxBufferedEncoders = 1
type ZstdEncoderParams struct {
Level int
}
type ZstdDecoderParams struct {
}
var zstdDecMap sync.Map
var zstdAvailableEncoders sync.Map
func getZstdEncoderChannel(params ZstdEncoderParams) chan *zstd.Encoder {
if c, ok := zstdAvailableEncoders.Load(params); ok {
return c.(chan *zstd.Encoder)
}
c, _ := zstdAvailableEncoders.LoadOrStore(params, make(chan *zstd.Encoder, zstdMaxBufferedEncoders))
return c.(chan *zstd.Encoder)
}
func getZstdEncoder(params ZstdEncoderParams) *zstd.Encoder {
select {
case enc := <-getZstdEncoderChannel(params):
return enc
default:
encoderLevel := zstd.SpeedDefault
if params.Level != CompressionLevelDefault {
encoderLevel = zstd.EncoderLevelFromZstd(params.Level)
}
zstdEnc, _ := zstd.NewWriter(nil, zstd.WithZeroFrames(true),
zstd.WithEncoderLevel(encoderLevel),
zstd.WithEncoderConcurrency(1))
return zstdEnc
}
}
func releaseEncoder(params ZstdEncoderParams, enc *zstd.Encoder) {
select {
case getZstdEncoderChannel(params) <- enc:
default:
}
}
func getDecoder(params ZstdDecoderParams) *zstd.Decoder {
if ret, ok := zstdDecMap.Load(params); ok {
return ret.(*zstd.Decoder)
}
// It's possible to race and create multiple new readers.
// Only one will survive GC after use.
zstdDec, _ := zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
zstdDecMap.Store(params, zstdDec)
return zstdDec
}
func zstdDecompress(params ZstdDecoderParams, dst, src []byte) ([]byte, error) {
return getDecoder(params).DecodeAll(src, dst)
}
func zstdCompress(params ZstdEncoderParams, dst, src []byte) ([]byte, error) {
enc := getZstdEncoder(params)
out := enc.EncodeAll(src, dst)
releaseEncoder(params, enc)
return out, nil
}
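// Illustrative sketch, not part of the original file: a compress/decompress
// round trip through the helpers above, using the package's default
// compression level.
func exampleZstdRoundTrip(src []byte) ([]byte, error) {
	compressed, err := zstdCompress(ZstdEncoderParams{Level: CompressionLevelDefault}, nil, src)
	if err != nil {
		return nil, err
	}
	return zstdDecompress(ZstdDecoderParams{}, nil, compressed)
}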
golang-github-ibm-sarama-1.46.2/zstd_test.go
//go:build !functional
package sarama
import (
"runtime"
"testing"
)
func BenchmarkZstdMemoryConsumption(b *testing.B) {
params := ZstdEncoderParams{Level: 9}
buf := make([]byte, 1024*1024)
for i := 0; i < len(buf); i++ {
buf[i] = byte((i / 256) + (i * 257))
}
cpus := 96
gomaxprocsBackup := runtime.GOMAXPROCS(cpus)
b.ReportAllocs()
for b.Loop() {
for j := 0; j < 2*cpus; j++ {
_, _ = zstdCompress(params, nil, buf)
}
// drain the buffered encoder
getZstdEncoder(params)
// previously this would be achieved with
// zstdEncMap.Delete(params)
}
runtime.GOMAXPROCS(gomaxprocsBackup)
}