Apache kafka 了解卡夫卡zookeper自动复位
我仍然对 Kafka 的 ZOOKEPER_AUTO_RESET(即 auto.offset.reset)有疑问。我看到很多关于这方面的问题,如果这是一个重复的提问,请见谅。我有一个持续消费的高级(high-level)Java 消费者。我有多个主题,所有主题都只有一个分区。我关心的是以下几点:我启动了 consumerkafka.jar,消费者组名为 "ncdev1",并且
ZOOKEPER_AUTO_RESET=smallest
。可以观察到init offset设置为-1。过了一段时间,我停止/启动了罐子。此时,它将拾取分配给消费者组(ncdev1)ie 36的最新偏移量。一段时间后我再次重新启动,然后initoffset设置为39。这是最新的值
然后我将组名更改为 ZOOKEPER_GROUP_ID=ncdev2
。然后重新启动jar文件,这次偏移量再次设置为-1。在进一步重新启动时,它跳至最新值ie 39
然后我设置了 ZOOKEPER_AUTO_RESET=largest
和 ZOOKEPER_GROUP_ID=ncdev3
然后尝试重新启动组名为ncdev3的jar文件。它在重新启动时拾取偏移量的方式没有区别。也就是说,它在重新启动时正在拾取39,这与之前的配置相同
你知道为什么它不能从最早的偏移量开始读取吗?要从头开始读取,还需要进行其他配置吗?(基于对 largest 和 smallest 这两个取值的理解)
提前谢谢
代码添加
public class ConsumerForKafka {
    /**
     * Old-style (Kafka 0.8.x, ZooKeeper-based) high-level consumer that forwards
     * every message of one topic to a TCP socket and, when ENABLE_LOG is set,
     * also appends each message to a per-topic log file.
     */
    private final ConsumerConnector consumer;
    /** The single topic this instance consumes. */
    private final String topic;
    private ExecutorService executor; // never used in this class
    // NOTE(review): the four fields below are never referenced in this class;
    // kept (package-private) in case code outside this file touches them.
    ServerSocket soketToWrite;
    Socket s_Accept ;
    OutputStream s1out ;
    DataOutputStream dos;
    /** When true, messages are mirrored to {@link #fileName}. Set in main(). */
    static boolean logEnabled ;
    /** Per-topic log file created by {@link #createDirectory(String, String)}. */
    static File fileName;

    private static final Logger logger = Logger.getLogger(ConsumerForKafka.class);

    /**
     * Connects the high-level consumer to ZooKeeper.
     *
     * @param a_zookeeper     ZooKeeper host:port connect string
     * @param a_groupId       consumer group id (offsets are stored per group)
     * @param a_topic         topic to consume
     * @param session_timeout zookeeper.session.timeout.ms value
     * @param auto_reset      auto.offset.reset value ("smallest" or "largest")
     * @param a_commitEnable  auto.commit.enable value ("true"/"false")
     */
    public ConsumerForKafka(String a_zookeeper, String a_groupId, String a_topic,
            String session_timeout, String auto_reset, String a_commitEnable) {
        consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
                createConsumerConfig(a_zookeeper, a_groupId, session_timeout, auto_reset, a_commitEnable));
        this.topic = a_topic;
    }

    /**
     * Consume-and-forward loop. Blocks forever under normal operation; only an
     * unexpected exception (e.g. IOException on the socket) makes it return.
     *
     * @param a_numThreads number of streams to request for the topic
     * @param a_zookeeper  unused (kept for interface compatibility)
     * @param a_topic      topic name; must match the constructor's topic
     */
    public void run(int a_numThreads, String a_zookeeper, String a_topic) throws InterruptedException, IOException {
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        // FIX: Integer.valueOf uses the cache; "new Integer" is needless allocation.
        topicCountMap.put(topic, Integer.valueOf(a_numThreads));
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);

        String socketURL = PropertyUtils.getProperty("SOCKET_CONNECT_HOST");
        int socketPort = Integer.parseInt(PropertyUtils.getProperty("SOCKET_CONNECT_PORT"));
        Socket socks = new Socket(socketURL, socketPort);

        // Deliberately shadows the field (same value for the only caller, main());
        // kept so the consumerMap lookup below behaves exactly as before.
        String topic = a_topic;

        PrintWriter outWriter = new PrintWriter(socks.getOutputStream(), true);
        List<KafkaStream<byte[], byte[]>> streams = null;
        boolean keepRunningThread = false;
        logger.info("logged");

        BufferedWriter bw = null;
        FileWriter fw = null;
        try {
            if (logEnabled) {
                fw = new FileWriter(fileName, true);
                bw = new BufferedWriter(fw);
            }
            for (;;) {
                streams = consumerMap.get(topic);
                keepRunningThread = true;
                for (final KafkaStream stream : streams) {
                    ConsumerIterator<byte[], byte[]> it = stream.iterator();
                    while (keepRunningThread) {
                        try {
                            if (it.hasNext()) {
                                if (logEnabled) {
                                    // NOTE(review): new String(bytes) uses the platform
                                    // default charset — confirm producer encoding matches.
                                    String data = new String(it.next().message()) + "" + "\n";
                                    bw.write(data);
                                    bw.flush();
                                    outWriter.print(data);
                                    outWriter.flush();
                                    // Commit after every forwarded message so a restart
                                    // resumes from the last delivered offset.
                                    consumer.commitOffsets();
                                    logger.info("Explicit commit ......");
                                } else {
                                    outWriter.print(new String(it.next().message()) + "" + "\n");
                                    outWriter.flush();
                                }
                            }
                        } catch (ConsumerTimeoutException ex) {
                            // consumer.timeout.ms elapsed with no message: leave the
                            // while loop; the outer for(;;) immediately re-polls.
                            keepRunningThread = false;
                            break;
                        } catch (NullPointerException npe) {
                            keepRunningThread = true;
                            npe.printStackTrace();
                        } catch (IllegalStateException ile) {
                            keepRunningThread = true;
                            ile.printStackTrace();
                        }
                    }
                }
            }
        } finally {
            // FIX: resource leak — the original never closed the file writers or
            // the socket, so an exception escaping the loop leaked them all.
            if (bw != null) {
                try { bw.close(); } catch (IOException ignored) { /* best effort */ }
            } else if (fw != null) {
                try { fw.close(); } catch (IOException ignored) { /* best effort */ }
            }
            outWriter.close();
            try { socks.close(); } catch (IOException ignored) { /* best effort */ }
        }
    }

    /**
     * Builds the 0.8.x high-level consumer configuration.
     *
     * About auto.offset.reset (the question this code accompanies): per the
     * Kafka 0.8 documentation its valid values are "smallest" and "largest",
     * and it is consulted ONLY when the group has no committed offset in
     * ZooKeeper. Once the group has committed (auto-commit or the explicit
     * commitOffsets() above), restarts resume from the committed offset and
     * this property is ignored — to re-read from the beginning, use a brand-new
     * group.id together with "smallest".
     */
    private static ConsumerConfig createConsumerConfig(String a_zookeeper, String a_groupId, String session_timeout, String auto_reset, String commitEnable) {
        Properties props = new Properties();
        props.put("zookeeper.connect", a_zookeeper);
        props.put("group.id", a_groupId);
        props.put("zookeeper.session.timeout.ms", session_timeout);
        props.put("zookeeper.sync.time.ms", "2000");
        props.put("auto.offset.reset", auto_reset);
        props.put("auto.commit.interval.ms", "60000");
        // NOTE(review): 30 means 30 *milliseconds* — an extremely aggressive
        // poll timeout; confirm this was not meant to be 30000.
        props.put("consumer.timeout.ms", "30");
        props.put("auto.commit.enable", commitEnable);
        //props.put("rebalance.max.retries", "4");
        return new ConsumerConfig(props);
    }

    /**
     * Entry point. args[0] = topic name; everything else comes from properties.
     * Probes the downstream socket once before starting the consumer.
     */
    public static void main(String[] args) throws InterruptedException {
        String zooKeeper = PropertyUtils.getProperty("ZOOKEEPER_URL_PORT");
        String groupId = PropertyUtils.getProperty("ZOOKEPER_GROUP_ID");
        String session_timeout = PropertyUtils.getProperty("ZOOKEPER_SESSION_TIMOUT_MS"); //6400
        String auto_reset = PropertyUtils.getProperty("ZOOKEPER_AUTO_RESET"); //smallest
        String enableLogging = PropertyUtils.getProperty("ENABLE_LOG");
        String directoryPath = PropertyUtils.getProperty("LOG_DIRECTORY");
        String log4jpath = PropertyUtils.getProperty("LOG_DIR");
        String commitEnable = PropertyUtils.getProperty("ZOOKEPER_COMMIT"); //false
        PropertyConfigurator.configure(log4jpath);

        String socketURL = PropertyUtils.getProperty("SOCKET_CONNECT_HOST");
        int socketPort = Integer.parseInt(PropertyUtils.getProperty("SOCKET_CONNECT_PORT"));
        Socket probe = null;
        try {
            probe = new Socket(socketURL, socketPort);
            boolean connected = probe.isConnected() && !probe.isClosed();
            if (!connected) {
                // Effectively unreachable: the Socket constructor either connects
                // or throws. Kept from the original as a belt-and-braces check.
                logger.info("Not able to conenct to socket ..Exiting...");
                System.exit(0);
            }
        } catch (UnknownHostException e1) {
            e1.printStackTrace();
        } catch (java.net.ConnectException cne) {
            logger.info("Not able to conenct to socket ..Exitring...");
            System.exit(0);
        } catch (IOException e1) {
            e1.printStackTrace();
        } finally {
            // FIX: the probe socket was leaked; run() opens its own connection.
            if (probe != null) {
                try { probe.close(); } catch (IOException ignored) { /* best effort */ }
            }
        }

        // String zooKeeper = args[0];
        // String groupId = args[1];
        String topic = args[0];
        int threads = 1;
        logEnabled = Boolean.parseBoolean(enableLogging);
        if (logEnabled)
            createDirectory(topic, directoryPath);
        ConsumerForKafka example = new ConsumerForKafka(zooKeeper, groupId, topic, session_timeout, auto_reset, commitEnable);
        try {
            example.run(threads, zooKeeper, topic);
        } catch (java.net.ConnectException cne) {
            cne.printStackTrace();
            System.exit(0);
        } catch (IOException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    }

    /**
     * Ensures the log directory exists and creates the per-topic log file,
     * storing it in the static {@link #fileName}.
     */
    private static void createDirectory(String topic, String d_Path) {
        try {
            File dir = new File(d_Path);
            if (!dir.exists()) {
                if (dir.mkdir()) {
                    logger.info("Directory Created" + dir.getPath());
                } else {
                    logger.info("Directory Creation failed");
                }
            }
            // NOTE(review): no File.separator between d_Path and topic — the
            // configured LOG_DIRECTORY must end with a separator. TODO confirm.
            fileName = new File(d_Path + topic + ".log");
            if (!fileName.exists()) {
                fileName.createNewFile();
            }
        } catch (IOException ioe) {
            // FIX: was silently swallowed; at least record the failure.
            logger.info("IOException occured during Directory or During File creation ", ioe);
        }
    }
}
public class ConsumerForKafka {
    private final ConsumerConnector consumer;
    private final String topic;
    private ExecutorService executor;
    ServerSocket soketToWrite;
    Socket s_Accept;
    OutputStream s1out;
    DataOutputStream dos;
    static boolean logEnabled;
    static File fileName;
    private static final Logger logger = Logger.getLogger(ConsumerForKafka.class);

    public ConsumerForKafka(String a_zookeeper, String a_groupId, String a_topic, String session_timeout, String auto_reset, String a_commitEnable) {
        consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
                createConsumerConfig(a_zookeeper, a_groupId, session_timeout, auto_reset, a_commitEnable));
        this.topic = a_topic;
    }

    public void run(int a_numThreads, String a_zookeeper, String a_topic) throws InterruptedException, IOException {
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, new Integer(a_numThreads));
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
        String socketURL = PropertyUtils.getProperty("SOCKET_CONNECT_HOST");
        int socketPort = Integer.parseInt(PropertyUtils.getProperty("SOCKET_CONNECT_PORT"));
        Socket socks = new Socket(socketURL, socketPort);
        //****
        String keeper = a_zookeeper;
        String topic = a_topic;
        long millis = new java.util.Date().getTime();
        //****
        PrintWriter outWriter = new PrintWriter(socks.getOutputStream(), true);
        List<KafkaStream<byte[], byte[]>> streams = null;
        // now create an object to consume the messages
        int threadNumber = 0;
        boolean keepRunningThread = false;
        boolean chcek = false;
        logger.info("logged");
        BufferedWriter bw = null;
        FileWriter fw = null;
        if (logEnabled) {
            fw = new FileWriter(fileName, true);
            bw = new BufferedWriter(fw);
        }
        for (;;) {
            streams = consumerMap.get(topic);
            keepRunningThread = true;
            for (final KafkaStream stream : streams) {
                ConsumerIterator<byte[], byte[]> it = stream.iterator();
                while (keepRunningThread) {
                    try {
                        if (it.hasNext()) {
                            if (logEnabled) {
                                String data = new String(it.next().message()) + "" + "\n";
                                bw.write(data);
                                bw.flush();
                                outWriter.print(data);
                                outWriter.flush();
                                consumer.commitOffsets();
                                logger.info("Explicit commit ......");
                            } else {
                                outWriter.print(new String(it.next().message()) + "" + "\n");
                                outWriter.flush();
                            }
                        }
                    } catch (ConsumerTimeoutException ex) {
                        keepRunningThread = false;
                        break;
                    } catch (NullPointerException npe) {
                        keepRunningThread = true;
                        npe.printStackTrace();
                    } catch (IllegalStateException ile) {
                        keepRunningThread = true;
                        ile.printStackTrace();
                    }
                }
            }
        }
    }

    private static ConsumerConfig createConsumerConfig(String a_zookeeper, String a_groupId, String session_timeout, String auto_reset, String commitEnable) {
        Properties props = new Properties();
        props.put("zookeeper.connect", a_zookeeper);
        props.put("group.id", a_groupId);
        props.put("zookeeper.session.timeout.ms", session_timeout);
        props.put("zookeeper.sync.time.ms", "2000");
        props.put("auto.offset.reset", auto_reset);
        props.put("auto.commit.interval.ms", "60000");
        props.put("consumer.timeout.ms", "30");
        props.put("auto.commit.enable", commitEnable);
        //props.put("rebalance.max.retries", "4");
        return new ConsumerConfig(props);
    }

    public static void main(String[] args) throws InterruptedException {
        String zooKeeper = PropertyUtils.getProperty("ZOOKEEPER_URL_PORT");
        String groupId = PropertyUtils.getProperty("ZOOKEPER_GROUP_ID");