import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class Example {
public static void main(String[] args) {
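// The named "date" group captures the bracketed timestamp, the lazy second group
// captures the log level token ("INFO" here), and the third group ("(.|\n*)*")
// captures the remainder of the multi-line entry.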
final String regex = "(?<date>\\[.*?\\]) (.*?) ((.|\\n*)*)";
final String string = "[2022-07-22 09:53:27,793] INFO KafkaConfig values:\n"
+ " advertised.host.name = null\n"
+ " advertised.listeners = INTERNAL://0.0.0.0:9092,BROKER://0.0.0.0:9091,CLIENT://0.0.0.0:9093\n"
+ " advertised.port = null\n"
+ " alter.config.policy.class.name = null\n"
+ " alter.log.dirs.replication.quota.window.num = 11\n"
+ " alter.log.dirs.replication.quota.window.size.seconds = 1\n"
+ " authorizer.class.name =\n"
+ " auto.create.topics.enable = true\n"
+ " auto.leader.rebalance.enable = true\n"
+ " background.threads = 10\n"
+ " broker.id = 0\n"
+ " broker.id.generation.enable = true\n"
+ " broker.rack = null\n"
+ " client.quota.callback.class = null\n"
+ " compression.type = producer\n"
+ " connection.failed.authentication.delay.ms = 100\n"
+ " connections.max.idle.ms = 600000\n"
+ " connections.max.reauth.ms = 0\n"
+ " control.plane.listener.name = null\n"
+ " controlled.shutdown.enable = true\n"
+ " controlled.shutdown.max.retries = 3\n"
+ " controlled.shutdown.retry.backoff.ms = 5000\n"
+ " controller.socket.timeout.ms = 30000\n"
+ " create.topic.policy.class.name = null\n"
+ " default.replication.factor = 1\n"
+ " delegation.token.expiry.check.interval.ms = 3600000\n"
+ " delegation.token.expiry.time.ms = 86400000\n"
+ " delegation.token.master.key = null\n"
+ " delegation.token.max.lifetime.ms = 604800000\n"
+ " delete.records.purgatory.purge.interval.requests = 1\n"
+ " delete.topic.enable = true\n"
+ " fetch.purgatory.purge.interval.requests = 1000\n"
+ " group.initial.rebalance.delay.ms = 3000\n"
+ " group.max.session.timeout.ms = 1800000\n"
+ " group.max.size = 2147483647\n"
+ " group.min.session.timeout.ms = 6000\n"
+ " host.name =\n"
+ " inter.broker.listener.name = BROKER\n"
+ " inter.broker.protocol.version = 2.3-IV1\n"
+ " kafka.metrics.polling.interval.secs = 10\n"
+ " kafka.metrics.reporters = []\n"
+ " leader.imbalance.check.interval.seconds = 300\n"
+ " leader.imbalance.per.broker.percentage = 10\n"
+ " listener.security.protocol.map = INTERNAL:PLAINTEXT,BROKER:PLAINTEXT,CLIENT:PLAINTEXT\n"
+ " listeners = INTERNAL://:9092,BROKER://:9091,CLIENT://:9093\n"
+ " log.cleaner.backoff.ms = 15000\n"
+ " log.cleaner.dedupe.buffer.size = 134217728\n"
+ " log.cleaner.delete.retention.ms = 86400000\n"
+ " log.cleaner.enable = true\n"
+ " log.cleaner.io.buffer.load.factor = 0.9\n"
+ " log.cleaner.io.buffer.size = 524288\n"
+ " log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308\n"
+ " log.cleaner.max.compaction.lag.ms = 9223372036854775807\n"
+ " log.cleaner.min.cleanable.ratio = 0.5\n"
+ " log.cleaner.min.compaction.lag.ms = 0\n"
+ " log.cleaner.threads = 1\n"
+ " log.cleanup.policy = [delete]\n"
+ " log.dir = /tmp/kafka-logs\n"
+ " log.dirs = /var/lib/kafka\n"
+ " log.flush.interval.messages = 9223372036854775807\n"
+ " log.flush.interval.ms = null\n"
+ " log.flush.offset.checkpoint.interval.ms = 60000\n"
+ " log.flush.scheduler.interval.ms = 9223372036854775807\n"
+ " log.flush.start.offset.checkpoint.interval.ms = 60000\n"
+ " log.index.interval.bytes = 4096\n"
+ " log.index.size.max.bytes = 10485760\n"
+ " log.message.downconversion.enable = true\n"
+ " log.message.format.version = 2.3-IV1\n"
+ " log.message.timestamp.difference.max.ms = 9223372036854775807\n"
+ " log.message.timestamp.type = CreateTime\n"
+ " log.preallocate = false\n"
+ " log.retention.bytes = -1\n"
+ " log.retention.check.interval.ms = 300000\n"
+ " log.retention.hours = 120\n"
+ " log.retention.minutes = null\n"
+ " log.retention.ms = null\n"
+ " log.roll.hours = 168\n"
+ " log.roll.jitter.hours = 0\n"
+ " log.roll.jitter.ms = null\n"
+ " log.roll.ms = null\n"
+ " log.segment.bytes = 1073741824\n"
+ " log.segment.delete.delay.ms = 60000\n"
+ " max.connections = 2147483647\n"
+ " max.connections.per.ip = 2147483647\n"
+ " max.connections.per.ip.overrides =\n"
+ " max.incremental.fetch.session.cache.slots = 1000\n"
+ " message.max.bytes = 1000012\n"
+ " metric.reporters = []\n"
+ " metrics.num.samples = 2\n"
+ " metrics.recording.level = INFO\n"
+ " metrics.sample.window.ms = 30000\n"
+ " min.insync.replicas = 1\n"
+ " num.io.threads = 8\n"
+ " num.network.threads = 3\n"
+ " num.partitions = 1\n"
+ " num.recovery.threads.per.data.dir = 1\n"
+ " num.replica.alter.log.dirs.threads = null\n"
+ " num.replica.fetchers = 1\n"
+ " offset.metadata.max.bytes = 4096\n"
+ " offsets.commit.required.acks = -1\n"
+ " offsets.commit.timeout.ms = 5000\n"
+ " offsets.load.buffer.size = 5242880\n"
+ " offsets.retention.check.interval.ms = 600000\n"
+ " offsets.retention.minutes = 10080\n"
+ " offsets.topic.compression.codec = 0\n"
+ " offsets.topic.num.partitions = 50\n"
+ " offsets.topic.replication.factor = 1\n"
+ " offsets.topic.segment.bytes = 104857600\n"
+ " password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding\n"
+ " password.encoder.iterations = 4096\n"
+ " password.encoder.key.length = 128\n"
+ " password.encoder.keyfactory.algorithm = null\n"
+ " password.encoder.old.secret = null\n"
+ " password.encoder.secret = null\n"
+ " port = 9092\n"
+ " principal.builder.class = null\n"
+ " producer.purgatory.purge.interval.requests = 1000\n"
+ " queued.max.request.bytes = -1\n"
+ " queued.max.requests = 500\n"
+ " quota.consumer.default = 9223372036854775807\n"
+ " quota.producer.default = 9223372036854775807\n"
+ " quota.window.num = 11\n"
+ " quota.window.size.seconds = 1\n"
+ " replica.fetch.backoff.ms = 1000\n"
+ " replica.fetch.max.bytes = 1048576\n"
+ " replica.fetch.min.bytes = 1\n"
+ " replica.fetch.response.max.bytes = 10485760\n"
+ " replica.fetch.wait.max.ms = 500\n"
+ " replica.high.watermark.checkpoint.interval.ms = 5000\n"
+ " replica.lag.time.max.ms = 10000\n"
+ " replica.socket.receive.buffer.bytes = 65536\n"
+ " replica.socket.timeout.ms = 30000\n"
+ " replication.quota.window.num = 11\n"
+ " replication.quota.window.size.seconds = 1\n"
+ " request.timeout.ms = 30000\n"
+ " reserved.broker.max.id = 1000\n"
+ " sasl.client.callback.handler.class = null\n"
+ " sasl.enabled.mechanisms = [GSSAPI]\n"
+ " sasl.jaas.config = null\n"
+ " sasl.kerberos.kinit.cmd = /usr/bin/kinit\n"
+ " sasl.kerberos.min.time.before.relogin = 60000\n"
+ " sasl.kerberos.principal.to.local.rules = [DEFAULT]\n"
+ " sasl.kerberos.service.name = null\n"
+ " sasl.kerberos.ticket.renew.jitter = 0.05\n"
+ " sasl.kerberos.ticket.renew.window.factor = 0.8\n"
+ " sasl.login.callback.handler.class = null\n"
+ " sasl.login.class = null\n"
+ " sasl.login.refresh.buffer.seconds = 300\n"
+ " sasl.login.refresh.min.period.seconds = 60\n"
+ " sasl.login.refresh.window.factor = 0.8\n"
+ " sasl.login.refresh.window.jitter = 0.05\n"
+ " sasl.mechanism.inter.broker.protocol = GSSAPI\n"
+ " sasl.server.callback.handler.class = null\n"
+ " security.inter.broker.protocol = PLAINTEXT\n"
+ " socket.receive.buffer.bytes = 102400\n"
+ " socket.request.max.bytes = 104857600\n"
+ " socket.send.buffer.bytes = 102400\n"
+ " ssl.cipher.suites = []\n"
+ " ssl.client.auth = none\n"
+ " ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]\n"
+ " ssl.endpoint.identification.algorithm = https\n"
+ " ssl.key.password = null\n"
+ " ssl.keymanager.algorithm = SunX509\n"
+ " ssl.keystore.location = null\n"
+ " ssl.keystore.password = null\n"
+ " ssl.keystore.type = JKS\n"
+ " ssl.principal.mapping.rules = [DEFAULT]\n"
+ " ssl.protocol = TLS\n"
+ " ssl.provider = null\n"
+ " ssl.secure.random.implementation = null\n"
+ " ssl.trustmanager.algorithm = PKIX\n"
+ " ssl.truststore.location = null\n"
+ " ssl.truststore.password = null\n"
+ " ssl.truststore.type = JKS\n"
+ " transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000\n"
+ " transaction.max.timeout.ms = 900000\n"
+ " transaction.remove.expired.transaction.cleanup.interval.ms = 3600000\n"
+ " transaction.state.log.load.buffer.size = 5242880\n"
+ " transaction.state.log.min.isr = 2\n"
+ " transaction.state.log.num.partitions = 50\n"
+ " transaction.state.log.replication.factor = 3\n"
+ " transaction.state.log.segment.bytes = 104857600\n"
+ " transactional.id.expiration.ms = 604800000\n"
+ " unclean.leader.election.enable = false\n"
+ " zookeeper.connect = 0.0.0.0:2181\n"
+ " zookeeper.connection.timeout.ms = 18000\n"
+ " zookeeper.max.in.flight.requests = 10\n"
+ " zookeeper.session.timeout.ms = 6000\n"
+ " zookeeper.set.acl = false\n"
+ " zookeeper.sync.time.ms = 2000\n"
+ " (kafka.server.KafkaConfig)\n"
+ "[2022-07-22 09:53:27,840] ERROR Fatal error during SupportedServerStartable startup. Prepare to shutdown (io.confluent.support.metrics.SupportedKafka)\n"
+ "java.lang.IllegalArgumentException: requirement failed: advertised.listeners cannot use the nonroutable meta-address 0.0.0.0. Use a routable IP address.\n"
+ " at scala.Predef$.require(Predef.scala:224)\n"
+ " at kafka.server.KafkaConfig.validateValues(KafkaConfig.scala:1492)\n"
+ " at kafka.server.KafkaConfig.<init>(KafkaConfig.scala:1460)\n"
+ " at kafka.server.KafkaConfig.<init>(KafkaConfig.scala:1114)\n"
+ " at kafka.server.KafkaConfig$.fromProps(KafkaConfig.scala:1094)\n"
+ " at kafka.server.KafkaConfig$.fromProps(KafkaConfig.scala:1091)\n"
+ " at kafka.server.KafkaConfig.fromProps(KafkaConfig.scala)\n"
+ " at io.confluent.support.metrics.SupportedServerStartable.<init>(SupportedServerStartable.java:52)\n"
+ " at io.confluent.support.metrics.SupportedKafka.main(SupportedKafka.java:45)";
final Pattern pattern = Pattern.compile(regex);
final Matcher matcher = pattern.matcher(string);
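// find() locates the first match; group(0) is the entire matched text and
// groups 1..groupCount() are the individual captures.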
if (matcher.find()) {
System.out.println("Full match: " + matcher.group(0));
for (int i = 1; i <= matcher.groupCount(); i++) {
System.out.println("Group " + i + ": " + matcher.group(i));
}
}
}
}
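
The trailing "((.|\n*)*)" group is one way to capture a multi-line remainder; compiling with Pattern.DOTALL (so that "." also matches line terminators) captures the same three pieces with a plainer pattern. The sketch below is an illustrative alternative rather than part of the generated sample; the class name, the shortened input string, and the extra named groups ("level", "message") are assumptions made for the example.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class NamedGroupExample {
    public static void main(String[] args) {
        // DOTALL makes "." match line terminators too, so the "message" group
        // can span every remaining line of the log entry.
        final Pattern pattern = Pattern.compile(
                "(?<date>\\[.*?\\]) (?<level>\\S+) (?<message>.*)", Pattern.DOTALL);
        final Matcher matcher = pattern.matcher(
                "[2022-07-22 09:53:27,793] INFO KafkaConfig values:\n advertised.host.name = null");
        if (matcher.find()) {
            System.out.println("date:    " + matcher.group("date"));
            System.out.println("level:   " + matcher.group("level"));
            System.out.println("message: " + matcher.group("message"));
        }
    }
}

Running this variant prints the bracketed timestamp, the level token, and the remaining message text, each retrieved by group name instead of by index.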
For a full regex reference for Java, see: https://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html