We all know that Kafka uses ZooKeeper for distributed coordination, but exactly which znodes does it create and use?

The answer is all in the ZkData.scala source file; for the 2.1 branch the path is:

https://github.com/apache/kafka/blob/2.1/core/src/main/scala/kafka/zk/ZkData.scala

The file defines one object per znode (or per znode family). The top-level roots it covers are /controller, /controller_epoch, /brokers, /config, /admin, /isr_change_notification, /log_dir_event_notification, /latest_producer_id_block, /delegation_token, /cluster, /consumers (the old consumer path), and the ACL roots /kafka-acl and /kafka-acl-extended together with their change-notification paths. The full contents of the file are as follows:

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package kafka.zk

import java.nio.charset.StandardCharsets.UTF_8
import java.util.Properties

import com.fasterxml.jackson.annotation.JsonProperty
import com.fasterxml.jackson.core.JsonProcessingException
import kafka.api.{ApiVersion, KAFKA_0_10_0_IV1, LeaderAndIsr}
import kafka.cluster.{Broker, EndPoint}
import kafka.common.{NotificationHandler, ZkNodeChangeNotificationListener}
import kafka.controller.{IsrChangeNotificationHandler, LeaderIsrAndControllerEpoch}
import kafka.security.auth.Resource.Separator
import kafka.security.auth.SimpleAclAuthorizer.VersionedAcls
import kafka.security.auth.{Acl, Resource, ResourceType}
import kafka.server.{ConfigType, DelegationTokenManager}
import kafka.utils.Json
import org.apache.kafka.common.{KafkaException, TopicPartition}
import org.apache.kafka.common.errors.UnsupportedVersionException
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.resource.PatternType
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.apache.kafka.common.security.token.delegation.{DelegationToken, TokenInformation}
import org.apache.kafka.common.utils.Time
import org.apache.zookeeper.ZooDefs
import org.apache.zookeeper.data.{ACL, Stat}

import scala.beans.BeanProperty
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import scala.collection.{Seq, breakOut}
import scala.util.{Failure, Success, Try}

// This file contains objects for encoding/decoding data stored in ZooKeeper nodes (znodes).

object ControllerZNode {
  def path = "/controller"
  def encode(brokerId: Int, timestamp: Long): Array[Byte] = {
    Json.encodeAsBytes(Map("version" -> 1, "brokerid" -> brokerId, "timestamp" -> timestamp.toString).asJava)
  }
  def decode(bytes: Array[Byte]): Option[Int] = Json.parseBytes(bytes).map { js =>
    js.asJsonObject("brokerid").to[Int]
  }
}

object ControllerEpochZNode {
  def path = "/controller_epoch"
  def encode(epoch: Int): Array[Byte] = epoch.toString.getBytes(UTF_8)
  def decode(bytes: Array[Byte]): Int = new String(bytes, UTF_8).toInt
}

object ConfigZNode {
  def path = "/config"
}

object BrokersZNode {
  def path = "/brokers"
}

object BrokerIdsZNode {
  def path = s"${BrokersZNode.path}/ids"
  def encode: Array[Byte] = null
}

object BrokerInfo {

  /**
   * Create a broker info with v4 json format (which includes multiple endpoints and rack) if
   * the apiVersion is 0.10.0.X or above. Register the broker with v2 json format otherwise.
   *
   * Due to KAFKA-3100, 0.9.0.0 broker and old clients will break if JSON version is above 2.
   *
   * We include v2 to make it possible for the broker to migrate from 0.9.0.0 to 0.10.0.X or above without having to
   * upgrade to 0.9.0.1 first (clients have to be upgraded to 0.9.0.1 in any case).
   */
  def apply(broker: Broker, apiVersion: ApiVersion, jmxPort: Int): BrokerInfo = {
    // see method documentation for the reason why we do this
    val version = if (apiVersion >= KAFKA_0_10_0_IV1) 4 else 2
    BrokerInfo(broker, version, jmxPort)
  }

}

case class BrokerInfo(broker: Broker, version: Int, jmxPort: Int) {
  val path: String = BrokerIdZNode.path(broker.id)
  def toJsonBytes: Array[Byte] = BrokerIdZNode.encode(this)
}

object BrokerIdZNode {
  private val HostKey = "host"
  private val PortKey = "port"
  private val VersionKey = "version"
  private val EndpointsKey = "endpoints"
  private val RackKey = "rack"
  private val JmxPortKey = "jmx_port"
  private val ListenerSecurityProtocolMapKey = "listener_security_protocol_map"
  private val TimestampKey = "timestamp"

  def path(id: Int) = s"${BrokerIdsZNode.path}/$id"

  /**
   * Encode to JSON bytes.
   *
   * The JSON format includes a top level host and port for compatibility with older clients.
   */
  def encode(version: Int, host: String, port: Int, advertisedEndpoints: Seq[EndPoint], jmxPort: Int,
             rack: Option[String]): Array[Byte] = {
    val jsonMap = collection.mutable.Map(VersionKey -> version,
      HostKey -> host,
      PortKey -> port,
      EndpointsKey -> advertisedEndpoints.map(_.connectionString).toBuffer.asJava,
      JmxPortKey -> jmxPort,
      TimestampKey -> Time.SYSTEM.milliseconds().toString
    )
    rack.foreach(rack => if (version >= 3) jsonMap += (RackKey -> rack))

    if (version >= 4) {
      jsonMap += (ListenerSecurityProtocolMapKey -> advertisedEndpoints.map { endPoint =>
        endPoint.listenerName.value -> endPoint.securityProtocol.name
      }.toMap.asJava)
    }
    Json.encodeAsBytes(jsonMap.asJava)
  }

  def encode(brokerInfo: BrokerInfo): Array[Byte] = {
    val broker = brokerInfo.broker
    // the default host and port are here for compatibility with older clients that only support PLAINTEXT
    // we choose the first plaintext port, if there is one
    // or we register an empty endpoint, which means that older clients will not be able to connect
    val plaintextEndpoint = broker.endPoints.find(_.securityProtocol == SecurityProtocol.PLAINTEXT).getOrElse(
      new EndPoint(null, -1, null, null))
    encode(brokerInfo.version, plaintextEndpoint.host, plaintextEndpoint.port, broker.endPoints, brokerInfo.jmxPort,
      broker.rack)
  }

  /**
   * Create a BrokerInfo object from id and JSON bytes.
   *
   * @param id
   * @param jsonBytes
   *
   * Version 1 JSON schema for a broker is:
   * {
   *   "version":1,
   *   "host":"localhost",
   *   "port":9092
   *   "jmx_port":9999,
   *   "timestamp":"2233345666"
   * }
   *
   * Version 2 JSON schema for a broker is:
   * {
   *   "version":2,
   *   "host":"localhost",
   *   "port":9092,
   *   "jmx_port":9999,
   *   "timestamp":"2233345666",
   *   "endpoints":["PLAINTEXT://host1:9092", "SSL://host1:9093"]
   * }
   *
   * Version 3 JSON schema for a broker is:
   * {
   *   "version":3,
   *   "host":"localhost",
   *   "port":9092,
   *   "jmx_port":9999,
   *   "timestamp":"2233345666",
   *   "endpoints":["PLAINTEXT://host1:9092", "SSL://host1:9093"],
   *   "rack":"dc1"
   * }
   *
   * Version 4 (current) JSON schema for a broker is:
   * {
   *   "version":4,
   *   "host":"localhost",
   *   "port":9092,
   *   "jmx_port":9999,
   *   "timestamp":"2233345666",
   *   "endpoints":["CLIENT://host1:9092", "REPLICATION://host1:9093"],
   *   "listener_security_protocol_map":{"CLIENT":"SSL", "REPLICATION":"PLAINTEXT"},
   *   "rack":"dc1"
   * }
   */
  def decode(id: Int, jsonBytes: Array[Byte]): BrokerInfo = {
    Json.tryParseBytes(jsonBytes) match {
      case Right(js) =>
        val brokerInfo = js.asJsonObject
        val version = brokerInfo(VersionKey).to[Int]
        val jmxPort = brokerInfo(JmxPortKey).to[Int]

        val endpoints =
          if (version < 1)
            throw new KafkaException("Unsupported version of broker registration: " +
              s"${new String(jsonBytes, UTF_8)}")
          else if (version == 1) {
            val host = brokerInfo(HostKey).to[String]
            val port = brokerInfo(PortKey).to[Int]
            val securityProtocol = SecurityProtocol.PLAINTEXT
            val endPoint = new EndPoint(host, port, ListenerName.forSecurityProtocol(securityProtocol), securityProtocol)
            Seq(endPoint)
          }
          else {
            val securityProtocolMap = brokerInfo.get(ListenerSecurityProtocolMapKey).map(
              _.to[Map[String, String]].map { case (listenerName, securityProtocol) =>
                new ListenerName(listenerName) -> SecurityProtocol.forName(securityProtocol)
              })
            val listeners = brokerInfo(EndpointsKey).to[Seq[String]]
            listeners.map(EndPoint.createEndPoint(_, securityProtocolMap))
          }

        val rack = brokerInfo.get(RackKey).flatMap(_.to[Option[String]])
        BrokerInfo(Broker(id, endpoints, rack), version, jmxPort)
      case Left(e) =>
        throw new KafkaException(s"Failed to parse ZooKeeper registration for broker $id: " +
          s"${new String(jsonBytes, UTF_8)}", e)
    }
  }
}

object TopicsZNode {
  def path = s"${BrokersZNode.path}/topics"
}

object TopicZNode {
  def path(topic: String) = s"${TopicsZNode.path}/$topic"
  def encode(assignment: collection.Map[TopicPartition, Seq[Int]]): Array[Byte] = {
    val assignmentJson = assignment.map { case (partition, replicas) =>
      partition.partition.toString -> replicas.asJava
    }
    Json.encodeAsBytes(Map("version" -> 1, "partitions" -> assignmentJson.asJava).asJava)
  }
  def decode(topic: String, bytes: Array[Byte]): Map[TopicPartition, Seq[Int]] = {
    Json.parseBytes(bytes).flatMap { js =>
      val assignmentJson = js.asJsonObject
      val partitionsJsonOpt = assignmentJson.get("partitions").map(_.asJsonObject)
      partitionsJsonOpt.map { partitionsJson =>
        partitionsJson.iterator.map { case (partition, replicas) =>
          new TopicPartition(topic, partition.toInt) -> replicas.to[Seq[Int]]
        }
      }
    }.map(_.toMap).getOrElse(Map.empty)
  }
}

object TopicPartitionsZNode {
  def path(topic: String) = s"${TopicZNode.path(topic)}/partitions"
}

object TopicPartitionZNode {
  def path(partition: TopicPartition) = s"${TopicPartitionsZNode.path(partition.topic)}/${partition.partition}"
}

object TopicPartitionStateZNode {
  def path(partition: TopicPartition) = s"${TopicPartitionZNode.path(partition)}/state"
  def encode(leaderIsrAndControllerEpoch: LeaderIsrAndControllerEpoch): Array[Byte] = {
    val leaderAndIsr = leaderIsrAndControllerEpoch.leaderAndIsr
    val controllerEpoch = leaderIsrAndControllerEpoch.controllerEpoch
    Json.encodeAsBytes(Map("version" -> 1, "leader" -> leaderAndIsr.leader, "leader_epoch" -> leaderAndIsr.leaderEpoch,
      "controller_epoch" -> controllerEpoch, "isr" -> leaderAndIsr.isr.asJava).asJava)
  }
  def decode(bytes: Array[Byte], stat: Stat): Option[LeaderIsrAndControllerEpoch] = {
    Json.parseBytes(bytes).map { js =>
      val leaderIsrAndEpochInfo = js.asJsonObject
      val leader = leaderIsrAndEpochInfo("leader").to[Int]
      val epoch = leaderIsrAndEpochInfo("leader_epoch").to[Int]
      val isr = leaderIsrAndEpochInfo("isr").to[List[Int]]
      val controllerEpoch = leaderIsrAndEpochInfo("controller_epoch").to[Int]
      val zkPathVersion = stat.getVersion
      LeaderIsrAndControllerEpoch(LeaderAndIsr(leader, epoch, isr, zkPathVersion), controllerEpoch)
    }
  }
}

object ConfigEntityTypeZNode {
  def path(entityType: String) = s"${ConfigZNode.path}/$entityType"
}

object ConfigEntityZNode {
  def path(entityType: String, entityName: String) = s"${ConfigEntityTypeZNode.path(entityType)}/$entityName"
  def encode(config: Properties): Array[Byte] = {
    Json.encodeAsBytes(Map("version" -> 1, "config" -> config).asJava)
  }
  def decode(bytes: Array[Byte]): Properties = {
    val props = new Properties()
    if (bytes != null) {
      Json.parseBytes(bytes).foreach { js =>
        val configOpt = js.asJsonObjectOption.flatMap(_.get("config").flatMap(_.asJsonObjectOption))
        configOpt.foreach(config => config.iterator.foreach { case (k, v) => props.setProperty(k, v.to[String]) })
      }
    }
    props
  }
}

object ConfigEntityChangeNotificationZNode {
  def path = s"${ConfigZNode.path}/changes"
}

object ConfigEntityChangeNotificationSequenceZNode {
  val SequenceNumberPrefix = "config_change_"
  def createPath = s"${ConfigEntityChangeNotificationZNode.path}/$SequenceNumberPrefix"
  def encode(sanitizedEntityPath: String): Array[Byte] = Json.encodeAsBytes(
    Map("version" -> 2, "entity_path" -> sanitizedEntityPath).asJava)
}

object IsrChangeNotificationZNode {
  def path = "/isr_change_notification"
}

object IsrChangeNotificationSequenceZNode {
  val SequenceNumberPrefix = "isr_change_"
  def path(sequenceNumber: String = "") = s"${IsrChangeNotificationZNode.path}/$SequenceNumberPrefix$sequenceNumber"
  def encode(partitions: collection.Set[TopicPartition]): Array[Byte] = {
    val partitionsJson = partitions.map(partition => Map("topic" -> partition.topic, "partition" -> partition.partition).asJava)
    Json.encodeAsBytes(Map("version" -> IsrChangeNotificationHandler.Version, "partitions" -> partitionsJson.asJava).asJava)
  }

  def decode(bytes: Array[Byte]): Set[TopicPartition] = {
    Json.parseBytes(bytes).map { js =>
      val partitionsJson = js.asJsonObject("partitions").asJsonArray
      partitionsJson.iterator.map { partitionsJson =>
        val partitionJson = partitionsJson.asJsonObject
        val topic = partitionJson("topic").to[String]
        val partition = partitionJson("partition").to[Int]
        new TopicPartition(topic, partition)
      }
    }
  }.map(_.toSet).getOrElse(Set.empty)
  def sequenceNumber(path: String) = path.substring(path.lastIndexOf(SequenceNumberPrefix) + SequenceNumberPrefix.length)
}

object LogDirEventNotificationZNode {
  def path = "/log_dir_event_notification"
}

object LogDirEventNotificationSequenceZNode {
  val SequenceNumberPrefix = "log_dir_event_"
  val LogDirFailureEvent = 1
  def path(sequenceNumber: String) = s"${LogDirEventNotificationZNode.path}/$SequenceNumberPrefix$sequenceNumber"
  def encode(brokerId: Int) = {
    Json.encodeAsBytes(Map("version" -> 1, "broker" -> brokerId, "event" -> LogDirFailureEvent).asJava)
  }
  def decode(bytes: Array[Byte]): Option[Int] = Json.parseBytes(bytes).map { js =>
    js.asJsonObject("broker").to[Int]
  }
  def sequenceNumber(path: String) = path.substring(path.lastIndexOf(SequenceNumberPrefix) + SequenceNumberPrefix.length)
}

object AdminZNode {
  def path = "/admin"
}

object DeleteTopicsZNode {
  def path = s"${AdminZNode.path}/delete_topics"
}

object DeleteTopicsTopicZNode {
  def path(topic: String) = s"${DeleteTopicsZNode.path}/$topic"
}

object ReassignPartitionsZNode {

  /**
   * The assignment of brokers for a `TopicPartition`.
   *
   * A replica assignment consists of a `topic`, `partition` and a list of `replicas`, which
   * represent the broker ids that the `TopicPartition` is assigned to.
   */
  case class ReplicaAssignment(@BeanProperty @JsonProperty("topic") topic: String,
                               @BeanProperty @JsonProperty("partition") partition: Int,
                               @BeanProperty @JsonProperty("replicas") replicas: java.util.List[Int])

  /**
   * An assignment consists of a `version` and a list of `partitions`, which represent the
   * assignment of topic-partitions to brokers.
   */
  case class PartitionAssignment(@BeanProperty @JsonProperty("version") version: Int,
                                 @BeanProperty @JsonProperty("partitions") partitions: java.util.List[ReplicaAssignment])

  def path = s"${AdminZNode.path}/reassign_partitions"

  def encode(reassignmentMap: collection.Map[TopicPartition, Seq[Int]]): Array[Byte] = {
    val reassignment = PartitionAssignment(1,
      reassignmentMap.toSeq.map { case (tp, replicas) =>
        ReplicaAssignment(tp.topic, tp.partition, replicas.asJava)
      }.asJava
    )
    Json.encodeAsBytes(reassignment)
  }

  def decode(bytes: Array[Byte]): Either[JsonProcessingException, collection.Map[TopicPartition, Seq[Int]]] =
    Json.parseBytesAs[PartitionAssignment](bytes).right.map { partitionAssignment =>
      partitionAssignment.partitions.asScala.map { replicaAssignment =>
        new TopicPartition(replicaAssignment.topic, replicaAssignment.partition) -> replicaAssignment.replicas.asScala
      }(breakOut)
    }
}

object PreferredReplicaElectionZNode {
  def path = s"${AdminZNode.path}/preferred_replica_election"
  def encode(partitions: Set[TopicPartition]): Array[Byte] = {
    val jsonMap = Map("version" -> 1,
      "partitions" -> partitions.map(tp => Map("topic" -> tp.topic, "partition" -> tp.partition).asJava).asJava)
    Json.encodeAsBytes(jsonMap.asJava)
  }
  def decode(bytes: Array[Byte]): Set[TopicPartition] = Json.parseBytes(bytes).map { js =>
    val partitionsJson = js.asJsonObject("partitions").asJsonArray
    partitionsJson.iterator.map { partitionsJson =>
      val partitionJson = partitionsJson.asJsonObject
      val topic = partitionJson("topic").to[String]
      val partition = partitionJson("partition").to[Int]
      new TopicPartition(topic, partition)
    }
  }.map(_.toSet).getOrElse(Set.empty)
}

//old consumer path znode
object ConsumerPathZNode {
  def path = "/consumers"
}

object ConsumerOffset {
  def path(group: String, topic: String, partition: Integer) = s"${ConsumerPathZNode.path}/${group}/offsets/${topic}/${partition}"
  def encode(offset: Long): Array[Byte] = offset.toString.getBytes(UTF_8)
  def decode(bytes: Array[Byte]): Option[Long] = Option(bytes).map(new String(_, UTF_8).toLong)
}

object ZkVersion {
  val MatchAnyVersion = -1 // if used in a conditional set, matches any version (the value should match ZooKeeper codebase)
  val UnknownVersion = -2  // Version returned from get if node does not exist (internal constant for Kafka codebase, unused value in ZK)
}

object ZkStat {
  val NoStat = new Stat()
}

object StateChangeHandlers {
  val ControllerHandler = "controller-state-change-handler"
  def zkNodeChangeListenerHandler(seqNodeRoot: String) = s"change-notification-$seqNodeRoot"
}

/**
 * Acls for resources are stored in ZK under two root paths:
 * <ul>
 * <li>[[org.apache.kafka.common.resource.PatternType#LITERAL Literal]] patterns are stored under '/kafka-acl'.
 * The format is JSON. See [[kafka.zk.ResourceZNode]] for details.</li>
 * <li>All other patterns are stored under '/kafka-acl-extended/<i>pattern-type</i>'.
 * The format is JSON. See [[kafka.zk.ResourceZNode]] for details.</li>
 * </ul>
 *
 * Under each root node there will be one child node per resource type (Topic, Cluster, Group, etc).
 * Under each resourceType there will be a unique child for each resource pattern and the data for that child will contain
 * list of its acls as a json object. Following gives an example:
 *
 * <pre>
 * // Literal patterns:
 * /kafka-acl/Topic/topic-1 => {"version": 1, "acls": [ { "host":"host1", "permissionType": "Allow","operation": "Read","principal": "User:alice"}]}
 * /kafka-acl/Cluster/kafka-cluster => {"version": 1, "acls": [ { "host":"host1", "permissionType": "Allow","operation": "Read","principal": "User:alice"}]}
 *
 * // Prefixed patterns:
 * /kafka-acl-extended/PREFIXED/Group/group-1 => {"version": 1, "acls": [ { "host":"host1", "permissionType": "Allow","operation": "Read","principal": "User:alice"}]}
 * </pre>
 *
 * Acl change events are also stored under two paths:
 * <ul>
 * <li>[[org.apache.kafka.common.resource.PatternType#LITERAL Literal]] patterns are stored under '/kafka-acl-changes'.
 * The format is a UTF8 string in the form: <resource-type>:<resource-name></li>
 * <li>All other patterns are stored under '/kafka-acl-extended-changes'
 * The format is JSON, as defined by [[kafka.zk.ExtendedAclChangeEvent]]</li>
 * </ul>
 */
sealed trait ZkAclStore {
  val patternType: PatternType
  val aclPath: String

  def path(resourceType: ResourceType): String = s"$aclPath/$resourceType"

  def path(resourceType: ResourceType, resourceName: String): String = s"$aclPath/$resourceType/$resourceName"

  def changeStore: ZkAclChangeStore
}

object ZkAclStore {
  private val storesByType: Map[PatternType, ZkAclStore] = PatternType.values
    .filter(_.isSpecific)
    .map(patternType => (patternType, create(patternType)))
    .toMap

  val stores: Iterable[ZkAclStore] = storesByType.values

  val securePaths: Iterable[String] = stores
    .flatMap(store => Set(store.aclPath, store.changeStore.aclChangePath))

  def apply(patternType: PatternType): ZkAclStore = {
    storesByType.get(patternType) match {
      case Some(store) => store
      case None => throw new KafkaException(s"Invalid pattern type: $patternType")
    }
  }

  private def create(patternType: PatternType) = {
    patternType match {
      case PatternType.LITERAL => LiteralAclStore
      case _ => new ExtendedAclStore(patternType)
    }
  }
}

object LiteralAclStore extends ZkAclStore {
  val patternType: PatternType = PatternType.LITERAL
  val aclPath: String = "/kafka-acl"

  def changeStore: ZkAclChangeStore = LiteralAclChangeStore
}

class ExtendedAclStore(val patternType: PatternType) extends ZkAclStore {
  if (patternType == PatternType.LITERAL)
    throw new IllegalArgumentException("Literal pattern types are not supported")

  val aclPath: String = s"/kafka-acl-extended/${patternType.name.toLowerCase}"

  def changeStore: ZkAclChangeStore = ExtendedAclChangeStore
}

trait AclChangeNotificationHandler {
  def processNotification(resource: Resource): Unit
}

trait AclChangeSubscription extends AutoCloseable {
  def close(): Unit
}

case class AclChangeNode(path: String, bytes: Array[Byte])

sealed trait ZkAclChangeStore {
  val aclChangePath: String
  def createPath: String = s"$aclChangePath/${ZkAclChangeStore.SequenceNumberPrefix}"

  def decode(bytes: Array[Byte]): Resource

  protected def encode(resource: Resource): Array[Byte]

  def createChangeNode(resource: Resource): AclChangeNode = AclChangeNode(createPath, encode(resource))

  def createListener(handler: AclChangeNotificationHandler, zkClient: KafkaZkClient): AclChangeSubscription = {
    val rawHandler: NotificationHandler = new NotificationHandler {
      def processNotification(bytes: Array[Byte]): Unit =
        handler.processNotification(decode(bytes))
    }

    val aclChangeListener = new ZkNodeChangeNotificationListener(
      zkClient, aclChangePath, ZkAclChangeStore.SequenceNumberPrefix, rawHandler)

    aclChangeListener.init()

    new AclChangeSubscription {
      def close(): Unit = aclChangeListener.close()
    }
  }
}

object ZkAclChangeStore {
  val stores: Iterable[ZkAclChangeStore] = List(LiteralAclChangeStore, ExtendedAclChangeStore)

  def SequenceNumberPrefix = "acl_changes_"
}

case object LiteralAclChangeStore extends ZkAclChangeStore {
  val name = "LiteralAclChangeStore"
  val aclChangePath: String = "/kafka-acl-changes"

  def encode(resource: Resource): Array[Byte] = {
    if (resource.patternType != PatternType.LITERAL)
      throw new IllegalArgumentException("Only literal resource patterns can be encoded")

    val legacyName = resource.resourceType + Resource.Separator + resource.name
    legacyName.getBytes(UTF_8)
  }

  def decode(bytes: Array[Byte]): Resource = {
    val string = new String(bytes, UTF_8)
    string.split(Separator, 2) match {
      case Array(resourceType, resourceName, _*) => new Resource(ResourceType.fromString(resourceType), resourceName, PatternType.LITERAL)
      case _ => throw new IllegalArgumentException("expected a string in format ResourceType:ResourceName but got " + string)
    }
  }
}

case object ExtendedAclChangeStore extends ZkAclChangeStore {
  val name = "ExtendedAclChangeStore"
  val aclChangePath: String = "/kafka-acl-extended-changes"

  def encode(resource: Resource): Array[Byte] = {
    if (resource.patternType == PatternType.LITERAL)
      throw new IllegalArgumentException("Literal pattern types are not supported")

    Json.encodeAsBytes(ExtendedAclChangeEvent(
      ExtendedAclChangeEvent.currentVersion,
      resource.resourceType.name,
      resource.name,
      resource.patternType.name))
  }

  def decode(bytes: Array[Byte]): Resource = {
    val changeEvent = Json.parseBytesAs[ExtendedAclChangeEvent](bytes) match {
      case Right(event) => event
      case Left(e) => throw new IllegalArgumentException("Failed to parse ACL change event", e)
    }

    changeEvent.toResource match {
      case Success(r) => r
      case Failure(e) => throw new IllegalArgumentException("Failed to convert ACL change event to resource", e)
    }
  }
}

object ResourceZNode {
  def path(resource: Resource): String = ZkAclStore(resource.patternType).path(resource.resourceType, resource.name)

  def encode(acls: Set[Acl]): Array[Byte] = Json.encodeAsBytes(Acl.toJsonCompatibleMap(acls).asJava)
  def decode(bytes: Array[Byte], stat: Stat): VersionedAcls = VersionedAcls(Acl.fromBytes(bytes), stat.getVersion)
}

object ExtendedAclChangeEvent {
  val currentVersion: Int = 1
}

case class ExtendedAclChangeEvent(@BeanProperty @JsonProperty("version") version: Int,
                                  @BeanProperty @JsonProperty("resourceType") resourceType: String,
                                  @BeanProperty @JsonProperty("name") name: String,
                                  @BeanProperty @JsonProperty("patternType") patternType: String) {
  if (version > ExtendedAclChangeEvent.currentVersion)
    throw new UnsupportedVersionException(s"Acl change event received for unsupported version: $version")

  def toResource: Try[Resource] = {
    for {
      resType <- Try(ResourceType.fromString(resourceType))
      patType <- Try(PatternType.fromString(patternType))
      resource = Resource(resType, name, patType)
    } yield resource
  }
}

object ClusterZNode {
  def path = "/cluster"
}

object ClusterIdZNode {
  def path = s"${ClusterZNode.path}/id"

  def toJson(id: String): Array[Byte] = {
    Json.encodeAsBytes(Map("version" -> "1", "id" -> id).asJava)
  }

  def fromJson(clusterIdJson: Array[Byte]): String = {
    Json.parseBytes(clusterIdJson).map(_.asJsonObject("id").to[String]).getOrElse {
      throw new KafkaException(s"Failed to parse the cluster id json $clusterIdJson")
    }
  }
}

object BrokerSequenceIdZNode {
  def path = s"${BrokersZNode.path}/seqid"
}

object ProducerIdBlockZNode {
  def path = "/latest_producer_id_block"
}

object DelegationTokenAuthZNode {
  def path = "/delegation_token"
}

object DelegationTokenChangeNotificationZNode {
  def path = s"${DelegationTokenAuthZNode.path}/token_changes"
}

object DelegationTokenChangeNotificationSequenceZNode {
  val SequenceNumberPrefix = "token_change_"
  def createPath = s"${DelegationTokenChangeNotificationZNode.path}/$SequenceNumberPrefix"
  def deletePath(sequenceNode: String) = s"${DelegationTokenChangeNotificationZNode.path}/${sequenceNode}"
  def encode(tokenId : String): Array[Byte] = tokenId.getBytes(UTF_8)
  def decode(bytes: Array[Byte]): String = new String(bytes, UTF_8)
}

object DelegationTokensZNode {
  def path = s"${DelegationTokenAuthZNode.path}/tokens"
}

object DelegationTokenInfoZNode {
  def path(tokenId: String) = s"${DelegationTokensZNode.path}/$tokenId"
  def encode(token: DelegationToken): Array[Byte] = Json.encodeAsBytes(DelegationTokenManager.toJsonCompatibleMap(token).asJava)
  def decode(bytes: Array[Byte]): Option[TokenInformation] = DelegationTokenManager.fromBytes(bytes)
}

object ZkData {

  // Important: it is necessary to add any new top level Zookeeper path to the Seq
  val SecureRootPaths = Seq(AdminZNode.path,
    BrokersZNode.path,
    ClusterZNode.path,
    ConfigZNode.path,
    ControllerZNode.path,
    ControllerEpochZNode.path,
    IsrChangeNotificationZNode.path,
    ProducerIdBlockZNode.path,
    LogDirEventNotificationZNode.path,
    DelegationTokenAuthZNode.path) ++ ZkAclStore.securePaths

  // These are persistent ZK paths that should exist on kafka broker startup.
  val PersistentZkPaths = Seq(
    ConsumerPathZNode.path, // old consumer path
    BrokerIdsZNode.path,
    TopicsZNode.path,
    ConfigEntityChangeNotificationZNode.path,
    DeleteTopicsZNode.path,
    BrokerSequenceIdZNode.path,
    IsrChangeNotificationZNode.path,
    ProducerIdBlockZNode.path,
    LogDirEventNotificationZNode.path
  ) ++ ConfigType.all.map(ConfigEntityTypeZNode.path)

  val SensitiveRootPaths = Seq(
    ConfigEntityTypeZNode.path(ConfigType.User),
    ConfigEntityTypeZNode.path(ConfigType.Broker),
    DelegationTokensZNode.path
  )

  def sensitivePath(path: String): Boolean = {
    path != null && SensitiveRootPaths.exists(path.startsWith)
  }

  def defaultAcls(isSecure: Boolean, path: String): Seq[ACL] = {
    //Old Consumer path is kept open as different consumers will write under this node.
    if (!ConsumerPathZNode.path.equals(path) && isSecure) {
      val acls = new ArrayBuffer[ACL]
      acls ++= ZooDefs.Ids.CREATOR_ALL_ACL.asScala
      if (!sensitivePath(path))
        acls ++= ZooDefs.Ids.READ_ACL_UNSAFE.asScala
      acls
    } else ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala
  }
}
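
To make the layout concrete, here is a minimal sketch (not part of ZkData.scala) that simply prints a few of the path helpers defined above. It assumes the Kafka 2.1 core module is on the classpath; the broker id 0, topic "demo" and partition 0 are made-up values used only for illustration:

import kafka.server.ConfigType
import kafka.zk._
import org.apache.kafka.common.TopicPartition

object ZNodePathsDemo extends App {
  // Controller and cluster metadata znodes
  println(ControllerZNode.path)                    // /controller
  println(ControllerEpochZNode.path)               // /controller_epoch
  println(ClusterIdZNode.path)                     // /cluster/id

  // Broker registration and topic metadata znodes
  println(BrokerIdZNode.path(0))                   // /brokers/ids/0
  println(TopicZNode.path("demo"))                 // /brokers/topics/demo
  println(TopicPartitionStateZNode.path(new TopicPartition("demo", 0)))  // /brokers/topics/demo/partitions/0/state

  // Config, admin and notification znodes
  println(ConfigEntityZNode.path(ConfigType.Topic, "demo"))  // /config/topics/demo
  println(DeleteTopicsTopicZNode.path("demo"))     // /admin/delete_topics/demo
  println(ReassignPartitionsZNode.path)            // /admin/reassign_partitions
  println(IsrChangeNotificationZNode.path)         // /isr_change_notification

  // The persistent paths a broker makes sure exist on startup
  ZkData.PersistentZkPaths.foreach(println)
}

The same paths can also be inspected on a running cluster with the zookeeper-shell.sh tool shipped with Kafka, for example ls /brokers/ids.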

  
