1
+ /// User facing module of this library.
2
+ ///
3
+ /// Fetching messages from multiple (topic, partition) pairs or producing messages to multiple
4
+ /// topics is not yet supported.
5
+ /// It should be added very soon.
6
+
1
7
use error:: { Result , Error } ;
2
8
use utils;
3
9
use protocol;
@@ -11,19 +17,35 @@ const CLIENTID: &'static str = "kafka-rust";
11
17
const DEFAULT_TIMEOUT : i32 = 120 ; // seconds
12
18
13
19
14
/// Client struct. It keeps track of brokers and topic metadata
///
/// # Examples
///
/// ```no_run
/// let mut client = kafka::client::KafkaClient::new(&vec!("localhost:9092".to_string()));
/// let res = client.load_metadata_all();
/// ```
///
/// You will have to load metadata before making any other request.
#[derive(Default, Debug)]
pub struct KafkaClient {
    // Client id string sent along with every request (see CLIENTID const).
    clientid: String,
    // Connection timeout in seconds (initialized from DEFAULT_TIMEOUT).
    timeout: i32,
    // Bootstrap broker addresses supplied by the caller at construction.
    hosts: Vec<String>,
    // Monotonically increasing correlation id attached to outgoing requests.
    correlation: i32,
    // Cache of open connections, keyed by broker "host:port" string.
    conns: HashMap<String, KafkaConnection>,
    // Topic name -> ids of that topic's partitions; populated by load_metadata.
    pub topic_partitions: HashMap<String, Vec<i32>>,
    // Maps a topic/partition to the broker that serves it; exact key format is
    // built in load_metadata — presumably topic plus partition; verify there.
    topic_brokers: HashMap<String, String>
}
25
40
26
41
impl KafkaClient {
    /// Create a new instance of KafkaClient
    ///
    /// # Examples
    ///
    /// ```no_run
    /// let mut client = kafka::client::KafkaClient::new(&vec!("localhost:9092".to_string()));
    /// ```
    pub fn new(hosts: &Vec<String>) -> KafkaClient {
        // All fields not listed here (correlation, conns, topic maps) are
        // zero/empty via KafkaClient::default().
        KafkaClient { hosts: hosts.to_vec(), clientid: CLIENTID.to_string(),
                      timeout: DEFAULT_TIMEOUT, ..KafkaClient::default() }
@@ -48,15 +70,18 @@ impl KafkaClient {
48
70
}
49
71
50
72
73
+ /// Resets and loads metadata for all topics.
51
74
pub fn load_metadata_all ( & mut self ) -> Result < ( ) > {
52
75
self . reset_metadata ( ) ;
53
76
self . load_metadata ( & vec ! ( ) )
54
77
}
55
78
79
    /// Reloads metadata for a list of supplied topics
    ///
    /// returns Result<(), error::Error>
    pub fn load_metadata(&mut self, topics: &Vec<String>) -> Result<()> {
        let resp = try!(self.get_metadata(topics));

        // Map of broker node id -> "host:port", built from the metadata response.
        let mut brokers: HashMap<i32, String> = HashMap::new();
        for broker in resp.brokers {
            brokers.insert(broker.nodeid, format!("{}:{}", broker.host, broker.port));
@@ -81,6 +106,8 @@ impl KafkaClient {
81
106
Ok ( ( ) )
82
107
}
83
108
109
    /// Clears metadata stored in the client. You must load metadata after this call if you want
    /// to use the client
    pub fn reset_metadata(&mut self) {
        // Forget every cached topic -> partitions and topic -> broker mapping.
        self.topic_partitions.clear();
        self.topic_brokers.clear();
@@ -101,11 +128,25 @@ impl KafkaClient {
101
128
Err ( Error :: NoHostReachable )
102
129
}
103
130
104
    /// Fetch offsets for a list of topics.
    /// Not implemented as yet.
    ///
    /// `_topics` is currently ignored; this method is a no-op stub.
    pub fn fetch_offsets(&mut self, _topics: &Vec<String>) {
        // TODO - Implement method to fetch offsets for more than 1 topic

    }
108
137
138
    /// Fetch offset for a topic.
    /// It gets the latest offset only. Support for getting earliest will be added soon
    ///
    /// # Examples
    ///
    /// ```no_run
    /// let mut client = kafka::client::KafkaClient::new(&vec!("localhost:9092".to_string()));
    /// let res = client.load_metadata_all();
    /// let offsets = client.fetch_topic_offset(&"my-topic".to_string());
    /// ```
    /// Returns a vector of (topic, partition offset data).
    /// PartitionOffset will contain partition and offset info or an error code as returned by Kafka.
    pub fn fetch_topic_offset(&mut self, topic: &String) -> Result<Vec<(String, Vec<utils::PartitionOffset>)>> {
        // Doing it like this because HashMap will not return borrow of self otherwise
        let partitions = self.topic_partitions
@@ -160,6 +201,20 @@ impl KafkaClient {
160
201
}
161
202
}
162
203
204
    /// Fetch messages from Kafka
    ///
    /// It takes a single topic, partition and offset and returns a vector of messages
    /// or error::Error
    /// You can figure out the appropriate partition and offset using client's
    /// client.topic_partitions and client.fetch_topic_offset(topic)
    ///
    /// # Examples
    ///
    /// ```no_run
    /// let mut client = kafka::client::KafkaClient::new(&vec!("localhost:9092".to_string()));
    /// let res = client.load_metadata_all();
    /// let msgs = client.fetch_messages(&"my-topic".to_string(), 0, 0);
    /// ```
    pub fn fetch_messages(&mut self, topic: &String, partition: i32, offset: i64) -> Result<Vec<utils::OffsetMessage>> {

        // NOTE(review): unwrap() panics if no broker is known for this
        // (topic, partition) pair — metadata must be loaded before calling this.
        let host = self.get_broker(topic, partition).unwrap();
@@ -171,16 +226,46 @@ impl KafkaClient {
171
226
Ok ( resp. get_messages ( ) )
172
227
}
173
228
229
+ /// Send a message to Kafka
230
+ ///
231
+ /// You can figure out the appropriate partition and offset using client's
232
+ /// client.topic_partitions and client.fetch_topic_offset(topic)
233
+ ///
234
+ /// `required_acks` - indicates how many acknowledgements the servers should receive before
235
+ /// responding to the request. If it is 0 the server will not send any response
236
+ /// (this is the only case where the server will not reply to a request).
237
+ /// If it is 1, the server will wait the data is written to the local log before sending
238
+ /// a response. If it is -1 the server will block until the message is committed by all
239
+ /// in sync replicas before sending a response. For any number > 1 the server will block
240
+ /// waiting for this number of acknowledgements to occur (but the server will never wait
241
+ /// for more acknowledgements than there are in-sync replicas).
242
+ ///
243
+ /// `timeout` - This provides a maximum time in milliseconds the server can await the
244
+ /// receipt of the number of acknowledgements in `required_acks`
245
+ /// `message` - A single message as a vector of u8s
246
+ ///
247
+ /// # Example
248
+ ///
249
+ /// ```no_run
250
+ /// let mut client = kafka::client::KafkaClient::new(&vec!("localhost:9092".to_string()));
251
+ /// let res = client.load_metadata_all();
252
+ /// let msgs = client.send_message(&"my-topic".to_string(), 0, 1,
253
+ /// 100, &"b".to_string().into_bytes());
254
+ /// ```
255
+ /// The return value will contain topic, partition, offset and error if any
256
+ /// OR error:Error
174
257
pub fn send_message ( & mut self , topic : & String , partition : i32 , required_acks : i16 ,
175
- timeout : i32 , message : & Vec < u8 > ) -> Result < protocol :: ProduceResponse > {
258
+ timeout : i32 , message : & Vec < u8 > ) -> Result < Vec < utils :: TopicPartitionOffset > > {
176
259
177
260
let host = self . get_broker ( topic, partition) . unwrap ( ) ;
178
261
179
262
let correlation = self . next_id ( ) ;
180
263
let req = protocol:: ProduceRequest :: new_single ( topic, partition, required_acks,
181
264
timeout, message, correlation, & self . clientid ) ;
182
265
183
- self . send_receive :: < protocol:: ProduceRequest , protocol:: ProduceResponse > ( & host, req)
266
+ let resp = try!( self . send_receive
267
+ :: < protocol:: ProduceRequest , protocol:: ProduceResponse > ( & host, req) ) ;
268
+ Ok ( resp. get_response ( ) )
184
269
185
270
}
186
271
0 commit comments