cd /etc/yum.repos.d/
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/5.1/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.1/archive.key
enabled=1

[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/5.1
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.1/archive.key
enabled=1
yum install librdkafka-devel
export CPPFLAGS=-I/usr/local/opt/openssl/include
export LDFLAGS=-L/usr/local/opt/openssl/lib
npm install --unsafe-perm node-rdkafka
module.exports = {'bootstrap_servers': ["xxx.xx.xxx:xxxx"],'topic_name': 'xxx','group_id': 'xxx'}
Parameter | Description |
bootstrap_servers | Network access address. On the instance's Basic Info page in the console, locate the Access Mode module and copy the address from the Network column. |
topic_name | Topic name. Copy the name on the Topic List page in the console. |
group_id | Consumer group name, which you can define yourself. After the demo runs successfully, you can view the consumer on the Consumer Group page in the console. |
// Producer demo: connects to CKafka and sends one test message per second.
const Kafka = require('node-rdkafka');
const config = require('./setting');

console.log("features:" + Kafka.features);
console.log(Kafka.librdkafkaVersion);

const producer = new Kafka.Producer({
  'api.version.request': 'true',
  // Set the entry service. Obtain the corresponding service address in the console.
  'bootstrap.servers': config['bootstrap_servers'],
  'dr_cb': true,
  'dr_msg_cb': true,
  // The number of retries when a request error occurs. It is recommended to set this
  // value to greater than 0 to ensure that the message is not lost during failed retries.
  'retries': '0',
  // The time between a failed request transmission and the next retry request.
  'retry.backoff.ms': 100,
  // The timeout period for producer network requests.
  'socket.timeout.ms': 6000,
});

let connected = false;

producer.setPollInterval(100);
producer.connect();

producer.on('ready', function () {
  connected = true;
  console.log("connect ok");
});

// Reconnect automatically if the broker connection drops.
producer.on('disconnected', function () {
  connected = false;
  producer.connect();
});

producer.on('event.log', function (event) {
  console.log("event.log", event);
});

producer.on('error', function (error) {
  console.log("error:" + error);
});

/** Sends one demo message; produce errors are logged rather than thrown. */
function produce() {
  try {
    producer.produce(
      config['topic_name'],
      null,
      // Buffer.from() replaces the deprecated, unsafe `new Buffer()` (Node DEP0005).
      Buffer.from('Hello CKafka Default'),
      null,
      Date.now()
    );
  } catch (err) {
    console.error('Error occurred when sending message(s)');
    console.error(err);
  }
}

producer.on('delivery-report', function (err, report) {
  console.log("delivery-report: producer ok");
});

producer.on('event.error', function (err) {
  console.error('event.error:' + err);
});

setInterval(produce, 1000, "Interval");
node producer.js

// Consumer demo: subscribes to the configured topic and prints every message received.
const Kafka = require('node-rdkafka');
const config = require('./setting');

console.log(Kafka.features);
console.log(Kafka.librdkafkaVersion);
console.log(config);

const consumer = new Kafka.KafkaConsumer({
  'api.version.request': 'true',
  // Set the entry service. Obtain the corresponding service address in the console.
  'bootstrap.servers': config['bootstrap_servers'],
  'group.id': config['group_id'],
  // Consumer session timeout for the Kafka consumer-group mechanism: if the broker
  // receives no heartbeat from the consumer within this interval, the consumer is
  // considered failed and the broker starts the rebalancing process again.
  'session.timeout.ms': 10000,
  // Client request timeout. A request with no response within this period fails.
  'metadata.request.timeout.ms': 305000,
  // Interval cap for internal reconnect retries on the client.
  'reconnect.backoff.max.ms': 3000,
});

consumer.connect();

consumer.on('ready', () => {
  console.log("connect ok");
  consumer.subscribe([config['topic_name']]);
  consumer.consume();
});

consumer.on('data', (data) => {
  console.log(data);
});

consumer.on('event.log', (event) => {
  console.log("event.log", event);
});

consumer.on('error', (error) => {
  console.log("error:" + error);
});

consumer.on('event', (event) => {
  console.log("event:" + event);
});
node consumer.js


Feedback