Spring Cloud Advanced, Part 1 (link)
Chapter 1: Load balancing with Ribbon
Chapter 2: Declarative calls with Feign
Chapter 3: Service fault tolerance with Hystrix
Spring Cloud Advanced, Part 2 (link)
Chapter 4: How to design microservices
Chapter 5: The Zuul service gateway
Chapter 6: Distributed configuration center
Spring Cloud Advanced, Part 3 (you are here)
Chapter 7: Message bus (Bus)
Chapter 8: Message-driven microservices (Stream)
Chapter 9: Distributed service tracing (Sleuth)
Spring Cloud Bus integrates the commonly used message brokers (RabbitMQ, Kafka) and connects all the nodes of a service.
When data changes, the broker broadcasts the change so that every microservice updates promptly, for example when a microservice's configuration is updated.
In short, Bus solves the problem of propagating data changes to all microservices in time.
Diagram:
In a service cluster, only one service needs to be refreshed; RabbitMQ then broadcasts the notification so that all the other services refresh as well.
Case study
Modify the pom file
<project xmlns="http://maven.apache.org/POM/4.0.0"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.1.7.RELEASE</version>
    </parent>
    <groupId>ah.szxy.springcloud</groupId>
    <artifactId>32config-refresh-Utils</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <build />
    <properties>
        <java.version>1.8</java.version>
        <spring-cloud.version>Greenwich.SR2</spring-cloud.version>
        <maven-jar-plugin.version>2.6</maven-jar-plugin.version>
    </properties>
    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-devtools</artifactId>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-starter-netflix-eureka-client</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-starter-netflix-zuul</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-config-server</artifactId>
        </dependency>
    </dependencies>
    <dependencyManagement>
        <dependencies>
            <dependency>
                <groupId>org.springframework.cloud</groupId>
                <artifactId>spring-cloud-dependencies</artifactId>
                <version>${spring-cloud.version}</version>
                <type>pom</type>
                <scope>import</scope>
            </dependency>
        </dependencies>
    </dependencyManagement>
</project>
Modify the global configuration file application.yml
spring:
  application:
    name: config-server-Git
  cloud:
    config:
      server:
        git: # Git settings
          uri: https://gitee.com/TimePause/SpringCloudConfig
          username: <your Git account>
          password: <your Git password>
server:
  port: 9090
eureka:
  client:
    serviceUrl:
      defaultZone: http://admin:admin@eureka1:8761/eureka/,http://admin:admin@eureka2:8761/eureka/
Modify the main application class
@SpringBootApplication
@EnableEurekaClient
@EnableConfigServer
public class Application {
public static void main(String[] args) {
SpringApplication.run(Application.class, args);
}
}
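Once the server is running, it can be sanity-checked against the standard Config Server endpoint /{application}/{profile}/{label}; the application name and profile below are an assumption about the files stored in the Git repository:
curl http://localhost:9090/config-client/dev/master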
Modify the pom file
<project xmlns="http://maven.apache.org/POM/4.0.0"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.1.7.RELEASE</version>
    </parent>
    <groupId>ah.szxy.springcloud</groupId>
    <artifactId>config-refresh-Utils</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <properties>
        <java.version>1.8</java.version>
        <spring-cloud.version>Greenwich.SR2</spring-cloud.version>
        <maven-jar-plugin.version>2.6</maven-jar-plugin.version>
    </properties>
    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.httpcomponents</groupId>
            <artifactId>httpclient</artifactId>
        </dependency>
        <dependency>
            <groupId>commons-logging</groupId>
            <artifactId>commons-logging</artifactId>
            <version>1.2</version>
        </dependency>
    </dependencies>
    <dependencyManagement>
        <dependencies>
            <dependency>
                <groupId>org.springframework.cloud</groupId>
                <artifactId>spring-cloud-dependencies</artifactId>
                <version>${spring-cloud.version}</version>
                <type>pom</type>
                <scope>import</scope>
            </dependency>
        </dependencies>
    </dependencyManagement>
</project>
HttpClient utility class
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.http.NameValuePair;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.message.BasicNameValuePair;
import org.apache.http.util.EntityUtils;
public class HttpClientUtil {

    public static String doGet(String url, Map<String, String> param) {
        // Create the HttpClient instance
        CloseableHttpClient httpclient = HttpClients.createDefault();
        String resultString = "";
        CloseableHttpResponse response = null;
        try {
            // Build the URI with query parameters
            URIBuilder builder = new URIBuilder(url);
            if (param != null) {
                for (String key : param.keySet()) {
                    builder.addParameter(key, param.get(key));
                }
            }
            URI uri = builder.build();
            // Create the HTTP GET request
            HttpGet httpGet = new HttpGet(uri);
            // Execute the request
            response = httpclient.execute(httpGet);
            // Only read the body when the status code is 200
            if (response.getStatusLine().getStatusCode() == 200) {
                resultString = EntityUtils.toString(response.getEntity(), "UTF-8");
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            try {
                if (response != null) {
                    response.close();
                }
                httpclient.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        return resultString;
    }

    public static String doGet(String url) {
        return doGet(url, null);
    }

    public static String doPost(String url, Map<String, String> param) {
        // Create the HttpClient instance
        CloseableHttpClient httpClient = HttpClients.createDefault();
        CloseableHttpResponse response = null;
        String resultString = "";
        try {
            // Create the HTTP POST request
            HttpPost httpPost = new HttpPost(url);
            // Build the parameter list
            if (param != null) {
                List<NameValuePair> paramList = new ArrayList<>();
                for (String key : param.keySet()) {
                    paramList.add(new BasicNameValuePair(key, param.get(key)));
                }
                // Simulate a form submission
                UrlEncodedFormEntity entity = new UrlEncodedFormEntity(paramList, "utf-8");
                httpPost.setEntity(entity);
            }
            // Execute the request
            response = httpClient.execute(httpPost);
            resultString = EntityUtils.toString(response.getEntity(), "utf-8");
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            try {
                if (response != null) {
                    response.close();
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        return resultString;
    }

    public static String doPost(String url) {
        return doPost(url, null);
    }

    public static String doPostJson(String url, String json) {
        // Create the HttpClient instance
        CloseableHttpClient httpClient = HttpClients.createDefault();
        CloseableHttpResponse response = null;
        String resultString = "";
        try {
            // Create the HTTP POST request
            HttpPost httpPost = new HttpPost(url);
            // Set the JSON request body
            StringEntity entity = new StringEntity(json, ContentType.APPLICATION_JSON);
            httpPost.setEntity(entity);
            // Execute the request
            response = httpClient.execute(httpPost);
            resultString = EntityUtils.toString(response.getEntity(), "utf-8");
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            try {
                if (response != null) {
                    response.close();
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        return resultString;
    }

    public static void main(String[] args) {
        // Send the bus refresh request to the config client
        String url = "http://localhost:9091/actuator/bus-refresh";
        String result = HttpClientUtil.doPost(url);
        System.out.println(result);
    }
}
Modify the pom file
<project xmlns="http://maven.apache.org/POM/4.0.0"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.1.7.RELEASE</version>
    </parent>
    <groupId>ah.szxy.springcloud</groupId>
    <artifactId>config-client</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <build />
    <properties>
        <java.version>1.8</java.version>
        <spring-cloud.version>Greenwich.SR2</spring-cloud.version>
        <maven-jar-plugin.version>2.6</maven-jar-plugin.version>
    </properties>
    <dependencies>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-starter-bus-amqp</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-devtools</artifactId>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-starter-netflix-eureka-client</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-starter-netflix-zuul</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-starter-config</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-actuator</artifactId>
        </dependency>
    </dependencies>
    <dependencyManagement>
        <dependencies>
            <dependency>
                <groupId>org.springframework.cloud</groupId>
                <artifactId>spring-cloud-dependencies</artifactId>
                <version>${spring-cloud.version}</version>
                <type>pom</type>
                <scope>import</scope>
            </dependency>
        </dependencies>
    </dependencyManagement>
</project>
spring:
  application:
    name: config-client
  cloud:
    config:
      discovery:
        service-id: config-server-Git # service name of the config server
        enabled: true                 # enable discovery of the config server
      profile: dev                    # which environment to use: test/dev/prod/default
      label: master                   # Git branch/label
  rabbitmq: # RabbitMQ connection settings
    host: 192.168.179.136
    port: 5672
    username: mquser
    password: mquser
    virtual-host: /
    listener:
      simple:
        retry:
          enabled: true      # enable retries
          max-attempts: 5    # number of retries
server:
  port: 9091
eureka:
  client:
    serviceUrl:
      defaultZone: http://admin:admin@eureka1:8761/eureka/,http://admin:admin@eureka2:8761/eureka/
management:
  # before Spring Boot 2.0 the security protection had to be disabled instead:
  #security:
  #  enable: false
  endpoints:
    web:
      exposure:
        include: bus-refresh
        #include: "*"       # expose all refresh endpoints
        #include: refresh
Controller
/**
 * In the Spring IoC container beans are singletons, so a value injected with @Value does not change
 * after the configuration is refreshed; @RefreshScope is needed to re-bind the property on refresh.
 * @author chy
 */
@Controller
@RefreshScope
public class showMsgController {

    @Value("${E-Book}")
    private String msg;

    @RequestMapping("showMsg")
    @ResponseBody
    public String showMsg() {
        return msg;
    }
}
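The ${E-Book} value is resolved from the profile-specific file in the Git repository configured above. As an assumption about that repository's contents (not shown in the original), it would hold something like a config-client-dev.yml containing:
E-Book: <current book title>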
Main application class
@SpringBootApplication
@EnableEurekaClient
public class ConfigClientApplication {
public static void main(String[] args) {
SpringApplication.run(ConfigClientApplication.class, args);
}
}
a. Start config-server and two config-client instances.
Check that they start correctly and read the configuration file: access both nodes of the cluster and verify that the parameter is returned.
b. Change the parameter on the Git side and access http://localhost:9091/showMsg to check whether the latest value is returned.
c. Send the bus refresh request http://localhost:9091/actuator/bus-refresh (POST).
(For versions before 2.0 the path is http://localhost:9091/bus/refresh.)
For example: curl -X POST http://localhost:9091/actuator/bus-refresh
d. Watch the consoles of the clustered projects for the refresh, then access http://localhost:9091/showMsg and verify that the latest value is returned.
Note: only one config client needs to be refreshed; the configuration of the other projects in the cluster is refreshed automatically as well.
Diagram:
Refreshing through the config server: after the refresh request is sent, the config server and all of its clients reload their configuration.
Case study
The code from the client-side refresh above can be reused and adapted.
1. Modify the config server
Add the Bus/RabbitMQ dependency to the existing project.
<dependency>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-starter-bus-amqp</artifactId>
</dependency>
Note: in the previous project the config client cluster already had this dependency; only the config server was missing it.
Modify the global configuration file
spring:
  application:
    name: config-server-Git
  cloud:
    bus: # Spring Boot enables security interception by default, which can make /refresh return 401 (access denied)
      enabled: true
      refresh:
        enabled: true
    config:
      server:
        git: # Git settings
          uri: https://gitee.com/TimePause/SpringCloudConfig
          username: [email protected] (Gitee account)
          password: <Gitee password>
server:
  port: 9090
eureka:
  client:
    serviceUrl:
      defaultZone: http://admin:admin@eureka1:8761/eureka/,http://admin:admin@eureka2:8761/eureka/
# "*" cannot be used here; it causes conflicts
management:
  endpoints:
    web:
      exposure:
        include: bus-refresh
Test
a. Start the two config client instances (pseudo-cluster) and the config server.
b. Read the configuration file on Git through a client and check the result.
c. Change the configuration file on Git and access the client again. Is the data synchronized? No.
d. Send the refresh command from the command line: curl -X POST http://localhost:9091/actuator/bus-refresh.
Do the client and server consoles show a refresh? Yes.
e. Access both clients of the cluster. Has the data changed? Yes.
The refresh can be triggered from the command line or with the HttpClient utility class.
It must be a POST request,
for example with curl: curl -X POST ......
1. Refresh a specific service:
http://Config-Server/actuator/bus-refresh?destination=<service name>:<port>
2. Refresh a specific cluster:
http://Config-Server/actuator/bus-refresh?destination=<service name>:**
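For example (a sketch; the host, service name, and port are assumptions based on the projects above):
# refresh only the config-client instance on port 9091
curl -X POST "http://localhost:9090/actuator/bus-refresh?destination=config-client:9091"
# refresh every instance of config-client
curl -X POST "http://localhost:9090/actuator/bus-refresh?destination=config-client:**"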
Click here for the Spring Cloud Stream guide (Chinese translation).
1. Modify the pom file
<project xmlns="http://maven.apache.org/POM/4.0.0"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.1.7.RELEASE</version>
    </parent>
    <groupId>ah.szxy.springcloud</groupId>
    <artifactId>33stream-receiver</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <build />
    <properties>
        <java.version>1.8</java.version>
        <spring-cloud.version>Greenwich.SR2</spring-cloud.version>
        <maven-jar-plugin.version>2.6</maven-jar-plugin.version>
    </properties>
    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-devtools</artifactId>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-starter-netflix-eureka-client</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-starter-netflix-zuul</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-starter-stream-rabbit</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
        </dependency>
    </dependencies>
    <dependencyManagement>
        <dependencies>
            <dependency>
                <groupId>org.springframework.cloud</groupId>
                <artifactId>spring-cloud-dependencies</artifactId>
                <version>${spring-cloud.version}</version>
                <type>pom</type>
                <scope>import</scope>
            </dependency>
        </dependencies>
    </dependencyManagement>
</project>
2. Modify the global configuration file application.yml
spring:
  application:
    name: stream-receiver
  rabbitmq: # RabbitMQ connection settings
    host: 192.168.179.136
    port: 5672
    username: mquser
    password: mquser
    listener:
      simple:
        retry:
          enabled: true      # enable retries
          max-attempts: 5    # number of retries
server:
  port: 6666
eureka:
  client:
    serviceUrl:
      defaultZone: http://admin:admin@eureka1:8761/eureka/,http://admin:admin@eureka2:8761/eureka/
3. Create the message-sending interface
@Output("custom exchange name")
The return type is SubscribableChannel.
public interface SenderService {
@Output("chy-exchange")
SubscribableChannel send();
}
4. Main application class
Add the @EnableBinding annotation to bind the message-sending interface.
@SpringBootApplication
@EnableEurekaClient
@EnableBinding(value={SenderService.class})
public class StreamSenderApplication {
public static void main(String[] args) {
SpringApplication.run(StreamSenderApplication.class, args);
}
}
5. Test class
Send a message and check whether the receiver gets it.
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.messaging.Message;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.test.context.junit4.SpringRunner;
import ah.szxy.SenderService;
import ah.szxy.StreamSenderApplication;
@RunWith(SpringRunner.class)
@SpringBootTest(classes = StreamSenderApplication.class)
public class TestSend {

    // Inject the sender interface; Spring actually injects a proxy for it.
    @Autowired
    private SenderService senderService;

    @Test
    public void testSend() {
        String msg = "想要时间静止,却敌不过事态变迁...";
        // Wrap the payload in a Message
        Message message = MessageBuilder.withPayload(msg).build();
        // Get the SubscribableChannel from the interface and send the message with its send method
        senderService.send().send(message);
    }
}
1. Modify the pom file
<project xmlns="http://maven.apache.org/POM/4.0.0"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.1.7.RELEASE</version>
    </parent>
    <groupId>ah.szxy.springcloud</groupId>
    <artifactId>33stream-sender</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <build />
    <properties>
        <java.version>1.8</java.version>
        <spring-cloud.version>Greenwich.SR2</spring-cloud.version>
        <maven-jar-plugin.version>2.6</maven-jar-plugin.version>
    </properties>
    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-devtools</artifactId>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-starter-netflix-eureka-client</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-starter-netflix-zuul</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-starter-stream-rabbit</artifactId>
        </dependency>
    </dependencies>
    <dependencyManagement>
        <dependencies>
            <dependency>
                <groupId>org.springframework.cloud</groupId>
                <artifactId>spring-cloud-dependencies</artifactId>
                <version>${spring-cloud.version}</version>
                <type>pom</type>
                <scope>import</scope>
            </dependency>
        </dependencies>
    </dependencyManagement>
</project>
2. Modify the global configuration file application.yml
spring:
  application:
    name: 33stream-sender
  rabbitmq: # RabbitMQ connection settings
    host: 192.168.179.136
    port: 5672
    username: mquser
    password: mquser
    listener:
      simple:
        retry:
          enabled: true      # enable retries
          max-attempts: 5    # number of retries
server:
  port: 5555
eureka:
  client:
    serviceUrl:
      defaultZone: http://admin:admin@eureka1:8761/eureka/,http://admin:admin@eureka2:8761/eureka/
3. Create the message-receiving interface
@Input("custom exchange name; it must match the sender's")
public interface Receive {
@Input("chy-exchange")
SubscribableChannel receive();
}
4. Create the message-handling class (the receiver-side counterpart of the sender's test class)
@EnableBinding binds the message-receiving interface.
@StreamListener listens on the specified exchange.
/**
 * Handles incoming messages.
 * @author chy
 */
@Service
@EnableBinding(Receive.class)
public class ReceiveService {

    @StreamListener("chy-exchange")
    public void onReceive(byte[] msg) {
        // Handle the message
        System.out.println("Receive:" + new String(msg));
    }
}
5. Main application class
@EnableBinding binds the message-receiving interface.
@SpringBootApplication
@EnableEurekaClient
@EnableBinding(value={Receive.class})
public class StreamSenderApplication {
public static void main(String[] args) {
SpringApplication.run(StreamSenderApplication.class, args);
}
}
Test:
Start the message receiver, then run the sender's test class.
RabbitMQ (screenshot)
The message receiver's console (screenshot)
Stream is a further abstraction over message queues: we only declare an exchange with @Output(), and a corresponding queue (with a random name) is created automatically.
With @Input we then receive from the queue bound to that exchange automatically, which saves us from declaring queues ourselves.
It also makes it much easier to switch between message brokers (RabbitMQ, Kafka, and so on).
Background
1. Copy the message sender above; the pom file stays the same, only the project name changes.
2. Modify the global configuration file
spring:
  application:
    name: stream--group-receiver
  rabbitmq: # RabbitMQ connection settings
    host: 192.168.179.136
    port: 5672
    username: mquser
    password: mquser
    listener:
      simple:
        retry:
          enabled: true      # enable retries
          max-attempts: 5    # number of retries
  cloud:
    stream:
      bindings:
        OutPut: # binding alias; the sender interface uses @Output("OutPut")
          destination: chy-Exchange # bind to the custom exchange
server:
  port: 6666
eureka:
  client:
    serviceUrl:
      defaultZone: http://admin:admin@eureka1:8761/eureka/,http://admin:admin@eureka2:8761/eureka/
public class Product implements Serializable{
private Integer id;
private String name;
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
@Override
public String toString() {
return "Product [id=" + id + ", name=" + name + "]";
}
public Product(Integer id, String name) {
super();
this.id = id;
this.name = name;
}
public Product() {
super();
}
}
public interface SenderService {

    // The binding name configured under cloud.stream.bindings
    String OUTPUT = "OutPut";

    @Output(OUTPUT)
    SubscribableChannel send();
}
@RunWith(SpringRunner.class)
@SpringBootTest(classes = StreamSenderApplication.class)
public class TestSend {

    // Inject the sender interface; Spring actually injects a proxy for it.
    @Autowired
    private SenderService senderService;

    @Test
    public void testSend() {
        //String msg="想要时间静止,却敌不过事态变迁...";
        Product product = new Product(1, "普通攻击全体攻击二次连击这样的妈妈你喜欢吗?");
        // Wrap the payload in a Message
        Message message = MessageBuilder.withPayload(product).build();
        // Get the SubscribableChannel from the interface and send the message with its send method
        senderService.send().send(message);
    }
}
1. Copy the message receiver above; the pom file stays the same, only the project name changes.
2. Modify the global configuration file application.yml
spring:
  application:
    name: stream-group-receiver
  rabbitmq: # RabbitMQ connection settings
    host: 192.168.179.136
    port: 5672
    username: mquser
    password: mquser
    listener:
      simple:
        retry:
          enabled: true      # enable retries
          max-attempts: 5    # number of retries
  cloud:
    stream:
      bindings:
        InPut: # binding alias; the listener uses @StreamListener("InPut")
          destination: chy-Exchange # bind to the custom exchange
          group: groupProduct # consumer group: becomes the MQ queue name and makes the queue durable
server:
  port: 5555
eureka:
  client:
    serviceUrl:
      defaultZone: http://admin:admin@eureka1:8761/eureka/,http://admin:admin@eureka2:8761/eureka/
public interface Receive {

    // Defined as a constant (rather than a literal) so that a change to the binding name only needs one edit
    String INPUT = "InPut";

    @Input(INPUT)
    SubscribableChannel receive();
}
/**
 * Handles incoming messages.
 * @author chy
 */
@Service
@EnableBinding(Receive.class)
public class ReceiveService {

    @StreamListener(Receive.INPUT)
    public void onReceive(byte[] msg) {
        // Handle the message
        System.out.println("Receive:" + new String(msg));
    }
}
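A possible variation (an assumption, not part of the original code): with Spring Cloud Stream's default JSON content type the listener can bind the payload straight to the Product type instead of handling raw bytes:
@StreamListener(Receive.INPUT)
public void onProduct(Product product) {
    // Spring Cloud Stream converts the JSON payload back into a Product
    System.out.println("Receive:" + product);
}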
Test
Log in to the RabbitMQ web console and inspect the queue's properties.
Start both message receivers, then run the sender's test class and watch the receivers' consoles.
Copy the receiver above, changing only the project name and port, so that two receivers run at the same time.
Only one of the receivers gets each message (figures 1 and 2 below),
and there is only one queue for the group (figure 3).
Routing identical messages to the same service
Background
When the sender pushes 10 or more messages into the receiver group at once, every node of the cluster receives a different number of messages (and different messages), and the distribution changes on every run. Sometimes, however, we want identical messages to always be delivered to the same service instance.
Case study
1. Copy the project above; the pom file is shown here.
<properties>
    <java.version>1.8</java.version>
    <spring-cloud.version>Greenwich.SR2</spring-cloud.version>
    <maven-jar-plugin.version>2.6</maven-jar-plugin.version>
</properties>
<dependencies>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-devtools</artifactId>
        <optional>true</optional>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter-netflix-eureka-client</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter-netflix-zuul</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter-stream-rabbit</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-test</artifactId>
        <scope>test</scope>
    </dependency>
</dependencies>
2. Modify the global configuration file
spring:
  application:
    name: stream-group-sender
  rabbitmq: # RabbitMQ connection settings
    host: 192.168.179.136
    port: 5672
    username: mquser
    password: mquser
    listener:
      simple:
        retry:
          enabled: true      # enable retries
          max-attempts: 5    # number of retries
  cloud:
    stream:
      bindings:
        OutPut: # binding alias; the sender interface uses @Output("OutPut")
          destination: chy-Exchange # bind to the custom exchange
          producer:
            partition-key-expression: payload # expression that computes the partition key
            partition-count: 2                # number of message partitions
server:
  port: 6666
eureka:
  client:
    serviceUrl:
      defaultZone: http://admin:admin@eureka1:8761/eureka/,http://admin:admin@eureka2:8761/eureka/
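partition-key-expression: payload uses the whole payload as the partition key. As a sketch of an alternative (an assumption, not part of the original), the key can be taken from a message header, which gives finer control over which messages land in the same partition; the sender would set the header with MessageBuilder's setHeader("partitionKey", ...) before sending:
          producer:
            partition-key-expression: headers['partitionKey'] # hypothetical header-based key
            partition-count: 2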
1. Copy the project above; the pom file is shown here.
<properties>
    <java.version>1.8</java.version>
    <spring-cloud.version>Greenwich.SR2</spring-cloud.version>
    <maven-jar-plugin.version>2.6</maven-jar-plugin.version>
</properties>
<dependencies>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-devtools</artifactId>
        <optional>true</optional>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter-netflix-eureka-client</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter-netflix-zuul</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter-stream-rabbit</artifactId>
    </dependency>
</dependencies>
2. Modify the global configuration file application.yml
spring:
  application:
    name: stream-partition-receiver
  rabbitmq: # RabbitMQ connection settings
    host: 192.168.179.136
    port: 5672
    username: mquser
    password: mquser
    listener:
      simple:
        retry:
          enabled: true      # enable retries
          max-attempts: 5    # number of retries
  cloud:
    stream:
      bindings:
        InPut: # binding alias; the listener uses @StreamListener("InPut")
          destination: chy-Exchange # bind to the custom exchange
          group: groupProduct # consumer group: becomes the MQ queue name and makes the queue durable
          consumer:
            partitioned: true
            instance-count: 2 # total number of consumer instances
            instance-index: 0 # index of this instance, starting at 0
server:
  port: 5555
eureka:
  client:
    serviceUrl:
      defaultZone: http://admin:admin@eureka1:8761/eureka/,http://admin:admin@eureka2:8761/eureka/
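The second receiver instance created in step 3 below keeps the same configuration except for the port and the partition index; a sketch of the keys that differ (the port value is an assumption):
server:
  port: 5556
spring:
  cloud:
    stream:
      bindings:
        InPut:
          consumer:
            instance-index: 1 # the second instance; the index starts at 0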
3. Copy this project and change the project name and port to build a cluster of message receivers.
4. The message sender's test class
@RunWith(SpringRunner.class)
@SpringBootTest(classes = StreamSenderApplication.class)
public class TestSend {

    // Inject the sender interface; Spring actually injects a proxy for it.
    @Autowired
    private SenderService senderService;

    @Test
    public void testSend() {
        //String msg="想要时间静止,却敌不过事态变迁...";
        Product product = new Product(1, "普通攻击全体攻击二次连击这样的妈妈你喜欢吗?");
        // Wrap the payload in a Message
        Message message = MessageBuilder.withPayload(product).build();
        for (int i = 0; i < 10; i++) {
            // Get the SubscribableChannel from the interface and send the message with its send method
            senderService.send().send(message);
        }
    }
}
Test
Use the sender to push 10 messages to the receivers at once.
All 10 messages are delivered to a single receiver instead of being spread across the two receivers.
In practice, message groups and partitions can be combined as needed.
1. Create the project and modify the pom file
<project xmlns="http://maven.apache.org/POM/4.0.0"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.1.7.RELEASE</version>
    </parent>
    <groupId>ah.szxy.springcloud</groupId>
    <artifactId>40Sleuth-Product-Service</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <build />
    <properties>
        <java.version>1.8</java.version>
        <spring-cloud.version>Greenwich.SR2</spring-cloud.version>
        <maven-jar-plugin.version>2.6</maven-jar-plugin.version>
    </properties>
    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
    </dependencies>
</project>
2. Create the service interface
/**
 * Product service interface.
 * @author chy
 */
@RequestMapping("/product")
public interface ProductService {

    @RequestMapping(value = "/find", method = RequestMethod.GET)
    public List<Product> findAll();
}
3. The entity class is generated with a reverse-engineering tool and omitted here (a minimal sketch follows).
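As a hypothetical sketch of that omitted entity (the field names are assumed from the earlier Product class, not taken from the generated code):
public class Product implements Serializable {
    private Integer id;   // primary key
    private String name;  // product name
    public Integer getId() { return id; }
    public void setId(Integer id) { this.id = id; }
    public String getName() { return name; }
    public void setName(String name) { this.name = name; }
}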
1. Create the project and modify the pom file
The main change is the added Sleuth dependency.
<properties>
    <java.version>1.8</java.version>
    <spring-cloud.version>Greenwich.SR2</spring-cloud.version>
    <maven-jar-plugin.version>2.6</maven-jar-plugin.version>
</properties>
<dependencies>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-thymeleaf</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter-netflix-eureka-client</artifactId>
    </dependency>
    <dependency>
        <groupId>org.mybatis.spring.boot</groupId>
        <artifactId>mybatis-spring-boot-starter</artifactId>
        <version>1.1.1</version>
    </dependency>
    <dependency>
        <groupId>mysql</groupId>
        <artifactId>mysql-connector-java</artifactId>
    </dependency>
    <dependency>
        <groupId>com.alibaba</groupId>
        <artifactId>druid</artifactId>
        <version>1.0.9</version>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-devtools</artifactId>
        <optional>true</optional>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter-sleuth</artifactId>
    </dependency>
    <dependency>
        <groupId>ah.szxy.springcloud</groupId>
        <artifactId>40Sleuth-Product-Service</artifactId>
        <version>0.0.1-SNAPSHOT</version>
    </dependency>
</dependencies>
2. Modify the global configuration file application.yml
spring:
  application:
    name: Sleuth-Product-Provider
  datasource:
    driverClassName: com.mysql.cj.jdbc.Driver
    url: jdbc:mysql://localhost:3306/book-product?useUnicode=true&characterEncoding=gbk&useJDBCCompliantTimezoneShift=true&serverTimezone=UTC
    username: root
    password: root
    type: com.alibaba.druid.pool.DruidDataSource
server:
  port: 8001
  compression: # Spring Boot gzip settings
    enabled: true # enable compression
    mime-types:   # MIME types that may be compressed
    - application/json,application/xml,text/html,text/xml,text/plain
mybatis:
  type-aliases-package: ah.szxy.product.pojo
  mapper-locations:
  - classpath:ah/szxy/product/mapper/*.xml
eureka:
  client:
    serviceUrl:
      defaultZone: http://admin:admin@eureka1:8761/eureka/,http://admin:admin@eureka2:8761/eureka/
3. The main controller
The other classes are omitted (a sketch of the omitted service implementation follows the controller).
@RestController
public class ProductController implements ProductService{
@Autowired
private ProductServiceImpl productServiceImpl;
@Override
public List<Product> findAll() {
return this.productServiceImpl.findAll();
}
}
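The omitted service implementation and MyBatis mapper are assumed to look roughly like the sketch below; the class and method names are hypothetical, chosen to match the mybatis settings in application.yml:
@Service
public class ProductServiceImpl {

    @Autowired
    private ProductMapper productMapper; // hypothetical MyBatis mapper interface

    public List<Product> findAll() {
        return this.productMapper.findAll();
    }
}

public interface ProductMapper {
    // assumed to be backed by a <select> in ah/szxy/product/mapper/ProductMapper.xml
    List<Product> findAll();
}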
1. Create the project and modify the pom file
The main change compared with the earlier projects is the added Sleuth dependency.
<properties>
    <java.version>1.8</java.version>
    <spring-cloud.version>Greenwich.SR2</spring-cloud.version>
    <maven-jar-plugin.version>2.6</maven-jar-plugin.version>
</properties>
<dependencies>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter-netflix-eureka-client</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter-openfeign</artifactId>
    </dependency>
    <dependency>
        <groupId>ah.szxy.springcloud</groupId>
        <artifactId>E-Book-Product-Service</artifactId>
        <version>0.0.1-SNAPSHOT</version>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-devtools</artifactId>
        <optional>true</optional>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter-sleuth</artifactId>
    </dependency>
</dependencies>
2. Modify the global configuration file application.yml
spring:
  application:
    name: Sleuth-Consumer
server:
  port: 8888
ribbon: # avoid Ribbon ReadTimeout errors
  ReadTimeout: 60000
  ConnectTimeout: 60000
eureka:
  client:
    serviceUrl:
      defaultZone: http://admin:admin@eureka1:8761/eureka/,http://admin:admin@eureka2:8761/eureka/
3. Controller
The other classes are omitted (a sketch of the assumed Feign client follows the controller).
/**
 * Creates orders (the consumer side).
 * @author chy
 */
@RestController
public class ConsumerController {

    @Autowired
    private ConsumerProductService productService;

    /**
     * Simulated flow: log in, browse products, place an order.
     */
    @RequestMapping(value = "/find", method = RequestMethod.GET)
    public List<Product> createOrder() {
        List<Product> list = this.productService.findAll();
        for (Product product : list) {
            System.out.println(product.getName());
        }
        System.out.println("+++++++++++++++++++");
        return list;
    }
}
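The ConsumerProductService injected above is not shown in the original; a sketch under the assumption that it is an OpenFeign client targeting the provider registered as Sleuth-Product-Provider and reusing the shared ProductService interface:
@FeignClient(name = "Sleuth-Product-Provider")
public interface ConsumerProductService extends ProductService {
}
The consumer's main class would then also need @EnableFeignClients for the client proxy to be created.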
Test results
Add a logback.xml configuration file and set the log level to debug; the console output then contains the Sleuth tracing fields shown below.
Elasticsearch is a search server based on Lucene. It provides a distributed, multi-tenant full-text search engine behind a RESTful web interface. Elasticsearch is written in Java, released as open source under the Apache license, and is a popular enterprise search engine. It is used in cloud environments, offering near-real-time search while being stable, reliable, fast, and easy to install and use. Official clients are available for Java, .NET (C#), PHP, Python, Apache Groovy, Ruby, and many other languages. According to the DB-Engines ranking, Elasticsearch is the most popular enterprise search engine, followed by Apache Solr, which is also based on Lucene.
Notes:
1. To install Elasticsearch the Linux kernel must be 3.5+; check the current kernel with: uname -a
2. The JDK must be 1.8.0_131 or later; check the current Java version with: java -version
3. When installing in a virtual machine, allocate at least 1.5 GB of memory to it; check and adjust the VM memory as shown in the figure below.
Single-node installation steps
Upgrading the Linux kernel
A newer kernel allocates more memory per thread, and Elasticsearch needs at least 4096 threads to start; this is also why the virtual machine needs at least 1.5 GB of memory.
Upgrade the kernel
# Step 1
rpm --import http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-7
# Step 2
rpm -Uvh http://www.elrepo.org/elrepo-release-6-8.el6.elrepo.noarch.rpm
# Step 3
yum --enablerepo=elrepo-kernel install kernel-lt -y
Note: if step 2 (rpm -Uvh http://www.elrepo.org/elrepo-release-6-8.el6.elrepo.noarch.rpm) fails, the nss package is too old and needs updating with yum -y install nss,
then re-run the command.
vim /etc/grub.conf
Edit the file so that the new kernel is used at boot:
set default=0 in the file.
Reboot the system: reboot now
Download Elasticsearch 6.2.3: https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.2.3.tar.gz
Upload it to the Linux machine, extract it, copy it to /usr/local, and rename the elasticsearch-6.2.3 directory to elasticsearch.
Adjust the system settings Elasticsearch needs.
vi /etc/security/limits.conf
Add the following lines:
* soft nofile 65536
* hard nofile 65536
vi /etc/security/limits.d/90-nproc.conf
Raise the minimum thread pool size for starting Elasticsearch by changing the following lines:
* soft nproc 4096
root soft nproc unlimited
vi /etc/sysctl.conf
Add: vm.max_map_count=655360
Apply the sysctl settings: sysctl -p
Edit the Elasticsearch configuration file (in its config directory) to set which clients may connect; 0.0.0.0 allows any client.
vi config/elasticsearch.yml
Change the following lines (they are commented out; remove the comment markers):
network.host: 0.0.0.0
http.port: 9200
Create a user
Since version 5.0 Elasticsearch raised its security level and no longer allows starting as root, so a dedicated user has to be created.
groupadd elk
useradd elkuser
passwd elkuser
usermod -G elk elkuser
visudo
elkuser ALL=(ALL) ALL
chown -R elkuser:elk /usr/local/elasticsearch
Start Elasticsearch
Switch to the new user: su elkuser
Start it:
In the foreground: ./usr/local/elasticsearch/bin/elasticsearch
As a daemon: ./usr/local/elasticsearch/bin/elasticsearch -d
Verify
Open http://<ip>:9200/
The firewall must be stopped or port 9200 opened: service iptables stop
About the Head plugin
elasticsearch-head is an HTML5 tool for operating and managing an Elasticsearch cluster through a simple point-and-click interface. It provides:
a. index- and node-level operations
b. a search interface for querying the cluster's raw JSON or table-formatted data
c. quick access to the cluster's status
Install the environment
Install Node.js
Run as root:
curl -sL https://rpm.nodesource.com/setup_8.x | bash -
yum install -y nodejs
Install cnpm via npm
npm install -g cnpm --registry=https://registry.npm.taobao.org
Install grunt with npm
# Command 1
npm install grunt --save-dev
# Command 2
npm install -g grunt-cli --registry=https://registry.npm.taobao.org --no-proxy
Install Head
Switch to the elkuser user: su elkuser
Check the versions of the tools installed above (watch for error messages):
node -v
npm -v
grunt --version
Switch back to root (su root), create a directory, then download and extract the head plugin source:
cd /usr/local
mkdir es
wget https://github.com/mobz/elasticsearch-head/archive/master.zip
unzip master
Install the dependencies via the Taobao npm mirror:
cd elasticsearch-head-master
sudo npm install -g cnpm --registry=https://registry.npm.taobao.org
sudo cnpm install
Configure Elasticsearch so that it can be reached over HTTP by the plugin.
vi config/elasticsearch.yml
1. Add the following settings
so that the head plugin can access Elasticsearch (note the space after the colon):
http.cors.enabled: true
http.cors.allow-origin: "*"
2. In the es directory, enter elasticsearch-head-master and edit the Head plugin configuration file:
vim Gruntfile.js
Find connect:server and add a hostname entry, as follows:
connect: {
server: {
options: {
hostname: '0.0.0.0',
port: 9100,
base: '.',
keepalive: true
}
}
}
Start (as elkuser)
1. Restart Elasticsearch:
./elasticsearch -d
2. Start head (as root)
in the elasticsearch-head-master directory:
grunt server
or npm run start
3. Open port 9100 in a browser
and connect it to Elasticsearch on port 9200.
Test
1. Create an index (as elkuser, in Elasticsearch's bin directory):
curl -XPUT http://192.168.179.138:9200/applog
2. Check the change in head:
http://192.168.179.138:9100/
Download Logstash, extract it, and copy it to the target directory
wget https://artifacts.elastic.co/downloads/logstash/logstash-6.2.3.tar.gz
tar zxvf logstash-6.2.3.tar.gz
cp logstash-6.2.3 /usr/local/logstash -rf
Note: the Logstash archive is fairly large (131 MB); you can download https://artifacts.elastic.co/downloads/logstash/logstash-6.2.3.tar.gz directly on the host and copy it into the virtual machine, since downloading inside the VM is slow.
Quick test (in the Logstash home directory)
1. Start in the foreground: ./bin/logstash -e 'input { stdin { } } output { stdout {} }'
2. Once the output above appears, open http://192.168.179.138:9200/_search?pretty in a browser.
Note: startup is slow, and this first run is a foreground run, so exit with Ctrl+C.
Edit the configuration (in the Logstash home directory; adjust the host under input and the hosts and index under output):
vim config/log_to_es.conf
Contents:
# For detail structure of this file
# Set: https://www.elastic.co/guide/en/logstash/current/configuration-file-structure.html
input {
# For detail config for log4j as input,
# See: https://www.elastic.co/guide/en/logstash/current/plugins-inputs-log4j.html
tcp {
mode => "server"
host => "192.168.179.138"
port => 9250
}
}
filter {
#Only matched data are send to output.
}
output {
# For detail config for elasticsearch as output,
# See: https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html
elasticsearch {
action => "index" #The operation on ES
hosts => "192.168.179.138:9200" #ElasticSearch host, can be array.
index => "applog" #The index to write data to.
}
}
Start (in the Logstash home directory)
# Foreground
./bin/logstash -f config/log_to_es.conf
# Or run in the background as a daemon
./bin/logstash -f config/log_to_es.conf &
Test
curl 'http://192.168.40.131:9200/_search?pretty'
or open http://192.168.179.138:9200/_search?pretty in a browser.
Download and extract
wget https://artifacts.elastic.co/downloads/kibana/kibana-6.2.3-linux-x86_64.tar.gz
# Extract the archive
tar zxvf kibana-6.2.3-linux-x86_64.tar.gz
# Copy it to /usr/local and rename the directory to kibana
cp kibana-6.2.3-linux-x86_64 /usr/local/kibana -rf
Edit the configuration
vim config/kibana.yml
Adjust the URL to your environment; either uncomment the following entries so that they take effect,
or simply put them at the top of the file:
server.port: 5601
server.host: "0.0.0.0"
elasticsearch.url: http://192.168.179.138:9200
kibana.index: ".kibana"
Start (in the Kibana home directory)
./bin/kibana
Test
http://192.168.179.138:5601/app/kibana
Using ELK (the web UI)
Copy the previous sleuth-product-provider project.
Add the dependency
<dependency>
    <groupId>net.logstash.logback</groupId>
    <artifactId>logstash-logback-encoder</artifactId>
    <version>5.0</version>
</dependency>
Modify logback.xml
so that logback sends its log output to Logstash; the address is the one configured in the Logstash configuration file.
<configuration>
    <include resource="org/springframework/boot/logging/logback/defaults.xml" />
    <springProperty scope="context" name="springAppName"
        source="spring.application.name" />
    <property name="LOG_FILE" value="${BUILD_FOLDER:-build}/${springAppName}" />
    <property name="CONSOLE_LOG_PATTERN"
        value="%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}}" />
    <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>INFO</level>
        </filter>
        <encoder>
            <pattern>${CONSOLE_LOG_PATTERN}</pattern>
            <charset>utf8</charset>
        </encoder>
    </appender>
    <appender name="logstash"
        class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <destination>192.168.179.138:9250</destination>
        <encoder
            class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
            <providers>
                <timestamp>
                    <timeZone>UTC</timeZone>
                </timestamp>
                <pattern>
                    <pattern>
                        {
                        "severity": "%level",
                        "service": "${springAppName:-}",
                        "trace": "%X{X-B3-TraceId:-}",
                        "span": "%X{X-B3-SpanId:-}",
                        "exportable": "%X{X-Span-Export:-}",
                        "pid": "${PID:-}",
                        "thread": "%thread",
                        "class": "%logger{40}",
                        "rest": "%message"
                        }
                    </pattern>
                </pattern>
            </providers>
        </encoder>
    </appender>
    <root level="DEBUG">
        <appender-ref ref="console" />
        <appender-ref ref="logstash" />
    </root>
</configuration>
Copy the previous sleuth-consumer project.
Add the logstash dependency, as above.
Modify logback.xml, as above.
Test
Start the provider and consumer and send requests to generate log data.
Check whether Elasticsearch receives the logs and inspect them with Kibana.
Check Elasticsearch at http://192.168.179.138:9100/
The Dapper paper (Chinese translation): http://bigbully.github.io/Dapper-translation/
With Spring Boot 2.0 and later, building your own Zipkin server is no longer officially supported for service tracing; a pre-built jar is provided instead.
Download the server:
https://search.maven.org/remote_content?g=io.zipkin.java&a=zipkin-server&v=LATEST&c=exec
or via the command line:
curl -sSL https://zipkin.io/quickstart.sh | bash -s
Upload it to a server, or start it locally:
java -jar zipkin-server-2.12.9-exec.jar
Copy the two Sleuth-ELK-xxxxx projects from the previous section.
Modify the dependencies:
remove the logstash dependency and add the Zipkin client dependency.
<dependency>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-starter-zipkin</artifactId>
</dependency>
Add the following configuration
spring:
  zipkin:
    base-url: http://127.0.0.1:9411
    enabled: true
  sleuth:
    web:
      client:
        enabled: true
    sampler:
      probability: 1.0 # Zipkin sampling rate; 0.1 would sample 10% of requests
Test
Start the product and consumer projects, check that data can still be fetched, then open the Zipkin UI and inspect the trace information. (Because the virtual machine's clock differs from the host's, the time range may have to be widened before any data shows up.)
Request processing time = receive - send.
Create the server
Keep using the zipkin jar from before, but change how it is started: pass the RabbitMQ address, user name, and password as parameters, for example:
RABBIT_ADDRESSES=192.168.179.136 RABBIT_USER=mquser RABBIT_PASSWORD=xxxxxx java -jar zipkin.jar
Alternatively use Zipkin's default guest user: RABBIT_ADDRESSES=192.168.40.130 java -jar zipkin.jar.
For security reasons RabbitMQ currently only allows the guest user to log in from localhost; to lift that restriction, remove <<"guest">> from loopback_users in rabbit.app under RabbitMQ's ebin directory and restart the RabbitMQ service.
The other configuration parameters for running Zipkin with RabbitMQ from the command line are listed below:
rabbitmq:
# RabbitMQ server address list (comma-separated list of host:port)
addresses: ${RABBIT_ADDRESSES:}
concurrency: ${RABBIT_CONCURRENCY:1}
# TCP connection timeout in milliseconds
connection-timeout: ${RABBIT_CONNECTION_TIMEOUT:60000}
password: ${RABBIT_PASSWORD:guest}
queue: ${RABBIT_QUEUE:zipkin}
username: ${RABBIT_USER:guest}
virtual-host: ${RABBIT_VIRTUAL_HOST:/}
useSsl: ${RABBIT_USE_SSL:false}
uri: ${RABBIT_URI:}
scribe:
enabled: ${SCRIBE_ENABLED:false}
category: ${SCRIBE_CATEGORY:zipkin}
port: ${COLLECTOR_PORT:9410}
The full list of configuration parameters: https://github.com/openzipkin/zipkin/blob/master/zipkin-server/src/main/resources/zipkin-server-shared.yml
<dependency>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-starter-stream-rabbit</artifactId>
</dependency>
<dependency>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-starter-zipkin</artifactId>
</dependency>
<dependency>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-starter-sleuth</artifactId>
</dependency>
rabbitmq:
  host: 192.168.40.130
  port: 5672
  username: mquser
  password: xxxxxx
  listener:
    simple:
      retry:
        enabled: true   # enable retries
        max-attempts: 5 # number of retries
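Depending on the Sleuth/Zipkin version, the client may also need the span transport switched from HTTP to RabbitMQ explicitly; with spring-cloud-starter-zipkin this is done with the following property (a sketch; some versions pick RabbitMQ automatically when it is on the classpath):
spring:
  zipkin:
    sender:
      type: rabbit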
Point Zipkin at a database and it will persist the trace data automatically.
First create the tables in MySQL:
CREATE TABLE IF NOT EXISTS zipkin_spans (
`trace_id_high` BIGINT NOT NULL DEFAULT 0 COMMENT 'If non zero, this means the trace uses 128 bit traceIds instead of 64 bit',
`trace_id` BIGINT NOT NULL,
`id` BIGINT NOT NULL,
`name` VARCHAR(255) NOT NULL,
`parent_id` BIGINT,
`debug` BIT(1),
`start_ts` BIGINT COMMENT 'Span.timestamp(): epoch micros used for endTs query and to implement TTL',
`duration` BIGINT COMMENT 'Span.duration(): micros used for minDuration and maxDuration query'
) ENGINE=InnoDB ROW_FORMAT=COMPRESSED CHARACTER SET=utf8 COLLATE utf8_general_ci;
ALTER TABLE zipkin_spans ADD UNIQUE KEY(`trace_id_high`, `trace_id`, `id`) COMMENT 'ignore insert on duplicate';
ALTER TABLE zipkin_spans ADD INDEX(`trace_id_high`, `trace_id`, `id`) COMMENT 'for joining with zipkin_annotations';
ALTER TABLE zipkin_spans ADD INDEX(`trace_id_high`, `trace_id`) COMMENT 'for getTracesByIds';
ALTER TABLE zipkin_spans ADD INDEX(`name`) COMMENT 'for getTraces and getSpanNames';
ALTER TABLE zipkin_spans ADD INDEX(`start_ts`) COMMENT 'for getTraces ordering and range';
CREATE TABLE IF NOT EXISTS zipkin_annotations (
`trace_id_high` BIGINT NOT NULL DEFAULT 0 COMMENT 'If non zero, this means the trace uses 128 bit traceIds instead of 64 bit',
`trace_id` BIGINT NOT NULL COMMENT 'coincides with zipkin_spans.trace_id',
`span_id` BIGINT NOT NULL COMMENT 'coincides with zipkin_spans.id',
`a_key` VARCHAR(255) NOT NULL COMMENT 'BinaryAnnotation.key or Annotation.value if type == -1',
`a_value` BLOB COMMENT 'BinaryAnnotation.value(), which must be smaller than 64KB',
`a_type` INT NOT NULL COMMENT 'BinaryAnnotation.type() or -1 if Annotation',
`a_timestamp` BIGINT COMMENT 'Used to implement TTL; Annotation.timestamp or zipkin_spans.timestamp',
`endpoint_ipv4` INT COMMENT 'Null when Binary/Annotation.endpoint is null',
`endpoint_ipv6` BINARY(16) COMMENT 'Null when Binary/Annotation.endpoint is null, or no IPv6 address',
`endpoint_port` SMALLINT COMMENT 'Null when Binary/Annotation.endpoint is null',
`endpoint_service_name` VARCHAR(255) COMMENT 'Null when Binary/Annotation.endpoint is null'
) ENGINE=InnoDB ROW_FORMAT=COMPRESSED CHARACTER SET=utf8 COLLATE utf8_general_ci;
ALTER TABLE zipkin_annotations ADD UNIQUE KEY(`trace_id_high`, `trace_id`, `span_id`, `a_key`, `a_timestamp`) COMMENT 'Ignore insert on duplicate';
ALTER TABLE zipkin_annotations ADD INDEX(`trace_id_high`, `trace_id`, `span_id`) COMMENT 'for joining with zipkin_spans';
ALTER TABLE zipkin_annotations ADD INDEX(`trace_id_high`, `trace_id`) COMMENT 'for getTraces/ByIds';
ALTER TABLE zipkin_annotations ADD INDEX(`endpoint_service_name`) COMMENT 'for getTraces and getServiceNames';
ALTER TABLE zipkin_annotations ADD INDEX(`a_type`) COMMENT 'for getTraces';
ALTER TABLE zipkin_annotations ADD INDEX(`a_key`) COMMENT 'for getTraces';
ALTER TABLE zipkin_annotations ADD INDEX(`trace_id`, `span_id`, `a_key`) COMMENT 'for dependencies job';
CREATE TABLE IF NOT EXISTS zipkin_dependencies (
`day` DATE NOT NULL,
`parent` VARCHAR(255) NOT NULL,
`child` VARCHAR(255) NOT NULL,
`call_count` BIGINT,
`error_count` BIGINT
) ENGINE=InnoDB ROW_FORMAT=COMPRESSED CHARACTER SET=utf8 COLLATE utf8_general_ci;
ALTER TABLE zipkin_dependencies ADD UNIQUE KEY(`day`, `parent`, `child`);
Start Zipkin:
STORAGE_TYPE=mysql MYSQL_JDBC_URL=jdbc:mysql://localhost:3306/zipkin MYSQL_USER=root MYSQL_PASS=xxxxxxxx RABBIT_ADDRESSES=192.168.179.136 RABBIT_USER=mquser RABBIT_PASSWORD=xxxxxx java -jar zipkin.jar
Note: this is a single command line; the leading assignments are parameters and java -jar ... at the end is the actual command.
Database-related configuration parameters:
mysql:
jdbc-url: ${MYSQL_JDBC_URL:}
host: ${MYSQL_HOST:localhost}
port: ${MYSQL_TCP_PORT:3306}
username: ${MYSQL_USER:}
password: ${MYSQL_PASS:}
db: ${MYSQL_DB:zipkin}
max-active: ${MYSQL_MAX_CONNECTIONS:10}
use-ssl: ${MYSQL_USE_SSL:false}
Test
Send a request through the consumer, inspect the trace in Zipkin, and check whether the data appears in the database.
(I could not get this last step to work myself; the problem is probably the database connection, possibly a version issue.)