前面已经完成了Spring Cloud + Sleuth + Zipkin的入门,以及Kafka的安装。至于ES这里就不再说明了,网上安装使用资料挺多的,这里仅仅是将其作为持久化工具使用。
在pom.xml中加入如下内容:
<dependencies>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <dependency>
        <groupId>io.zipkin.java</groupId>
        <artifactId>zipkin</artifactId>
        <version>2.4.2</version>
    </dependency>
    <dependency>
        <groupId>io.zipkin.java</groupId>
        <artifactId>zipkin-autoconfigure-storage-elasticsearch-http</artifactId>
        <version>2.4.2</version>
        <optional>true</optional>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-sleuth-zipkin-stream</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter-stream-kafka</artifactId>
    </dependency>
</dependencies>
同时在resources中创建application.yml
#配置kafka
spring:
  sleuth:
    enabled: false
    sampler:
      percentage: 1.0
  cloud:
    stream:
      kafka:
        binder:
          brokers: localhost:9092
          zkNodes: localhost:2181
#ES配置
zipkin:
  storage:
    type: elasticsearch
    elasticsearch:
      host: localhost:9200
      cluster: elasticsearch
      index: zipkin
      index-shards: 1
      index-replicas: 1
启动类增加如下注解:
@SpringBootApplication
@EnableZipkinStreamServer // lets this application act as a Zipkin server fed by the stream (Kafka) binder
public class ZipkinServerApplication {

    /** Entry point: boots the embedded Zipkin stream server. */
    public static void main(String[] args) {
        SpringApplication.run(ZipkinServerApplication.class, args);
    }
}
启动后可以 在控制台看到kafka的连接信息
在pom.xml中引入:
<dependencies>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-sleuth-zipkin-stream</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter-sleuth</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-stream-binder-kafka</artifactId>
    </dependency>
    <dependency>
        <groupId>net.logstash.logback</groupId>
        <artifactId>logstash-logback-encoder</artifactId>
        <version>4.6</version>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter-feign</artifactId>
    </dependency>
</dependencies>
注意对于日志依赖的引入。
在application.yml中加入如下配置:
server:
  port: 8082
spring:
  application:
    name: serverone
  sleuth:
    sampler:
      percentage: 1.0
  cloud:
    stream:
      kafka:
        binder:
          brokers: localhost:9092
          zkNodes: localhost:2181
创建logback-spring.xml并如下配置:
<configuration>
    <include resource="org/springframework/boot/logging/logback/defaults.xml"/>
    <springProperty scope="context" name="springAppName" source="spring.application.name"/>
    <property name="LOG_FILE" value="${BUILD_FOLDER:-build}/${springAppName}"/>
    <property name="CONSOLE_LOG_PATTERN"
              value="%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr([${springAppName:-},%X{X-B3-TraceId:-},%X{X-B3-SpanId:-},%X{X-Span-Export:-}]){yellow} %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %clr(%-40.40logger{39}){cyan} %clr(:){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}"/>
    <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>INFO</level>
        </filter>
        <encoder>
            <pattern>${CONSOLE_LOG_PATTERN}</pattern>
            <charset>utf8</charset>
        </encoder>
    </appender>
    <appender name="flatfile" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_FILE}</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_FILE}.%d{yyyy-MM-dd}.gz</fileNamePattern>
            <maxHistory>7</maxHistory>
        </rollingPolicy>
        <encoder>
            <pattern>${CONSOLE_LOG_PATTERN}</pattern>
            <charset>utf8</charset>
        </encoder>
    </appender>
    <appender name="logstash" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_FILE}.json</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_FILE}.json.%d{yyyy-MM-dd}.gz</fileNamePattern>
            <maxHistory>7</maxHistory>
        </rollingPolicy>
        <encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
            <providers>
                <timestamp>
                    <timeZone>UTC</timeZone>
                </timestamp>
                <pattern>
                    <pattern>
                        {
                        "severity": "%level",
                        "service": "${springAppName:-}",
                        "trace": "%X{X-B3-TraceId:-}",
                        "span": "%X{X-B3-SpanId:-}",
                        "exportable": "%X{X-Span-Export:-}",
                        "pid": "${PID:-}",
                        "thread": "%thread",
                        "class": "%logger{40}",
                        "rest": "%message"
                        }
                    </pattern>
                </pattern>
            </providers>
        </encoder>
    </appender>
    <root level="INFO">
        <appender-ref ref="logstash"/>
    </root>
</configuration>
在启动类加入如下配置:
@SpringBootApplication // already bundles @EnableAutoConfiguration and @ComponentScan, so no extra annotation needed
@EnableFeignClients // enable scanning for @FeignClient interfaces
public class ServerOneApplication {

    /** Entry point for service one. */
    public static void main(String[] args) {
        SpringApplication.run(ServerOneApplication.class, args);
    }
}
创建的restTemplate 配置类为:
/**
 * Declares the {@link RestTemplate} bean used by the controller for
 * plain REST calls to the downstream service.
 */
@Configuration
public class RestConfiguration {

    /** A default, unconfigured RestTemplate. */
    @Bean
    public RestTemplate restTemplate() {
        final RestTemplate template = new RestTemplate();
        return template;
    }
}
feign的客户端为:
/** Feign client proxying the remote sayHello endpoint. */
@FeignClient(name = "sleuthone", url = "http://localhost:8888")
public interface SleuthService {

    /**
     * Calls /sayHello/{name} on the remote service.
     *
     * @param name path segment passed to the remote endpoint
     * @return the remote service's response body
     */
    @RequestMapping("/sayHello/{name}")
    String sayHello(@PathVariable(name = "name") String name);
}
调用的后台controller为:
@RestController // implies @ResponseBody on every handler method, so per-method @ResponseBody is unnecessary
public class SleuthController {

    /** Base URL of the downstream service; was duplicated as a magic string per call site. */
    private static final String SERVER_TWO_BASE_URL = "http://localhost:8888";

    @Autowired
    private RestTemplate restTemplate;

    @Autowired
    private SleuthService sleuthService;

    /**
     * Invokes the downstream sayHello endpoint via RestTemplate.
     *
     * @param name path segment forwarded downstream
     * @return "rest " prefixed to the downstream response
     */
    @RequestMapping("/restHello/{name}")
    public String restHello(@PathVariable String name) {
        return "rest " + restTemplate.getForObject(SERVER_TWO_BASE_URL + "/sayHello/" + name, String.class);
    }

    /**
     * Invokes the downstream sayHello endpoint via the Feign client.
     *
     * @param name path segment forwarded downstream
     * @return "feign " prefixed to the downstream response
     */
    @RequestMapping("/feignHello/{name}")
    public String feignHello(@PathVariable String name) {
        return "feign " + sleuthService.sayHello(name);
    }
}
服务二与服务一基本相同,不同的地方在于服务二不需要引入feign了,其pom如下:
<dependencies>
<dependency>
<groupId>org.springframework.bootgroupId>
<artifactId>spring-boot-starter-webartifactId>
dependency>
<dependency>
<groupId>org.springframework.cloudgroupId>
<artifactId>spring-cloud-starter-sleuthartifactId>
dependency>
<dependency>
<groupId>org.springframework.cloudgroupId>
<artifactId>spring-cloud-sleuth-zipkin-streamartifactId>
dependency>
<dependency>
<groupId>org.springframework.cloudgroupId>
<artifactId>spring-cloud-stream-binder-kafkaartifactId>
dependency>
<dependency>
<groupId>net.logstash.logbackgroupId>
<artifactId>logstash-logback-encoderartifactId>
<version>4.6version>
dependency>
dependencies>
其他一致,服务二仅提供一个sayHello服务。
@RestController // implies @ResponseBody, so the per-method annotation was redundant
public class SleuthController {

    /**
     * Greeting endpoint invoked by service one.
     *
     * @param name path segment to greet
     * @return the string "hello " followed by {@code name}
     */
    @RequestMapping("/sayHello/{name}")
    public String sayHello(@PathVariable String name) {
        return "hello " + name;
    }
}
分别访问http://localhost:8082/restHello/lisi
http://localhost:8082/feignHello/lisi
正确返回信息后,我们访问es查询数据
http://localhost:9200/zipkin*/_search?pretty
可以看到如下数据:
{
"took" : 2,
"timed_out" : false,
"_shards" : {
"total" : 2,
"successful" : 2,
"skipped" : 0,
"failed" : 0
},
"hits" : {
"total" : 37,
"max_score" : 1.0,
"hits" : [
{
"_index" : "zipkin:span-2018-01-08",
"_type" : "span",
"_id" : "AWDUsg4wSqtAFWoqJU1h",
"_score" : 1.0,
"_source" : {
"traceId" : "45fc228abc4a4edf",
"duration" : 4000,
"shared" : true,
"localEndpoint" : {
"serviceName" : "servertwo",
"ipv4" : "10.130.236.27",
"port" : 8888 },
"timestamp_millis" : 1515396926392,
"kind" : "SERVER",
"name" : "http:/sayhello/lisi",
"id" : "d852bf5a65f75acb",
"parentId" : "45fc228abc4a4edf",
"timestamp" : 1515396926392000,
"tags" : {
"mvc.controller.class" : "SleuthController",
"mvc.controller.method" : "sayHello",
"spring.instance_id" : "01C702601479820.corp.haier.com:servertwo:8888" }
}
},
{
"_index" : "zipkin:span-2018-01-08",
"_type" : "span",
"_id" : "AWDUsg8wSqtAFWoqJU1i",
"_score" : 1.0,
"_source" : {
"traceId" : "45fc228abc4a4edf",
"duration" : 36000,
"localEndpoint" : {
"serviceName" : "serverone",
"ipv4" : "10.130.236.27",
"port" : 8082 },
"timestamp_millis" : 1515396926361,
"kind" : "CLIENT",
"name" : "http:/sayhello/lisi",
"id" : "d852bf5a65f75acb",
"parentId" : "45fc228abc4a4edf",
"timestamp" : 1515396926361000,
"tags" : {
"http.host" : "localhost",
"http.method" : "GET",
"http.path" : "/sayHello/lisi",
"http.url" : "http://localhost:8888/sayHello/lisi",
"spring.instance_id" : "01C702601479820.corp.haier.com:serverone:8082" }
}
},
{
"_index" : "zipkin:span-2018-01-08",
"_type" : "span",
"_id" : "AWDUsg8wSqtAFWoqJU1j",
"_score" : 1.0,
"_source" : {
"traceId" : "45fc228abc4a4edf",
"duration" : 56696,
"localEndpoint" : {
"serviceName" : "serverone",
"ipv4" : "10.130.236.27",
"port" : 8082 },
"timestamp_millis" : 1515396926355,
"kind" : "SERVER",
"name" : "http:/feignhello/lisi",
"id" : "45fc228abc4a4edf",
"timestamp" : 1515396926355000,
"tags" : {
"mvc.controller.class" : "SleuthController",
"mvc.controller.method" : "feignHello",
"spring.instance_id" : "01C702601479820.corp.haier.com:serverone:8082" }
}
}
]
}
}
我们需要注意:duration这里的单位是微秒,所以56696表示耗时约56.7ms
1、数据没有进入到kafka通道,没有配置日志输出到日志文件,通过对源码分析,我们发现其kafka绑定的是日志事件
2、启动服务时,kafka停机或者不能消费参看上一篇关于kafka不能消费的问题。其实主要原因是jdk版本问题。
源码地址