Deterministic Finite Automaton (DFA), i.e. a finite automaton; in essence it is just a data structure, a nested Map.
Storage: all sensitive words are stored up front into a set of nested maps.
Example:
Sensitive word: 答辩
Article content: 我明天要答辩、毕设答辩、毕业答辩
The nested map below stores words character by character; each node carries an isEnd flag that marks whether a word ends at that character:
{
    "我": {
        "明": {
            "天": {
                "要": {
                    "答": {
                        "辩": {
                            "isEnd": 1
                        },
                        "isEnd": 0
                    },
                    "isEnd": 0
                },
                "isEnd": 0
            },
            "isEnd": 0
        },
        "isEnd": 0
    },
    "毕": {
        "设": {
            "答": {
                "辩": {
                    "isEnd": 1
                },
                "isEnd": 0
            },
            "isEnd": 0
        },
        "业": {
            "答": {
                "辩": {
                    "isEnd": 1
                },
                "isEnd": 0
            },
            "isEnd": 0
        },
        "isEnd": 0
    }
}
Utility class:
import org.apache.commons.lang3.RandomStringUtils;

import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class SensitiveWordUtil {

    public static Map<String, Object> dictionaryMap = new HashMap<>();

    public static ReentrantReadWriteLock reentrantReadWriteLock = new ReentrantReadWriteLock();

    /**
     * Build the keyword dictionary.
     *
     * @param words the sensitive words to load
     */
    public static void initMap(Collection<String> words) {
        ReentrantReadWriteLock.WriteLock writeLock = reentrantReadWriteLock.writeLock();
        writeLock.lock();
        try {
            if (words == null) {
                System.out.println("The sensitive word list must not be null");
                return;
            }
            // Initial capacity words.size(): the top level holds the first characters of all words
            // (usually fewer than words.size(), because different words can share the same first character)
            Map<String, Object> map = new HashMap<>(words.size());
            // The node of the level currently being filled while iterating
            Map<String, Object> curMap = null;
            Iterator<String> iterator = words.iterator();
            while (iterator.hasNext()) {
                String word = iterator.next();
                curMap = map;
                int len = word.length();
                for (int i = 0; i < len; i++) {
                    // Walk through the characters of the word
                    String key = String.valueOf(word.charAt(i));
                    // If the character does not exist on the current level, create its node,
                    // then descend into that node and continue
                    Map<String, Object> wordMap = (Map<String, Object>) curMap.get(key);
                    if (wordMap == null) {
                        // Each node holds two kinds of entries: the next-level nodes and the isEnd flag
                        wordMap = new HashMap<>(2);
                        wordMap.put("isEnd", "0");
                        curMap.put(key, wordMap);
                    }
                    curMap = wordMap;
                    // If this is the last character of the word, set its isEnd flag to 1
                    if (i == len - 1) {
                        curMap.put("isEnd", "1");
                    }
                }
            }
            dictionaryMap = map;
        } finally {
            writeLock.unlock();
        }
    }

    /**
     * Check whether the text starting at the given position matches a keyword.
     *
     * @param text       the text to scan
     * @param beginIndex the position in the text to start matching from
     * @return the length of the matched keyword, or 0 if there is no match
     */
    private static int checkWord(String text, int beginIndex) {
        if (dictionaryMap == null) {
            throw new RuntimeException("The dictionary must not be null");
        }
        // Length of the longest keyword that actually ends within the matched prefix
        int matchLength = 0;
        // Number of characters matched so far
        int wordLength = 0;
        Map<String, Object> curMap = dictionaryMap;
        int len = text.length();
        // Start matching from position beginIndex of the text
        for (int i = beginIndex; i < len; i++) {
            String key = String.valueOf(text.charAt(i));
            // Descend to the node of the current character
            curMap = (Map<String, Object>) curMap.get(key);
            if (curMap == null) {
                break;
            }
            wordLength++;
            // A complete keyword ends at this character
            if ("1".equals(curMap.get("isEnd"))) {
                matchLength = wordLength;
            }
        }
        return matchLength;
    }

    /**
     * Find the matched keywords and how many times each one was hit.
     *
     * @param text the text to scan
     * @return a map from matched keyword to hit count
     */
    public static Map<String, Integer> matchWords(String text) {
        ReentrantReadWriteLock.ReadLock readLock = reentrantReadWriteLock.readLock();
        readLock.lock();
        try {
            Map<String, Integer> wordMap = new HashMap<>();
            int len = text.length();
            for (int i = 0; i < len; i++) {
                int wordLength = checkWord(text, i);
                if (wordLength > 0) {
                    String word = text.substring(i, i + wordLength);
                    // Increase the hit count of the keyword
                    if (wordMap.containsKey(word)) {
                        wordMap.put(word, wordMap.get(word) + 1);
                    } else {
                        wordMap.put(word, 1);
                    }
                    i += wordLength - 1;
                }
            }
            return wordMap;
        } finally {
            readLock.unlock();
        }
    }

    public static void main(String[] args) {
        // List<String> list = new ArrayList<>();
        // list.add("**");
        // list.add("***");
        // list.add("****");
        // initMap(list);
        // String content = "我是一个好人,并不会***,也不****,我真的**";
        // Map<String, Integer> map = matchWords(content);
        // System.out.println(map);

        // Concurrency test: half of the tasks rebuild the dictionary while the other half scan text
        int size = 10000;
        CountDownLatch countDownLatch = new CountDownLatch(size);
        ExecutorService executorService = Executors.newFixedThreadPool(size);
        for (int i = 0; i < size / 2; i++) {
            executorService.submit(() -> {
                countDownLatch.countDown();
                try {
                    countDownLatch.await();
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
                List<String> list = new ArrayList<>();
                for (int j = 0; j < size; j++) {
                    list.add(RandomStringUtils.random(100));
                }
                initMap(list);
            });
            executorService.submit(() -> {
                countDownLatch.countDown();
                try {
                    countDownLatch.await();
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
                String content = "我是一个好人,并不会卖**";
                Map<String, Integer> map = matchWords(content);
                System.out.println(map);
            });
        }
    }
}
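For reference, the example from the top of this section can be pushed through the utility class like this (SensitiveWordDemo is just a throwaway class name used for illustration); it should print {答辩=3}:

import java.util.Collections;
import java.util.Map;

public class SensitiveWordDemo {
    public static void main(String[] args) {
        // Build the dictionary from the sensitive word used in the example above
        SensitiveWordUtil.initMap(Collections.singletonList("答辩"));
        // Scan the sample article content; the result maps each matched word to its hit count
        Map<String, Integer> hits = SensitiveWordUtil.matchWords("我明天要答辩、毕设答辩、毕业答辩");
        // Expected: {答辩=3}
        System.out.println(hits);
    }
}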
Optical Character Recognition (OCR) is the process by which an electronic device examines characters printed on paper, determines their shapes by detecting patterns of dark and bright, and then translates those shapes into computer text using character recognition.
Common OCR solutions:
Solution | Description |
---|---|
Baidu OCR | Paid service |
Tesseract-OCR | Open-source OCR engine maintained by Google; can be called from Java, Python and other languages |
Tess4J | Wraps Tesseract-OCR so that it can be called from Java |
<dependency>
    <groupId>net.sourceforge.tess4j</groupId>
    <artifactId>tess4j</artifactId>
</dependency>
Import the Chinese language data and place the files in your own working directory (resource link).
Write a test class and run a quick test:
import net.sourceforge.tess4j.ITesseract;
import net.sourceforge.tess4j.Tesseract;

import java.io.File;

public class Tess4JTest {
    public static void main(String[] args) throws Exception {
        // The image to run OCR on
        File file = new File("Path/To/Image");
        // Create the Tesseract instance
        ITesseract tesseract = new Tesseract();
        // Set the data path, i.e. the folder that contains the trained language data
        tesseract.setDatapath("Path/To/tessdata");
        // Recognize simplified Chinese
        tesseract.setLanguage("chi_sim");
        // Run the OCR
        String result = tesseract.doOCR(file);
        // Replace carriage returns, line feeds and spaces so the result comes out as a single line
        result = result.replaceAll("\\r|\\n", "-").replaceAll(" ", "-");
        System.out.println(result);
    }
}
Integration with Spring Boot:
Import the dependency
Write the configuration file:
tess4j:
language: chi_sim
dataPath: Path\to\tessdata
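The Tess4JClient injected in the test below comes from the project's own wrapper around Tess4J and is not shown in this section. As a rough sketch only, a configuration-bound client matching the tess4j prefix above might look like this (Tess4JProperties, Tess4JAutoConfiguration and all property and bean names here are assumptions for illustration, not the actual starter code):

import net.sourceforge.tess4j.ITesseract;
import net.sourceforge.tess4j.Tesseract;
import net.sourceforge.tess4j.TesseractException;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.awt.image.BufferedImage;

// Hypothetical sketch: bind the tess4j.* properties from the YAML above
@ConfigurationProperties(prefix = "tess4j")
class Tess4JProperties {
    private String dataPath;
    private String language;

    public String getDataPath() { return dataPath; }
    public void setDataPath(String dataPath) { this.dataPath = dataPath; }
    public String getLanguage() { return language; }
    public void setLanguage(String language) { this.language = language; }
}

// Thin wrapper so callers only deal with BufferedImage in, recognized text out
class Tess4JClient {
    private final String dataPath;
    private final String language;

    public Tess4JClient(String dataPath, String language) {
        this.dataPath = dataPath;
        this.language = language;
    }

    public String doOCR(BufferedImage image) throws TesseractException {
        ITesseract tesseract = new Tesseract();
        tesseract.setDatapath(dataPath);
        tesseract.setLanguage(language);
        return tesseract.doOCR(image);
    }
}

// Expose the client as a bean so it can be @Autowired in tests and services
@Configuration
@EnableConfigurationProperties(Tess4JProperties.class)
class Tess4JAutoConfiguration {
    @Bean
    public Tess4JClient tess4jClient(Tess4JProperties properties) {
        return new Tess4JClient(properties.getDataPath(), properties.getLanguage());
    }
}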
Test:
@Autowired
private Tess4JClient tess4jClient;

@Test
public void test() throws Exception {
    // Read the image and let the injected client run OCR on it
    BufferedImage bufferedImage = ImageIO.read(new File("Path/to/Image"));
    String s = tess4jClient.doOCR(bufferedImage);
    System.out.println(s);
}