0%

springboot 添加xml报文接口

一般的接口都是用json,这篇介绍如何用jackson写xml报文接口

1. 给springboot添加依赖

1
2
compile group: 'com.fasterxml.jackson.dataformat', name: 'jackson-dataformat-xml', version: '2.10.1'
compile group: 'com.fasterxml.jackson.module', name: 'jackson-module-jaxb-annotations', version: '2.10.1'

2. 创建实体类

封装属性类

<Element attr="s,103">value</Element>如果不封装,就没有attr这个属性

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
@Data
public class Element {
    // Rendered as an XML attribute: <Element attr="...">...</Element>
    @JacksonXmlProperty(isAttribute = true)
    private String attr = "q";

    // Rendered as the element's text content.
    @JacksonXmlText
    private String value;

    public Element() {
    }

    public Element(String attr) {
        this(attr, null);
    }

    public Element(String attr, String value) {
        this.attr = attr;
        this.value = value;
    }
}

用户测试类

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty;
import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement;
import lombok.Data;

@JacksonXmlRootElement(localName = "service")
@Data
public class XmlTestBean {

    /** Rendered as the root element's "version" attribute. */
    @JacksonXmlProperty(isAttribute = true)
    private String version = "2.0";

    /** Maps to the <BODY> child element. */
    @JacksonXmlProperty(localName = "BODY")
    private Body body;

    /** Maps to the <SYS_HEAD> child element. */
    @JacksonXmlProperty(localName = "SYS_HEAD")
    private SysHead sysHead;

    // Nested classes must be static: Jackson cannot instantiate a
    // non-static inner class during deserialization because it has no
    // usable no-arg constructor without an enclosing instance.
    @Data
    public static class Body {
        @JacksonXmlProperty(localName = "TX_CODE")
        private Element txCode;
        @JacksonXmlProperty(localName = "Name")
        private Element name;
    }

    @Data
    public static class SysHead {
        @JacksonXmlProperty(localName = "TRAN_TIMESTAMP")
        private Element tranTimestamp;
    }
}

3. 写测试接口

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
@RestController
@RequestMapping("/xml")
public class XmlController {

    // Use the class name as the logger name. The original
    // Logger.getLogger(String.valueOf(getClass())) produced the name
    // "class com.example.XmlController" rather than the class name.
    private final Logger logger = Logger.getLogger(getClass().getName());

    /**
     * Echo-style XML endpoint that mutates the incoming bean and returns it.
     * consumes: request Content-Type must be application/xml
     * produces: response Content-Type is text/xml
     *
     * @param xmlTestBean request body deserialized from XML
     * @return the modified bean, serialized back to XML
     */
    @RequestMapping(value = "/test", consumes = MediaType.APPLICATION_XML_VALUE, produces = MediaType.TEXT_XML_VALUE)
    @ResponseBody
    public XmlTestBean test(@RequestBody XmlTestBean xmlTestBean) {
        logger.info("receive data: " + xmlTestBean);
        xmlTestBean.setVersion(xmlTestBean.getVersion() + "0");
        XmlTestBean.Body body = xmlTestBean.getBody();
        Element name = body.getName();
        name.setAttr(name.getAttr() + "1");
        name.setValue(name.getValue() + "2");
        body.setName(name);
        body.setTxCode(new Element(body.getTxCode().getAttr() + "3", body.getTxCode().getValue() + "4"));
        xmlTestBean.setBody(body);
        return xmlTestBean;
    }
}

4. 优化返回

返回添加

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
@Configuration
public class Config {

    /**
     * Global Jackson-to-XML converter configuration.
     *
     * @param builder Spring's preconfigured object-mapper builder
     * @return converter whose mapper writes an XML declaration using double quotes
     */
    @Bean
    public MappingJackson2XmlHttpMessageConverter mappingJackson2XmlHttpMessageConverter(
            Jackson2ObjectMapperBuilder builder) {
        XmlMapper xmlMapper = (XmlMapper) builder.createXmlMapper(true).build();
        // Emit <?xml version='1.0' encoding='UTF-8'?> on every response.
        xmlMapper.enable(ToXmlGenerator.Feature.WRITE_XML_DECLARATION);
        // Switch the declaration's quotes from single to double:
        // <?xml version="1.0" encoding="UTF-8"?>
        xmlMapper.getFactory()
                .getXMLOutputFactory()
                .setProperty(com.ctc.wstx.api.WstxOutputProperties.P_USE_DOUBLE_QUOTES_IN_XML_DECL, true);
        return new MappingJackson2XmlHttpMessageConverter(xmlMapper);
    }
}

5. 测试数据

请求数据
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
<?xml version="1.0" encoding="UTF-8"?>
<service version="2.0">
<BODY>
<TX_CODE attr="s,10">facecompare</TX_CODE>
<Name attr="s,10">facecompare</Name>
<IdentNo attr="s,10">99999</IdentNo>
<IdentPhtFilePath attr="s,255">1.jpg</IdentPhtFilePath>
<SpotPhtFilePath attr="s,255">1.jpg</SpotPhtFilePath>
<OvlapPhtFlg attr="s,255">1</OvlapPhtFlg>
<CnlNo attr="s,255">12</CnlNo>
</BODY>
<SYS_HEAD>
<TRAN_TIMESTAMP attr="s,6">153907</TRAN_TIMESTAMP>
<CONSUMER_SEQ_NO attr="s,42">137000140118100000042563</CONSUMER_SEQ_NO>
<WS_ID attr="s,30">p5</WS_ID>
<SERVICE_SCENE attr="s,2">01</SERVICE_SCENE>
<CONSUMER_ID attr="s,6">137000</CONSUMER_ID>
<SERVICE_CODE attr="s,11">11002000018</SERVICE_CODE>
<TRAN_DATE attr="s,8">20140118</TRAN_DATE>
</SYS_HEAD>
<APP_HEAD>
<USER_ID attr="s,30">00120242</USER_ID>
<PER_PAGE_NUM attr="s,3"></PER_PAGE_NUM>
<QUERY_KEY attr="s,256"></QUERY_KEY>
<BRANCH_ID attr="s,9">0135</BRANCH_ID>
</APP_HEAD>
</service>
返回数据
1
2
3
4
5
6
7
8
9
10
<?xml version="1.0" encoding="UTF-8"?>
<service version="2.00">
<BODY>
<TX_CODE attr="s,103">facecompare4</TX_CODE>
<Name attr="s,101">facecompare2</Name>
</BODY>
<SYS_HEAD>
<TRAN_TIMESTAMP attr="s,6">153907</TRAN_TIMESTAMP>
</SYS_HEAD>
</service>

源码

xuanfong1/springLeaning

Mac 安装tomcat

  1. 下载tomcat8,选择zip,双击解压
  2. 执行sudo mv ~/Downloads/apache-tomcat-8.5.49 /Library
  3. idea配置tomcat
  4. Idea添加web

视频直播解决方案

方案一ffmpeg+nginx(rtmp/hls)

rtmp解决方案大众,但是依赖adobe flash player

hls延时高

方案二ffmpeg+webSocket

原理ffmpeg解码转流(图片),webSocket接收,然后前端画布按帧绘制

1
2
3
4
5
6
7
8
#拉取代码https://github.com/phoboslab/jsmpeg
git clone git@github.com:phoboslab/jsmpeg.git
#进入项目目录执行
npm install ws
#运行JSMpeg,8081为ffmpeg推流端口,8082为websocket端口
node websocket-relay.js supersecret 8081 8082
#运行转码推流
ffmpeg -i rtsp://admin:admin@10.30.11.119:554/h264/ch1/main/av_stream -q 0 -f mpegts -codec:v mpeg1video -s 352x240 http://10.30.11.40:8081/supersecret

修改view-stream.html

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
<!DOCTYPE html>
<html>
<head>
<title>JSMpeg Stream Client</title>
<style type="text/css">
html, body {
background-color: #111;
text-align: center;
}
</style>
</head>
<body>
<canvas id="video-canvas"></canvas>
<script type="text/javascript" src="jsmpeg.min.js"></script>
<script type="text/javascript">
var canvas = document.getElementById('video-canvas');
var url = 'ws://10.30.11.150:8082/';
var player = new JSMpeg.Player(url, {canvas: canvas});
</script>
</body>
</html>

测试

访问静态网页

file:///Users/xuanleung/Downloads/jsmpeg-master/view-stream.html

参考

html5播放rtsp方案

phoboslab/jsmpeg

语法

1. SUM(<需要求和的字段,字段必须是数字类型>)count(<需要统计条数的字段>)

sum注意区分count,一个是根据字段里面的值求和,一个是根据条数求数据总条数

1
2
-- 对所有用户的年龄进行累加求和
select SUM(u.AGE) from t_user u ;

2. CASE WHEN <条件> THEN <满足条件的结果> ELSE <不满足条件的结果> END

CASE <条件字段> WHEN <值1> THEN <满足条件=值1的结果> WHEN <值2> THEN <满足条件=值2的结果> ... ELSE <不满足所有条件的结果> END

1
2
3
4
5
6
7
8
9
10
11
12
--eg:查询年龄大于18的flag输出成年人,否则未成年人
select CASE WHEN u.age>18 THEN '成年人' ELSE '未成年人' END as flag from t_user u;
--eg:多条件组合查询年龄大于18且是男的的flag输出男成年人,否则未成年人
select CASE WHEN u.age>18 and u.sex='男' THEN '男成年人' ELSE '未成年人' END as flag from t_user u;
--按条件统计总数,sum是求和,输出只能是1 ELSE 0,因为要进行累加
--统计大于18的总人数
select sum(CASE WHEN u.age>18 THEN 1 ELSE 0 END) as total from t_user u;
--多条件switch实现,将boy替换成男,girl替换成女,其他输出人妖
select CASE u.sex
WHEN 'boy' THEN '男'
WHEN 'girl' THEN '女'
ELSE '人妖' END from t_user u;

3. DECODE(<条件字段>,<值1>,<满足条件=值1的结果>,<值2>,<满足条件=值2的结果>,....,<都不满足>)

1
2
--将boy替换成男,girl替换成女,其他输出人妖,等效于case when
select DECODE(u.SEX,'boy','男','girl','女','人妖') from t_user u;

4. NVL(<需要判断的字段>,<如果判断的字段为null输出的结果>)

1
2
3
4
--数据为null的会替换成人妖
select nvl(u.SEX,'人妖') from t_user u;
--没有年龄的设置为0,方便统计空数据
select sum(nvl(u.age,0)) from t_user u;

5. group by <分组的字段1,字段2...>分组统计

select后面的字段=分组的字段+统计求和等字段,原理分组过后,查询不能查一个组有多个不同结果的字段,如果是相同的结果加入group by 字段1,字段2

1
2
3
4
--按年龄分组统计各个年龄的总数
select u.AGE,count(u.SEX) from T_USER u group by u.AGE;
--按年龄性别进行分组统计,统计年龄相同且性别相同的个数
select u.AGE,u.SEX,count(*) from T_USER u group by u.AGE,u.SEX

5. order by <排序字段> <desc/asc> asc升序,desc降序

1
2
--按时间升序
select * from T_USER order by "creat_time" asc

6. to_char(sysdate, 'yyyy-MM-dd')格式化日期

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
--sysdate获取当前日期,to_char格式化为天
select to_char(sysdate, 'yyyy-MM-dd') from DUAL;
--按天分组统计
select sum(1),to_char(u."creat_time",'YYYY-MM-DD') as day from t_user u group by to_char(u."creat_time",'YYYY-MM-DD');
--按月分组统计,按年等其他日期类似
select sum(1),to_char(u."creat_time",'YYYY-MM') as day from t_user u group by to_char(u."creat_time",'YYYY-MM');
--查询7天前的数据,其他天类似
select * from T_USER u where to_char(u."creat_time",'yyyy-MM-dd')>to_char(sysdate-7, 'yyyy-MM-dd')
--统计查询前7天的数据,当天没有统计为0,按时间降序
select t_date.day, NVl(t_data.total, 0)
from (select TO_CHAR(trunc(sysdate + 1 - ROWNUM), 'yyyy-MM-dd') day from DUAL connect by ROWNUM <= 7) t_date
left join (select to_char(u."creat_time", 'yyyy-MM-dd') as day, count(1) as total
from T_USER u
group by to_char(u."creat_time", 'yyyy-MM-dd')) t_data
on t_data.day = t_date.day
order by t_date.day asc;

7. round(<小数>,<保留小数点后位数>)

1
2
--保留小数点后2位,输出33.33
select round( 1/3*100 ,2) from dual;

8. left join 左连接

以左边为主,右边有就连接,没有就null

1
select * from T_USER l left join T_USER r on l.AGE=r.FLAG;

9. substr(<需要裁剪的字符串>,<开始位置>, <结束位置>)

1
2
-- 输出2019
select substr('2019-01-02',1, 4) from DUAL;

10. connect by

其他用法,获取树形数据(也就是父子关系)见google

rownum数据库关键字,行数

1
2
3
4
--生成1-10的序列
select rownum from dual connect by rownum<=10;
--生成7天的日期
select TO_CHAR(trunc(sysdate+1-ROWNUM),'yyyy-MM-dd') dd from DUAL connect by ROWNUM <= 7

11. union <all> 两个结果集合并

有all 全连接,不去重,没有all 去重

1
2
3
4
5
6
7
8
-- 输出1-4-1-4
select rownum from dual connect by rownum<=4
union all
select rownum from dual connect by rownum<=4;
-- 输出1-4
select rownum from dual connect by rownum<=4
union
select rownum from dual connect by rownum<=4;

12. ROLLUP 分组汇总

ROLLUP汇总分组排列在最后一条数据,但是数据头为null,可以通过null判断取别名为总数

1
2
3
4
5
6
7
8
9
10
11
12
13
14
SELECT nvl(CASE
WHEN sex = 'boy' THEN '男'
WHEN sex = 'girl' THEN '女'
ELSE '人妖'
END, '总数') AS type,
count(1) as num
from t_user
GROUP BY
ROLLUP
( CASE
WHEN sex = 'boy' THEN '男'
WHEN sex = 'girl' THEN '女'
ELSE '人妖'
END);

13. ||字符连接符

用于单位,用于多条数据拼接

1
2
3
4
select 'sex是'||u.SEX||',年龄是'||u.AGE as detail from T_USER u;
--------输出结果------
--sex是boy,年龄是1
--sex是girl,年龄是2

示例数据

1
2
3
4
5
6
7
8
9
10
11
12
create table T_USER
(
AGE NUMBER,
FLAG VARCHAR2(10),
SEX VARCHAR2(4),
"creat_time" DATE
)
INSERT INTO SYSTEM.T_USER (AGE, FLAG, SEX, "creat_time") VALUES (1, '2', 'boy', TO_DATE('2019-10-23 03:14:11', 'YYYY-MM-DD HH24:MI:SS'));
INSERT INTO SYSTEM.T_USER (AGE, FLAG, SEX, "creat_time") VALUES (2, '4', 'girl', TO_DATE('2019-10-24 03:14:14', 'YYYY-MM-DD HH24:MI:SS'));
INSERT INTO SYSTEM.T_USER (AGE, FLAG, SEX, "creat_time") VALUES (3, '6', 'ff', TO_DATE('2019-10-26 03:14:19', 'YYYY-MM-DD HH24:MI:SS'));
INSERT INTO SYSTEM.T_USER (AGE, FLAG, SEX, "creat_time") VALUES (3, '2', null, TO_DATE('2019-10-23 03:14:23', 'YYYY-MM-DD HH24:MI:SS'));
INSERT INTO SYSTEM.T_USER (AGE, FLAG, SEX, "creat_time") VALUES (null, '2', null, TO_DATE('2019-10-23 03:14:25', 'YYYY-MM-DD HH24:MI:SS'));

gitlab-ci构建docker镜像的三种方式

官方教程

shell模式(dood),自定义runner镜像

见: Docker-Gitlab-Runner

优点:

  1. 自定义镜像,集成自己需要的工具

缺点:

  1. 采用宿主机docker进行编译

docker模式(dind),采用docker内部docker

Docker in Docker 19.03 service fails

优点:

  1. 独立(不影响宿主机),可以多线程构建

缺点:

  1. 需要vi /etc/gitlab-runner/config.toml设置[runners.docker]->privileged = true特权模式
  2. 编译慢每次要启动docker服务

版本19以后tls需要挂载或者禁用

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
#[三种方式使用docker构建](https://docs.gitlab.com/ee/ci/docker/using_docker_build.html)
#如果stages没有设置镜像,就使用该镜像
image: docker:19.03.1

# docker in docker =docker dind 容器内部启动docker
# docker out docker = dood 使用宿主机的docker,需要挂载/var/run/docker.sock
services:
- name: docker:19.03.1-dind

#docker in docker 版本19之后要禁用tls,或者配置证书
variables:
DOCKER_TLS_CERTDIR: ""


#每一个stages都会git clone项目
stages:
- package
- build
- deploy

#每一个stages前都会执行这下面的脚本
before_script:
- pwd
- ls

gradle_package:
image: java:8
stage: package
only:
- deploy-dev
script:
- ./gradlew bootJar
artifacts:
paths:
- build/libs/
docker_build:
stage: build
only:
- deploy-dev
script:
- docker build -t test:latest .

docker模式(dood)

错误1:

1
2
3
4
Exception in thread "main" java.lang.UnsatisfiedLinkError: Unable to load library 'NVSSDK':
libNVSSDK.so: 无法打开共享对象文件: 没有那个文件或目录
libossdk.so: 无法打开共享对象文件: 没有那个文件或目录
Native library (linux-x86-64/libNVSSDK.so) not found in resource path ([file:/opt/bpf/package/term_model/NetCameraCapture/NetCameraCapture-0.0.1-SNAPSHOT.jar])

解决

1
2
3
4
5
vi /etc/ld.so.conf
-------------------
/usr/lib #so包路径
-------------------
ldconfig

supervisord文件添加

1
environment=PATH=/home/face/jdk1.8/bin:/opt/bpf/package/term_model/NetCameraCapture,LD_LIBRARY_PATH=/usr/lib

常用命令

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
# 密码登录验证
mongo 10.30.80.194:27017 -u admin --authenticationDatabase=admin -p mima
#查看数据库列表
>show dbs
#切换数据库
>use admin
#查看当前数据库
>db
#查看所有表
>show tables
#验证密码
>db.auth("admin", "adminPass")
#查询表中所有数据
>db.表名.find()
#查看集群状态
>rs.status()

常用场景

1. 批量更新updateMany

更新对象A中包含list<对象B中设备id为a1>,且对象B中的状态为0的所有数据,把状态更新为1

数据结构
1
2
3
4
5
6
7
8
9
10
11
{ 
"_id" : ObjectId("5d566377e831932f076cecfe"),
"name" : "222",
"devices_statuses" : [
{
"device_id" : "a1",
"status" : "0"
}
],
"_class" : "com.xxx.bean.CustomerInfoDevice"
}

####updateMany更新多条

findAndModify区别 findAndModify更新单条,sort排序的首条

mongo对应的js查询脚本

1
db.b_customer_info_device.updateMany({"devices_statuses.status":"0","devices_statuses.device_id":"a1"},{$pull:{"devices_statuses":{term_id:"D00010"}}});

springboot对应写法

1
2
3
4
5
6
7
// Match documents whose devices_statuses entry has this device id and status "0".
// (Original snippet was missing the closing parenthesis of new Query(...).)
Query query = new Query(Criteria.where("devices_statuses.device_id").is(termId).and("devices_statuses.status").in("0"));
Update update = new Update();
// Alternative: $pull the matching entry out of the devices_statuses array.
//update.pull("devices_statuses", Query.query(Criteria.where("term_id").is(termId)));
// Set the matched ($) array element's sync_status to "1".
update.set("devices_statuses.$.sync_status", "1");
mongoTemplate.updateMulti(query, update, CustomerInfoDevice.class);

2. 正则匹配$regex

1
2
3
4
5
6
---data
{"notice_key" : "[E00000003, 测试1]"}
---shell 且查询
db.getCollection("m_mq_log_record").find({"notice_key":{ $regex: /(?=.*测试1)(?=.*E00000003)/ }})
---java 且查询,`|`为或查询
query.addCriteria(Criteria.where("notice_key").regex("(?=.*测试1)(?=.*E00000003|.*E00000004|)"));

3. 聚合查询aggregate

数据结构
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
{
{
_id: ObjectId(7df78ad8902c)
name: '张三',
user: "san"
code: '0'
},
{
_id: ObjectId(7df78ad8902c)
name: '李四',
user: "si"
code: '1'
},
{
_id: ObjectId(7df78ad8902c)
name: '张三',
user: "san"
code: '1'
}
}
group

_id里面的字段进行分组统计,这里按code字段进行分组

_id:null 统计所有

_id:"$code"按code字段进行统计

1
2
3
4
5
6
7
8
9
db.getCollection("m_user").aggregate([
{
"$group":{
_id:"$code"
,recordNum:{'$sum': 1}
}
}
]);

执行结果

1
2
3
4
5
6
7
8
9
{ 
"_id" : "0",
"recordNum" : 1.0
}
// ----------------------------------------------
{
"_id" : "1",
"recordNum" : 2.0
}

group双层嵌套($push)-Pivot Data

先group统计最内层,然后把group聚合的数组对象放到子对象那(利用 subName: { $push: "$$ROOT" }

$push: "$$ROOT"是把聚合的对象放到一个字段subName里面

然后对统计好的再进行统计,如果要统计子对象数组里面的某个字段的数量,用{ $sum: "$$ROOT.total" }

1
2
3
4
-- 先统计event_id
{ "$group": { "_id": { "event_id": "$event_id", "event_sub_type": "$event_sub_type" }, "total": { "$sum": "$num" }, sub: { $push: "$$ROOT" } } }
-- 在分组统计event_sub_type
{ "$group": { "_id": "$_id.event_sub_type", sub: { $push: "$$ROOT" },total: { $sum: "$$ROOT.total" } } }
project控制输出显示的结果

1为显示该字段

1
2
3
4
"$project": {
"_id": 1,
"customer_id": 1
}
cond类似case when

cond里面的if只支持一个条件,但是cond可以嵌套

Java: ConditionalOperators

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
--如果$err_status=5 输出 1否则输出 0
"$cond": { "if": { "$eq": ["$err_status", "5"] }, "then": 1, "else": 0 }
-- 结合project可以统计错误信息等于5的数据条数
"$project": {
customer_id": 1
, "fail_tatus": { "$cond": { "if": { "$eq": ["$err_msg", "5"] }, "then": 1, "else": 0 } }
}
-- cond嵌套使用,"$gt": ["$record", 0] 可以用于判断对象是否为null,不为null继续cond,然后继续添加条件可以判断数组对象内的某个条件,达到1对多,子对象数组统计,判读子对象满足某个条件就设置为1,达到统计效果
{
"$cond": {
"if": { "$gt": ["$record", 0] }, "then":
{
"$cond": {
"if": { "$eq": ["$record.is_opposite", false] }, "then":
1, "else": 0
}
}, "else": 0
}
}
match条件过滤
1

unwind

嵌入实体平铺,1个对象里面包含数组,平铺成一个对象对一个数组的内容,最终等于数组的条数

1
2
3
"$unwind": "$term_info"
-- preserveNullAndEmptyArrays 为true时允许对象为null,不然平铺时如果对象为null时为null的这条数据就会消失
{ "$unwind": { "path": "$record", "preserveNullAndEmptyArrays": true } }
lookup

Java: LookupOperation

表关联左连接

1
2
3
4
-- 表2为主集合
-- from1, localField 表1字段, foreignField 表2字段, as 新表表面
"$lookup": { "from": "b_terminfo", "localField": "devices_statuses.term_id", "foreignField": "term_id", "as": "term_info" }
-- 查询出的结果,表2-[表1数组]
elemMatch 内嵌数组,查询,其中数组里面的一个对象完全满足才会查出来
1
2
3
4
5
"$elemMatch": {
"term_id": "M59903"
, "sync_status": "progressFail"
, "err_msg": { "$ne": "5" }
}}

对应java

1
2
3
4
5
Query query = new Query(Criteria.where("devices_statuses").elemMatch(
Criteria.where("term_id").is(termId)
.and("sync_status").is(Constants.OFFLINE_SYNC_DEVICE_PROGRESSFAIL)
.and("err_msg").ne(Constants.OFFLINE_SYNC_ERRORREASON_FAIL_FEATURE)
));
facet

多条语句组合一个结果,a,b,c各为独立的查询语句

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
db.getCollection("b_company").aggregate([
{
"$facet": {
"a": [
{ "$project": { Id: 1, "day": { "$substr": ["$time", 0, 10] } } }
, { "$match": { day: "2020-07-09" } }
, { "$group": { "_id": "$day", sum: { "$sum": 1 } } }
],
"b": [
{ "$group": { "_id": null, total2: { "$sum": 1 } } }
],
"c":[
{"$lookup":{from:"b_user",localField:"Id",foreignField:"company_id",as:"user"}}
,{"$unwind":"$user"}
,{ "$project": { Id: 1, "day": { "$substr": ["$user.user_login_time", 0, 10] } } }
,{ "$match": { day: "2021-02-05" } }
,{"$group":{"_id":"$Id",sum:{"$sum":1}}}
,{"$count":"total"}
]
}
}
])

对应java

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
Aggregation aggregation = Aggregation.newAggregation(
Aggregation.facet()
.and(
Aggregation.project("_id").and("time").substring(0, 10).as("day")
, Aggregation.match(Criteria.where("day").is(nowDate))
, Aggregation.count().as("count")
).as("day")
.and(
Aggregation.count().as("total")
).as("total")
.and(
Aggregation.lookup("b_user", "Id", "company_id", "user")
, Aggregation.unwind("$user")
, Aggregation.project("_id").and("user.user_login_time").substring(0, 10).as("day")
, Aggregation.match(Criteria.where("day").is(nowDate))
, Aggregation.group("_id")
, Aggregation.count().as("total")
).as("login")
);
$substr

日期转换为天

1
2
3
4
-- yyyy-mm-dd HH:mm:ss 转化成 yyyy-mm-dd
{ "$project": { Id: 1, "day": { "$substr": ["$time", 0, 10] } } }
--java
Aggregation.project("_id").and("time").substring(0, 10).as("day")

最终示例

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
db.getCollection("b_customer_info_device").aggregate([
{ "$unwind": "$devices_statuses" }
, { "$lookup": { "from": "b_terminfo", "localField": "devices_statuses.term_id", "foreignField": "term_id", "as": "term_info" } }
, { "$unwind": "$term_info" }
, { "$match": {} }
, {
"$project": {
"_id": 1, "customer_id": 1, "class_name": 1
, "feature_fail": { "$cond": { "if": { "$eq": ["$devices_statuses.err_msg", "5"] }, "then": 1, "else": 0 } }
, "total_fail": { "$cond": { "if": { "$eq": ["$devices_statuses.sync_status", "progressFail"] }, "then": 1, "else": 0 } }
}
}
, {
"$group": {
"_id": { "_id": "$_id", "customer_id": "$customer_id", "class_name": "$class_name" }
, "total_fail": { "$sum": "$total_fail" }
, "feature_fail": { "$sum": "$feature_fail" }
}
}
, {
"$project": {
"_id": 1, "customer_id": 1, "img_store_data": 1, "customer_name": 1, "class_name": 1
, "feature_fail": { "$cond": { "if": { "$gt": ["$feature_fail", 0] }, "then": 1, "else": 0 } }
, "total_fail": { "$cond": { "if": { "$gt": ["$total_fail", 0] }, "then": 1, "else": 0 } }
}
}

, {
"$group": {
"_id": null
, "total_customer": { "$sum": 1 }
, "total_fail": { "$sum": "$total_fail" }
, "feature_fail": { "$sum": "$feature_fail" }
}
}
]);

对应mongotemplate

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
ConditionalOperators.Cond condOperatorsFeature=ConditionalOperators.when(
criteria.where("devices_statuses.err_msg").is(Constants.OFFLINE_SYNC_ERRORREASON_FAIL_FEATURE))
.then(1)
.otherwise(0);
ConditionalOperators.Cond condOperatorsFail=ConditionalOperators.when(
criteria.where("devices_statuses.sync_status").is(Constants.OFFLINE_SYNC_DEVICE_PROGRESSFAIL))
.then(1)
.otherwise(0);


ConditionalOperators.Cond condOperatorsFeatureTotal=ConditionalOperators.when(
criteria.where("feature_fail").gt(0))
.then(1)
.otherwise(0);
ConditionalOperators.Cond condOperatorsFailTotal=ConditionalOperators.when(
criteria.where("total_fail").gt(0))
.then(1)
.otherwise(0);

Aggregation aggregation = Aggregation.newAggregation(
Aggregation.unwind("$devices_statuses")
,LookupOperation.newLookup().from("b_terminfo")
.localField("devices_statuses.term_id")
.foreignField("term_id").as("term_info")
,Aggregation.unwind("$term_info")
,Aggregation.match(criteria)
,Aggregation.project("customer_id","class_name")
.and(condOperatorsFeature).as("feature_fail")
.and(condOperatorsFail).as("total_fail")
,Aggregation.group("customer_id","class_name")
.sum("total_fail").as("total_fail")
.sum("feature_fail").as("feature_fail")
,Aggregation.project()
.and(condOperatorsFeatureTotal).as("feature_fail")
.and(condOperatorsFailTotal).as("total_fail")
,Aggregation.group()
.count().as("total_customer")
.sum("total_fail").as("total_fail")
.sum("feature_fail").as("feature_fail")
);
AggregationResults<BasicDBObject> dBObjects=mongoTemplate.aggregate(aggregation,"b_customer_info_device",BasicDBObject.class);

JSONArray countResult = JSON.parseObject(dBObjects.getRawResults().toJson()).getJSONArray("result");
if(countResult!=null&&countResult.size()>0){
JSONObject jobj = (JSONObject)countResult.get(0);
return jobj;
}

表关联子类统计

按类别统计每个事件的数量,输出结果如下

1
2
3
4
5
6
7
8
9
|-类型1
|--事件1
|----事件1记录1
|----事件1记录2
|----事件1记录n条
|--事件2
|-类型2
|--事件3
|----事件3记录1
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
String startTime = startDay + " 00:00:00";

ConditionalOperators.Cond condCompany= addCompanyIdCriteriaInCond("record.company_id");

//判断是否是当天
ConditionalOperators.Cond condDay = ConditionalOperators.when(Criteria.where("record.trans_time").gte(startTime)
).then(condCompany!=null?condCompany:1).otherwise(0);

//添加字表的判断是否是应答事件
ConditionalOperators.Cond condOpposite = ConditionalOperators.when(Criteria.where("record.is_opposite").is(false)
).then(condDay).otherwise(0);

//gt(0)是为了判断对象(是否有记录)是否为null,如果有对象就会大于0
ConditionalOperators.Cond condOperators = ConditionalOperators.when(Criteria.where("record").gt(0)
).then(condOpposite).otherwise(0);

Aggregation aggregation = Aggregation.newAggregation(
Aggregation.match(Criteria.where("event_type").is("device"))
, Aggregation.lookup("r_event_record", "event_id", "event_id", "record")
, Aggregation.unwind("$record", true)
, Aggregation.project("event_id", "event_name","event_sub_type").and(condOperators).as("num")
, Aggregation.group("event_id", "event_name","event_sub_type").sum("num").as("total")
, Aggregation.group("_id.event_sub_type").push("$$ROOT").as("sub").sum("$$ROOT.total").as("total")
);

AggregationResults<BasicDBObject> dBObjects = mongoTemplate.aggregate(aggregation, "b_event_info", BasicDBObject.class);
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
db.getCollection("b_event_info").aggregate([
{ "$match": { "event_type": "device" } }
, { "$lookup": { "from": "r_event_record", "localField": "event_id", "foreignField": "event_id", "as": "record" } }
, { "$unwind": { "path": "$record", "preserveNullAndEmptyArrays": true } }

, {
"$project": {
"event_id": 1, "event_name": 1, "num":
{
"$cond": {
"if": { "$gt": ["$record", 0] }, "then":
{
"$cond": {
"if": { "$eq": ["$record.is_opposite", false] }, "then":
{ "$cond": { "if": { "$gte": ["$record.trans_time", "2021-03-16 00:00:00"] }, "then": 1, "else": 0 } }, "else": 0
}
}, "else": 0
}
}
, "event_sub_type": 1
}
}
, { "$group": { "_id": { "event_id": "$event_id", "event_sub_type": "$event_sub_type" }, "total": { "$sum": "$num" }, sub: { $push: "$$ROOT" } } }
, { "$group": { "_id": "$_id.event_sub_type", sub: { $push: "$$ROOT" },total: { $sum: "$$ROOT.total" } } }
--因为$addFields在mongotemplate找不到对应的语句,所以用上面的$group替代
// ,{"$project": {"_id":1,"sub":1, total: { $sum: "$sub.total" }}}
// , {
// $addFields:
// {
// total: { $sum: "$books.total" }
// }
// }
])

常用命令

1
2
3
4
5
6
7
8
9
10
#查看mongo内存使用命令
mongostat
---------------------------------------------------------------------------------------
insert query update delete getmore command dirty used flushes vsize res qrw arw net_in net_out conn time
2 10 4 *0 0 3|0 0.1% 80.0% 0 8.98G 7.85G 0|0 5|0 10.8k 68.6k 30 Aug 20 11:51:55.367
*0 *0 *0 *0 0 2|0 0.1% 80.0% 0 8.98G 7.85G 0|0 5|0 212b 62.0k 29 Aug 20 11:51:56.368
2 10 4 *0 0 2|0 0.1% 80.0% 0 8.98G 7.86G 0|0 5|0 10.8k 68.2k 29 Aug 20 11:51:57.367
*0 3 1 *0 0 2|0 0.1% 80.0% 0 8.98G 7.86G 0|0 5|0 5.38k 65.1k 29 Aug 20 11:51:58.367
*0 *0 *0 *0 0 1|0 0.1% 80.0% 0 8.98G 7.86G 0|0 5|0 157b 61.8k 29 Aug 20 11:51:59.368
--------------------------------------------------------------

WiredTiger存储引擎

wiredTiger对内存使用会分为两大部分,一部分是内部内存,另外一部分是文件系统的缓存。内部内存默认值有一个计算公式{ 50% of(RAM-1GB) ,or256MB },索引和集合的内存都被加载到内部内存,索引是被压缩的放在内部内存,集合则没有压缩。wiredTiger会通过文件系统缓存,自动使用其他所有的空闲内存,放在文件系统缓存里面的数据,与磁盘上的数据格式一致,可以有效减少磁盘I/O。

mongodb不干涉内存管理,将内存管理工作交给操作系统去处理。在使用时必须随时监测内存使用情况,因为mongodb会把所有能用的内存都用完。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
For example, on a system with a total of 4GB of RAM the WiredTiger cache will use 1.5GB of RAM (0.5 * (4 GB - 1 GB) = 1.5 GB). Conversely, a system with a total of 1.25 GB of RAM will allocate 256 MB to the WiredTiger cache because that is more than half of the total RAM minus one gigabyte (0.5 * (1.25 GB - 1 GB) = 128 MB < 256 MB).

NOTE

In some instances, such as when running in a container, the database can have memory constraints that are lower than the total system memory. In such instances, this memory limit, rather than the total system memory, is used as the maximum RAM available.

To see the memory limit, see hostInfo.system.memLimitMB.

By default, WiredTiger uses Snappy block compression for all collections and prefix compression for all indexes. Compression defaults are configurable at a global level and can also be set on a per-collection and per-index basis during collection and index creation.

Different representations are used for data in the WiredTiger internal cache versus the on-disk format:

Data in the filesystem cache is the same as the on-disk format, including benefits of any compression for data files. The filesystem cache is used by the operating system to reduce disk I/O.
Indexes loaded in the WiredTiger internal cache have a different data representation to the on-disk format, but can still take advantage of index prefix compression to reduce RAM usage. Index prefix compression deduplicates common prefixes from indexed fields.
Collection data in the WiredTiger internal cache is uncompressed and uses a different representation from the on-disk format. Block compression can provide significant on-disk storage savings, but data must be uncompressed to be manipulated by the server.
Via the filesystem cache, MongoDB automatically uses all free memory that is not used by the WiredTiger cache or by other processes.

参考

mongodb——内存

简介

cgroups 的全称是control groups,cgroups为每种可以控制的资源定义了一个子系统。docker也是通过该原理进行限制资源。

  • cpu 子系统,主要限制进程的 cpu 使用率。
  • cpuacct 子系统,可以统计 cgroups 中的进程的 cpu 使用报告。
  • cpuset 子系统,可以为 cgroups 中的进程分配单独的 cpu 节点或者内存节点。
  • memory 子系统,可以限制进程的 memory 使用量。
  • blkio 子系统,可以限制进程的块设备 io。
  • devices 子系统,可以控制进程能够访问某些设备。
  • net_cls 子系统,可以标记 cgroups 中进程的网络数据包,然后可以使用 tc 模块(traffic control)对数据包进行控制。
  • freezer 子系统,可以挂起或者恢复 cgroups 中的进程。
  • ns 子系统,可以使不同 cgroups 下面的进程使用不同的 namespace。

安装

参考

Linux资源管理之cgroups简介

1
2
3
4
5
6
7
8
9
10
11
12
13
@Component
public class TestSchedule {
// Scheduler entry point. ScheduleAnnotaion supplies metadata for the
// project's scheduling framework (service id, description, 30s check
// interval, single-instance execution, no history-task record).
// NOTE(review): the annotation name "ScheduleAnnotaion" looks misspelled —
// confirm against its declaration before renaming anything.
@ScheduleAnnotaion(serviceId = "DataSync", scheduleDesc = "数据同步", checkIntervalTime = 30, isSignalExecuteFlag = true, addHisTaskTableFlag = false)
@Scheduled(cron = "0 * * * * ?") // fires at second 0 of every minute (once per minute)

public void executeSchedule() throws ResponseException {
executeScheduleNow();
}

// Actual work goes here; kept separate so it can also be invoked directly
// outside the scheduler.
public void executeScheduleNow() {
//TODO: add the operations that need to run on each tick
}
}
}