Commit 4b67a9ac by liusq

新增绿牌,蓝牌长度限制和信号自动析构

parent f5c6f1e3
#include "CameraHandle.h"
#include "TaskRunnable.h"
#include "HumanDetection.h"
#include "ScopeSemaphoreExit.h"
#include <QRegularExpression>
CameraHandle::CameraHandle(){
......@@ -14,13 +15,12 @@ CameraHandle::CameraHandle(QString &url, QString &httpUrl, QString &sSn, int &ch
channel(channel),
httpUrl(httpUrl),
dev_snap_syn_timer(new QTimer()),
release_timer(new QTimer()),
image_save(imageSave),
semaphore(1) {
connect(this, SIGNAL(afterDownloadFile(int,int,QString)), this, SLOT(pushRecordToCloud(int,int,QString)),Qt::QueuedConnection);
detector = TCV_CreateHumanDetector();
// 设置检测得分阈值 默认0.5
TCV_HumanDetectorSetScoreThreshold(detector, 0.5f);
TCV_HumanDetectorSetScoreThreshold(detector, 0.8f);
HLPR_ContextConfiguration configuration = {0};
QByteArray && by_mpath=modelPaths.toUtf8();
......@@ -35,14 +35,12 @@ CameraHandle::CameraHandle(QString &url, QString &httpUrl, QString &sSn, int &ch
configuration.threads = 1;
ctx = HLPR_CreateContext(&configuration);
connect(release_timer, &QTimer::timeout, this, &CameraHandle::releaseSemaphore);
}
CameraHandle::~CameraHandle() {
Common & instace= Common::getInstance();
dev_snap_syn_timer->stop();
instace.deleteObj(dev_snap_syn_timer);
instace.deleteObj(release_timer);
instace.deleteObj(loginParam);
instace.deleteObj(sxMediaFaceImageReq);
if(detector!=nullptr){
......@@ -240,10 +238,11 @@ void CameraHandle::getCurrentFrame(std::vector<uchar> &buffer){
}
void CameraHandle::initSdkRealTimeDevSnapSyn(int hDevice,int syn_timer) {
void CameraHandle::initSdkRealTimeDevSnapSyn(int hDevice,int syn_timer,uint64 face_frequency) {
connect(dev_snap_syn_timer, &QTimer::timeout, this, [this,hDevice]() {
this->sdkRealTimeDevSnapSyn(hDevice);
},Qt::QueuedConnection);
this->face_frequency=face_frequency;
dev_snap_syn_timer->start(syn_timer);
}
void CameraHandle::sdkRealTimeDevSnapSyn(int hDevice) {
......@@ -257,6 +256,8 @@ void CameraHandle::sdkRealTimeDevSnapSyn(int hDevice) {
threadPool->start(taskRunnable);
}
QString CameraHandle::getSSn(){
return sSn;
......@@ -322,26 +323,14 @@ void CameraHandle::sdkDownloadFileByTime(XSDK_HANDLE hDevice,int id,
}
bool CameraHandle::acquireAndReleaseWithTimeout(bool flag) {
int CameraHandle::callbackFunction(XSDK_HANDLE hObject, QString &szString) {
if (!semaphore.tryAcquire()) {
qInfo() << (flag ? "callbackFunction:正在执行线程 " : "sdkDevSnapSyn:正在执行线程");
return true;
}
QMetaObject::invokeMethod(release_timer, "start",
Qt::QueuedConnection,
Q_ARG(int, timeoutMs));
return false;
}
void CameraHandle::releaseSemaphore() {
if (release_timer->isActive()) {
QMetaObject::invokeMethod(release_timer, "stop", Qt::QueuedConnection);
qInfo() << "sdkDevSnapSyn:正在执行线程";
return -1;
}
semaphore.release();
}
int CameraHandle::callbackFunction(XSDK_HANDLE hObject, QString &szString) {
ScopeSemaphoreExit guard([this]() {
semaphore.release(); // 释放信号量
});
QByteArray && byJson = szString.toLocal8Bit();
const char * cJson= byJson.data();
XSDK_CFG::AlarmInfo alarmInfo;
......@@ -360,31 +349,34 @@ int CameraHandle::callbackFunction(XSDK_HANDLE hObject, QString &szString) {
{
qDebug() << "OnDevAlarmCallback[Dev:" << hObject << "][Event:" << szString << "]";
}
if(acquireAndReleaseWithTimeout(false)){
return -1;
}
cv::Mat image;
MediaFaceImage* mediaFaceImage= MediaFaceImage::getInstance();
qint64 currentTime= QDateTime::currentSecsSinceEpoch();
mediaFaceImage->FaceImageCallBack(hObject,sxMediaFaceImageReq->nChannel,image);
mediaFaceImage->FaceImageCallBack(hObject,this->channel,image);
if (image.empty())
{
qInfo() << "Failed to read the image";
return -1;
}
this->updateImage(image,currentTime);
QMetaObject::invokeMethod(release_timer, "stop", Qt::QueuedConnection);
semaphore.release();
if (image.rows <= 0 || image.cols <= 0 || image.channels() <= 0) {
qInfo() << "图像尺寸或通道数不正确,需排查原因";
return -1;
}
updateImage(image, currentTime);
}
void CameraHandle::sdkDevSnapSyn(XSDK_HANDLE hDevice, int nChannel){
if(hDevice<=0){
qInfo() << "相机断线";
return;
}
if(acquireAndReleaseWithTimeout(true)){
if (!semaphore.tryAcquire()) {
qInfo() << "callbackFunction:正在执行线程";
return ;
}
ScopeSemaphoreExit guard([this]() {
semaphore.release(); // 释放信号量
});
cv::Mat image;
MediaFaceImage* mediaFaceImage= MediaFaceImage::getInstance();
qint64 currentTime= QDateTime::currentSecsSinceEpoch();
......@@ -413,10 +405,11 @@ void CameraHandle::sdkDevSnapSyn(XSDK_HANDLE hDevice, int nChannel){
qInfo() << "Failed to read the image";
return ;
}
this->updateImage(image,currentTime);
QMetaObject::invokeMethod(release_timer, "stop", Qt::QueuedConnection);
semaphore.release();
if (image.rows <= 0 || image.cols <= 0 || image.channels() <= 0) {
qInfo() << "图像尺寸或通道数不正确,需排查原因";
return ;
}
updateImage(image, currentTime);
}
void CameraHandle::setTimeoutMs(int timeoutMs){
......@@ -429,7 +422,6 @@ void CameraHandle::matToBase64(const cv::Mat &image, QByteArray &base64Data) {
cv::imencode(".jpg", image, buffer, params);
base64Data = QByteArray(reinterpret_cast<const char*>(buffer.data()), buffer.size()).toBase64();
}
void CameraHandle::checkAndUpdateCurrentPlate(ParkingSpaceInfo*park,const cv::Mat &frame, RecognizedInfo& newInfo,
int &result,std::map<int,RecognizedInfo>&exitAndMoMap){
if (newInfo.getLicensePlate() != park->getCurrentPlate().getLicensePlate()) {
......@@ -445,26 +437,31 @@ void CameraHandle::checkAndUpdateCurrentPlate(ParkingSpaceInfo*park,const cv::Ma
if(park->getCurrentPlate().getLicensePlate().length()<=0){
//进场
park->setCurrentPlate(newInfo);
result=CAR_INFORMATION::Mobilization;
result=Mobilization;
}else {
//当前为空,离场
if(newInfo.getLicensePlate().length()<=0){
HumanDetection &humanDetection=HumanDetection::getInstance();
int car_size = humanDetection.findHuManCar(frame,1,detector);
qDebug()<<sSn<<":"<<"当前车形数量:"<<car_size;
if(car_size<=0){
//出场
park->setCurrentPlate(newInfo);
result=CAR_INFORMATION::Exit;
result=Exit;
}else{
park-> removeNoQueue();
qDebug()<<sSn<<":"<<"no出场:"<<car_size;
}
}else{
qDebug()<<sSn<<":"<<"出场:"<<2;
qDebug()<<sSn<<":"<<"老车出场:"<<park->getCurrentPlate().getLicensePlate();
qDebug()<<sSn<<":"<<"老车出场:"<<park->getCurrentPlate().getLicensePlate();
//当前不为空,新车,新车入场,老车出场
exitAndMoMap[CAR_INFORMATION::Exit]=park->getCurrentPlate();
exitAndMoMap[CAR_INFORMATION::Mobilization]=newInfo;
exitAndMoMap[Exit]=park->getCurrentPlate();
exitAndMoMap[Mobilization]=newInfo;
park->setCurrentPlate(newInfo);
result=CAR_INFORMATION::ExitAndMobilization;
result=ExitAndMobilization;
}
}
}
......@@ -478,12 +475,20 @@ void CameraHandle::updateImage(const cv::Mat & frame,qint64 currentTime){
qDebug()<<"=============================>";
static int i=0;
printf("updateImage%d次\n", ++i);
faceCount.fetch_add(1, std::memory_order_relaxed);
qDebug()<<"faceCount==>"<<faceCount.load(std::memory_order_relaxed);
int width = frame.cols; // 获取图像宽度
int height = frame.rows; // 获取图像高度
qDebug()<<"frame 宽度:"<<width<<"frame 高度:"<<height;
FaceReconition &faceRecognition = FaceReconition::getInstance();
HumanDetection &humanDetection=HumanDetection::getInstance();
LicensePlateRecognition &licensePlateRecogn =LicensePlateRecognition::getInstance();
static int i=0;
printf("updateImage retryCount: %d \n", ++i);
static int ii=0;
printf("updateImage retryCount: %d \n", ++ii);
//faceRecognition.search(frame,imageHandleList,names);
QByteArray imgs;
int faSize=humanDetection.findHuManCar(frame,0,detector);
......@@ -491,6 +496,7 @@ void CameraHandle::updateImage(const cv::Mat & frame,qint64 currentTime){
this->matToBase64(frame, imgs);
HttpService httpService(httpUrl);
if(currentFace!=faSize){
if(faceCount.load(std::memory_order_relaxed)%face_frequency==0){
vides_data::response* resp=httpService.httpPostFacePopulation(imgs,faSize,sSn,currentTime);
if (resp->code!= 0) {
qInfo()<<"人数变化推送信息推送失败";
......@@ -498,11 +504,13 @@ void CameraHandle::updateImage(const cv::Mat & frame,qint64 currentTime){
instace.deleteObj(resp);
currentFace=faSize;
}
}
if(faSize>0){
qDebug() << "faceRecognition.doesItExistEmployee Current thread ID: " << QThread::currentThreadId();
std::list<vides_data::faceRecognitionResult>faces;
faceRecognition.doesItExistEmployee(frame,faces);
if (!faces.empty()) {
for (const auto& face : faces) {
if (faces.size()>0) {
for(auto face:faces){
vides_data::requestFaceReconition faceReconition;
faceReconition.id = face.id;
faceReconition.img = imgs;
......@@ -521,7 +529,7 @@ void CameraHandle::updateImage(const cv::Mat & frame,qint64 currentTime){
vides_data::response* resp = httpService.httpPostFaceReconition(faceReconition);
if (resp->code!= 0) {
qInfo() << "识别人脸信息推送失败:" << face.id;
qInfo()<<"识别人脸信息推送失败"<<face.id;
}
instace.deleteObj(resp);
}
......@@ -589,9 +597,38 @@ void CameraHandle::updateImage(const cv::Mat & frame,qint64 currentTime){
value->removeQueue();
}
vides_data::LicensePlate recognition= indexToLicensePlate.at(key);
RecognizedInfo recognizedInfo(recognition.new_plate,recognition.time,recognition.new_color);
value->addQueue(recognizedInfo);
RecognizedInfo recognizedInfo;
if (recognition.new_color=="蓝牌" && recognition.new_plate.length() != 7) {
return;
} else if (recognition.new_color=="绿牌新能源" && recognition.new_plate.length() != 8) {
return;
} else if (recognition.new_plate.length() != 7) {
return;
}
if(recognition.text_confidence>=instace.getCarConfidenceMax()){
if(value->getQueue().size()>=7 && value->getQueue().size()<=10) {
for (int i = 0; i < 3; ++i) {
value->removeQueue();
}
}
for (int var = 0; var < 3; ++var) {
RecognizedInfo info(recognition.new_plate,recognition.time,recognition.new_color);
value->addQueue(info);
recognizedInfo=std::move(info);
}
this->checkAndUpdateCurrentPlate(value,frame,recognizedInfo,res,exitMoMap);
}
if(recognition.text_confidence<=instace.getCarConfidenceMin()){
return;
}
if(recognition.text_confidence>instace.getCarConfidenceMin()
&& recognition.text_confidence<instace.getCarConfidenceMax())
{
RecognizedInfo info(recognition.new_plate,recognition.time,recognition.new_color);
value->addQueue(info);
recognizedInfo=std::move(info);
this->checkAndUpdateCurrentPlate(value,frame,recognizedInfo,res,exitMoMap);
}
if (res == Exit || res == Mobilization) {
recognition.areaLocation=value->getArea();
recognition.img=imgs;
......
......@@ -21,6 +21,7 @@
#include <QDateTime>
#include <QJsonDocument>
#include <memory>
#include <functional>
#include <QString>
#include <QObject>
#include <QTimer>
......@@ -28,9 +29,7 @@
#include <QQueue>
#include <opencv2/opencv.hpp>
#include <QSemaphore>
#include <atomic>
enum CAR_INFORMATION {
Exit, //出场
Mobilization, //进场
......@@ -54,7 +53,7 @@ public:
void clearCameraHandle();
// void rebindTimer(int hDevice);
void initSdkRealTimeDevSnapSyn(int hDevice,int syn_timer);
void initSdkRealTimeDevSnapSyn(int hDevice,int syn_timer,uint64 face_frequency);
void updateImage(const cv::Mat & frame,qint64 currentTime);
......@@ -79,7 +78,6 @@ public:
QString startTimer,QString endTime);
void setTimeoutMs(int timeoutMs);
bool acquireAndReleaseWithTimeout(bool flag);
QString getSSn();
......@@ -110,10 +108,11 @@ signals:
void callbackFrameReady(const cv::Mat &frame, const QString &url);
void afterDownloadFile( int id,int recognitionType,QString ossUrl);
private slots:
void sdkRealTimeDevSnapSyn(int hDevice);
void pushRecordToCloud(int id,int recognitionType,QString ossUrl);
void releaseSemaphore();
//void releaseSemaphore();
private :
int hDevice;
......@@ -143,8 +142,6 @@ private :
//2秒钟抓一次图
QTimer *dev_snap_syn_timer;
QTimer *release_timer;
int offlineCount=0;
TCV_HumanDetector *detector;
......@@ -156,7 +153,9 @@ private :
int timeoutMs;
int image_save;
std::atomic<uint64> faceCount;
uint64 face_frequency;
};
......
......@@ -63,6 +63,19 @@ void Common::setImages(QString images){
this->images=images;
}
float Common::getCarConfidenceMax() const{
return carConfidenceMax;
}
// Stores the upper plate-confidence threshold (read from settings at startup).
void Common::setCarConfidenceMax(float carConfidenceMax) {
    this->carConfidenceMax = carConfidenceMax;
}
float Common::getCarConfidenceMin() const{
return carConfidenceMin;
}
// Stores the lower plate-confidence threshold (read from settings at startup).
void Common::setCarConfidenceMin(float carConfidenceMin) {
    this->carConfidenceMin = carConfidenceMin;
}
QString Common::GetLocalIp() {
QString ipAddress;
QList<QHostAddress> list = QNetworkInterface::allAddresses();
......
......@@ -44,6 +44,11 @@ public:
QString GetLocalIp();
float getCarConfidenceMax() const;
void setCarConfidenceMax(float carConfidenceMax);
float getCarConfidenceMin() const;
void setCarConfidenceMin(float carConfidenceMin);
template<typename T>
void deleteObj(T*& obj) {
......@@ -57,6 +62,8 @@ private:
QString videoOut;
QString videoDownload;
QString images;
float carConfidenceMax;
float carConfidenceMin;
Common();
~Common();
......
......@@ -6,14 +6,13 @@
#include "herror.h"
#include "LogHandle.h"
#include "VidesData.h"
#include <mutex>
class FaceReconition
{
private:
static FaceReconition* instance;
HContextHandle ctxHandle;
HContextHandle ctxHandle=nullptr;
float configConfidence;
......
#include "FaceRecognition.h"
#include <QThread>
FaceReconition::FaceReconition() {}
......@@ -23,23 +25,21 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,float con
QString bPath = QCoreApplication::applicationDirPath() + "/model_zip/T1_5";
#else
#error "不支持的架构"
#endif
QByteArray && bypath = bPath.toUtf8();
char* spath = bypath.data();
HString path = spath;
HInt32 option = HF_ENABLE_QUALITY | HF_ENABLE_FACE_RECOGNITION | HF_ENABLE_MASK_DETECT;
HF_DetectMode detMode = HF_DETECT_MODE_IMAGE; // 选择图像模式 即总是检测
if(ctxHandle==nullptr){
// 创建ctx
ret = HF_CreateFaceContextFromResourceFileOptional(path, option, detMode, 5, &ctxHandle);
if (ret != HSUCCEED) {
qInfo() << QString("Create ctx error: %1").arg(ret);
return;
}
// ret = HF_FaceRecognitionThresholdSetting(ctxHandle, 0.36);
// if (ret != HSUCCEED) {
// qInfo() << QString("HF_FaceRecognitionThresholdSetting error: %1").arg(ret);
// return;
// }
}
customIds.clear();
int i = 0;
for (auto it = maps.begin(); it != maps.end(); ++it,++i) {
......@@ -118,8 +118,6 @@ int FaceReconition::featureRemove(){
HResult ret= HF_FeaturesGroupFeatureRemove(ctxHandle,customId);
qDebug()<<"ret:featureRemove "<<ret;
}
HF_ReleaseFaceContext(ctxHandle);
}
}
void FaceReconition::doesItExistEmployee(const cv::Mat &source,std::list<vides_data::faceRecognitionResult>&faces){
......
......@@ -18,6 +18,7 @@ int HumanDetection::findHuManCar(const cv::Mat &source,int res,TCV_HumanDetector
int num = (res == 0) ? TCV_HumanDetectorGetNumOfHuman(detector) :TCV_HumanDetectorGetNumOfCar(detector);
qDebug() << (res == 0 ? "Number of people detected:" : "Number of cars detected:") << num;
TCV_ReleaseCameraStream(stream);
return num;
}
......@@ -131,6 +131,7 @@ void LicensePlateRecognition::licensePlateNumber(const cv::Mat &source, QString
newPlate.time=currentTime;
newPlate.new_color=QString::fromStdString(type);
newPlate.new_plate=QString::fromUtf8(results.plates[i].code);
newPlate.text_confidence=results.plates[i].text_confidence;
vides_data::ParkingArea area;
area.topLeftCornerX=results.plates[i].x1;
area.topLeftCornerY=results.plates[i].y1;
......
#include "ScopeSemaphoreExit.h"

// RAII scope guard: runs the supplied callback when the guard is destroyed,
// guaranteeing cleanup (e.g. QSemaphore::release) on every exit path.
//
// onExit: action to run at scope exit; an empty std::function is tolerated
//         and simply skipped.
ScopeSemaphoreExit::ScopeSemaphoreExit(std::function<void()> onExit)
    : onExit_(std::move(onExit)) {}  // sink parameter: move instead of copying the std::function

ScopeSemaphoreExit::~ScopeSemaphoreExit() {
    if (onExit_) onExit_();  // guard against an empty callback
}
#ifndef SCOPESEMAPHOREEXIT_H
#define SCOPESEMAPHOREEXIT_H
#include <functional>

// RAII scope guard that invokes a callback on destruction.
// Used in this project to release a semaphore on every exit path of a
// function (see CameraHandle).
class ScopeSemaphoreExit {
public:
    // explicit: a guard should be constructed deliberately, not via implicit
    // conversion from a lambda/std::function.
    explicit ScopeSemaphoreExit(std::function<void()> onExit);
    ~ScopeSemaphoreExit();

    // Non-copyable: a copy would run the exit callback twice (e.g. releasing
    // the same semaphore two times). All visible uses construct in place, so
    // deleting these does not affect existing callers.
    ScopeSemaphoreExit(const ScopeSemaphoreExit&) = delete;
    ScopeSemaphoreExit& operator=(const ScopeSemaphoreExit&) = delete;

private:
    std::function<void()> onExit_;  // action to run at scope exit; may be empty
};
#endif // SCOPESEMAPHOREEXIT_H
......@@ -133,6 +133,7 @@ struct LicensePlate
QByteArray img;
qint64 time;
ParkingArea recognition;
float text_confidence;
LicensePlate() {}
};
......
......@@ -93,7 +93,8 @@ SOURCES += \
TaskRunnable.cpp \
CameraHandle.cpp \
ParkingSpaceInfo.cpp \
HumanDetection.cpp
HumanDetection.cpp \
ScopeSemaphoreExit.cpp
HEADERS += \
Common.h \
......@@ -109,7 +110,8 @@ HEADERS += \
TaskRunnable.h \
CameraHandle.h \
ParkingSpaceInfo.h \
HumanDetection.h
HumanDetection.h \
ScopeSemaphoreExit.h
#FORMS += \
# mainwindow.ui
......
......@@ -15,7 +15,7 @@ MainWindow::MainWindow()
modelPaths=qSetting->value("licensePlateRecognition/model_paths").toString();
initVideoOutPath();
initCommon();
deleteLogFileTimer =new QTimer(this);
connect(deleteLogFileTimer, &QTimer::timeout, this, &MainWindow::deleteLogFile);
......@@ -306,13 +306,23 @@ void MainWindow::startCamera(const QString &httpurl){
// QString serialNumber = QSysInfo::machineUniqueId();
QString serialNumber;
findLocalSerialNumber(serialNumber);
vides_data::requestDeviceStatus reStatus;
reStatus.sSn=serialNumber;
reStatus.status=1;
reStatus.type=1;
reStatus.ip_addr=instace.GetLocalIp();
HttpService httpService(httpurl);
vides_data::response *re= httpService.httpFindCameras(serialNumber,devices);
if(re->code==0){
if(devices.list.size()<=0){
instace.deleteObj(re);
return;
vides_data::response *res=httpService.httpPostDeviceStatus(reStatus);
if(res->code!=0){
qInfo()<<"盒子状态上报失败 code:"<<res->code<<"msg:"<<res->data;
}
instace.deleteObj(res);
httpService.setHttpUrl(httpurl);
vides_data::response *re= httpService.httpFindCameras(serialNumber,devices);
if(re->code==0 || re->code==20004){
QString username = qSetting->value("devices/username").toString();
QString password = qSetting->value("devices/password").toString();
std::map<QString,vides_data::localDeviceStatus*> localDevices;
......@@ -363,23 +373,9 @@ void MainWindow::startCamera(const QString &httpurl){
// 清空 localDevices 容器
localDevices.clear();
}
vides_data::requestDeviceStatus reStatus;
reStatus.sSn=serialNumber;
reStatus.status=1;
reStatus.type=1;
reStatus.ip_addr=instace.GetLocalIp();
qDebug()<<"local.ip_addr===>"<<reStatus.ip_addr;
httpService.setHttpUrl(httpurl);
qDebug()<<"httpurl===>"<<httpurl;
qDebug()<<"serialNumber===>"<<serialNumber;
vides_data::response *res=httpService.httpPostDeviceStatus(reStatus);
if(res->code!=0){
qInfo()<<"盒子状态上报失败 code:"<<res->code<<"msg:"<<res->data;
}
updateLocalFace(httpurl);
instace.deleteObj(re);
instace.deleteObj(res);
}
......@@ -718,7 +714,9 @@ void MainWindow::initCameras(vides_data::cameraParameters &parameter,const std::
cameraHandle->sdkDevSetAlarmListener(sdk_handle,1);
int synTime=qSetting->value("timer/dev_snap_syn_timer").toInt();
cameraHandle->initSdkRealTimeDevSnapSyn(sdk_handle,synTime);
uint64 face_frequency=qSetting->value("devices/face_frequency").toULongLong();
cameraHandle->initSdkRealTimeDevSnapSyn(sdk_handle,synTime,face_frequency);
int seTime=qSetting->value("timer/semaphore_time").toInt();
cameraHandle->setTimeoutMs(seTime);
cameraHandle->initParkingSpaceInfo(areas);
......@@ -777,11 +775,15 @@ void MainWindow::createDirectory(int flag,const QString& dirName, const QString&
}
}
void MainWindow::initVideoOutPath(){
void MainWindow::initCommon(){
createDirectory(0x01,"frame_images", "目录创建成功", "目录创建失败");
createDirectory(0x00,"frame_video", "创建视频目录成功", "视频目录创建失败");
createDirectory(0x02,"images", "图片目录创建成功", "图片目录创建失败");
float carConfidenceMax=qSetting->value("devices/carConfidenceMax").toFloat();
float carConfidenceMin=qSetting->value("devices/carConfidenceMin").toFloat();
Common& instance = Common::getInstance();
instance.setCarConfidenceMax(carConfidenceMax);
instance.setCarConfidenceMin(carConfidenceMin);
}
MainWindow::~MainWindow()
......
......@@ -34,12 +34,13 @@ class MainWindow : public QObject
public:
explicit MainWindow();
void initVideoOutPath();
void initCommon();
void setVideoPath(int flag, const QString& path);
void createDirectory(int flag,const QString& dirName, const QString& successMsg, const QString& failureMsg);
void initFaceFaceRecognition();
void initCameras(vides_data::cameraParameters &parameter,const std::list<vides_data::responseArea>&areas);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment