Commit 6b53f770 by “liusq”

人形调度器负责分配线程

parent 5fe7062a
#ifndef CAMERAHANDLE_H #ifndef CAMERAHANDLE_H
#define CAMERAHANDLE_H #define CAMERAHANDLE_H
#include "RecognitionInfo.h" #include "RecognitionInfo.h"
#include "FaceRecognition.h" #include "FaceReconitionHandle.h"
#include "HttpService.h" #include "HttpService.h"
#include "LicensePlateRecognition.h" #include "LicensePlateRecognition.h"
#include "Json_Header/AlarmInfo.h" #include "Json_Header/AlarmInfo.h"
...@@ -16,7 +16,6 @@ ...@@ -16,7 +16,6 @@
#include "Json_Header/OPMachine.h" #include "Json_Header/OPMachine.h"
#include "mainwindow.h" #include "mainwindow.h"
#include "ParkingSpaceInfo.h" #include "ParkingSpaceInfo.h"
#include "so_human_sdk.h"
#include "hyper_lpr_sdk.h" #include "hyper_lpr_sdk.h"
#include <QPolygon> #include <QPolygon>
#include <QPainterPath> #include <QPainterPath>
...@@ -41,13 +40,12 @@ enum CAR_INFORMATION { ...@@ -41,13 +40,12 @@ enum CAR_INFORMATION {
class CameraHandle: public QObject { class CameraHandle: public QObject {
Q_OBJECT Q_OBJECT
public: public:
CameraHandle(QString &url,QString &httpUrl,QString &sSn, int &channel, CameraHandle(QString &url,QString &httpUrl,QString &sSn, int &channel, int imageSave);
const QString &modelPaths,
float carConfidence,float carShapeConfidence, int imageSave);
CameraHandle(); CameraHandle();
~CameraHandle(); ~CameraHandle();
int sdkDevLoginSyn(QString sDevId, int nDevPort, QString sUserName, QString sPassword, int nTimeout); int sdkDevLoginSyn(QString sDevId, int nDevPort, QString sUserName, QString sPassword, int nTimeout);
//int SdkMediaGetFaceImage(int hDevice, int nSeq, int nTimeout); //int SdkMediaGetFaceImage(int hDevice, int nSeq, int nTimeout);
int sdkDevSetAlarmListener(XSDK_HANDLE hDevice, int bListener); int sdkDevSetAlarmListener(XSDK_HANDLE hDevice, int bListener);
int getHdevice(); int getHdevice();
...@@ -56,9 +54,12 @@ public: ...@@ -56,9 +54,12 @@ public:
void clearCameraHandle(); void clearCameraHandle();
void initAlgorithmParameter(float &height_reference);
// void rebindTimer(int hDevice); // void rebindTimer(int hDevice);
void initSdkRealTimeDevSnapSyn(int hDevice,int syn_timer,uint64 face_frequency); void initSdkRealTimeDevSnapSyn(int hDevice,int syn_timer,uint64 face_frequency);
void notificationUpdateImageMap(std::map<QString,QString>&maps,int numberFaces,float confidence);
void featureRemove();
void updateImage(const cv::Mat & frame,qint64 currentTime); void updateImage(const cv::Mat & frame,qint64 currentTime);
...@@ -157,6 +158,8 @@ private : ...@@ -157,6 +158,8 @@ private :
std::map<int, vides_data::responseRecognitionData> videoCurrentData; std::map<int, vides_data::responseRecognitionData> videoCurrentData;
std::map<QString, QString> currentData; std::map<QString, QString> currentData;
FaceReconitionHandle *faceReconitionHandle;
//每个区域编号对应一个区域信息 //每个区域编号对应一个区域信息
std::map<int,ParkingSpaceInfo*>parkMap; std::map<int,ParkingSpaceInfo*>parkMap;
...@@ -171,11 +174,7 @@ private : ...@@ -171,11 +174,7 @@ private :
QTimer *dev_snap_syn_timer; QTimer *dev_snap_syn_timer;
int offlineCount=0; int offlineCount=0;
TCV_HumanDetector *detector;
P_HLPR_Context ctx ;
QSemaphore semaphore; QSemaphore semaphore;
int image_save; int image_save;
......
...@@ -76,6 +76,13 @@ float Common::getCarConfidenceMin() const{ ...@@ -76,6 +76,13 @@ float Common::getCarConfidenceMin() const{
void Common::setCarConfidenceMin(float carConfidenceMin){ void Common::setCarConfidenceMin(float carConfidenceMin){
this->carConfidenceMin=carConfidenceMin; this->carConfidenceMin=carConfidenceMin;
} }
int Common::getHumanDetectionLen() const{
return humanDetectionLen;
}
void Common::setHumanDetectionLen(int humanDetectionLen){
this->humanDetectionLen=humanDetectionLen;
}
QString Common::GetLocalIp() { QString Common::GetLocalIp() {
QString ipAddress; QString ipAddress;
QList<QHostAddress> list = QNetworkInterface::allAddresses(); QList<QHostAddress> list = QNetworkInterface::allAddresses();
......
...@@ -50,6 +50,14 @@ public: ...@@ -50,6 +50,14 @@ public:
float getCarConfidenceMin() const; float getCarConfidenceMin() const;
void setCarConfidenceMin(float carConfidenceMin); void setCarConfidenceMin(float carConfidenceMin);
int getHumanDetectionLen() const;
void setHumanDetectionLen(int humanDetectionLen);
template <typename T>
const T& clamp(const T& v, const T& lo, const T& hi)
{
return (v < lo) ? lo : (hi < v) ? hi : v;
}
template<typename T> template<typename T>
void deleteObj(T*& obj) { void deleteObj(T*& obj) {
if(obj != nullptr) { if(obj != nullptr) {
...@@ -64,6 +72,7 @@ private: ...@@ -64,6 +72,7 @@ private:
QString images; QString images;
float carConfidenceMax; float carConfidenceMax;
float carConfidenceMin; float carConfidenceMin;
int humanDetectionLen;
Common(); Common();
~Common(); ~Common();
......
//#ifndef FACEDETECTIONPARKINGPUSHIMPL_H
//#define FACEDETECTIONPARKINGPUSHIMPL_H
//#include "XSDKPublic.h"
//#include "FaceDetectionParkingPush.h"
//#include "XNetSDKDefine.h"
//#include "Common.h"
//#include "CameraThread.h"
//#include "MediaFaceImage.h"
//class FaceDetectionParkingPushImpl {
//public:
// FaceDetectionParkingPushImpl(FaceDetectionParkingPush* parent,QString &framePath, QString &url);
// int SdkInit(QString &szConfigPath, QString &szTempPath);
// XSDK_HANDLE SdkDevLoginSyn(QString sDevId, int nDevPort, QString sUserName, QString sPassword, int nTimeout);
// XSDK_HANDLE SdkMediaGetFaceImage(XSDK_HANDLE hDevice, int nSeq, int nTimeout);
// int callbackFunction(XSDK_HANDLE hObject, int nMsgId, int nParam1, int nParam2, int nParam3, const char* szString, void* pObject, int64 lParam, int nSeq, void* pUserData, void* pMsg);
// CameraThread *getCameraThread();
//private:
// SXSDKInitParam *pParam;
// SXSDKLoginParam *loginParam;
// SXMediaFaceImageReq *sxMediaFaceImageReq;
// CameraThread *cameraThread;
// QString framePath;
// QString url;
// FaceDetectionParkingPush* parent;
//};
//#endif // FACEDETECTIONPARKINGPUSHIMPL_H
#include "FaceRecognition.h" #include "FaceReconitionHandle.h"
#include <QImage> #include <QImage>
#include <QThread> #include <QThread>
#include <iostream> #include <iostream>
#define cimg_display 0 #define cimg_display 0
#include "CImg.h" #include "CImg.h"
using namespace cimg_library; using namespace cimg_library;
FaceReconitionHandle::FaceReconitionHandle() {
}
FaceReconition::FaceReconition() {} FaceReconitionHandle::~FaceReconitionHandle(){
FaceReconition::~FaceReconition(){
if (ctxHandle != nullptr) { if (ctxHandle != nullptr) {
HF_ReleaseFaceContext(ctxHandle); HF_ReleaseFaceContext(ctxHandle);
ctxHandle = nullptr; ctxHandle = nullptr;
} }
} }
FaceReconition* FaceReconition::instance = nullptr;
cv::Mat FaceReconition::loadImage(const QString &path) { cv::Mat FaceReconitionHandle::loadImage(const QString &path) {
// 尝试使用OpenCV直接加载图像 // 尝试使用OpenCV直接加载图像
std::string stdPath = path.toStdString(); std::string stdPath = path.toStdString();
cv::Mat image = cv::imread(stdPath, cv::IMREAD_COLOR); cv::Mat image = cv::imread(stdPath, cv::IMREAD_COLOR);
...@@ -31,12 +29,12 @@ cv::Mat FaceReconition::loadImage(const QString &path) { ...@@ -31,12 +29,12 @@ cv::Mat FaceReconition::loadImage(const QString &path) {
qDebug() << "图像以OpenCV成功加载。"; qDebug() << "图像以OpenCV成功加载。";
return image; return image;
} }
return loadImageFromByteStream(path); return loadImageFromByteStream(path);
} }
void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numberFaces,float confidence){ void FaceReconitionHandle::initSourceImageMap(std::map<QString,QString>&maps,int numberFaces,float confidence){
//QWriteLocker locker(&rwLock); QWriteLocker locker(&rwLock);
featureRemove(); featureRemove();
HResult ret; HResult ret;
// 初始化context // 初始化context
...@@ -47,7 +45,7 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe ...@@ -47,7 +45,7 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe
QString bPath = QCoreApplication::applicationDirPath() + "/model_zip/T1_5"; QString bPath = QCoreApplication::applicationDirPath() + "/model_zip/T1_5";
#else #else
#error "不支持的架构" #error "不支持的架构"
#endif #endif
QByteArray && bypath = bPath.toUtf8(); QByteArray && bypath = bPath.toUtf8();
char* spath = bypath.data(); char* spath = bypath.data();
...@@ -65,7 +63,7 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe ...@@ -65,7 +63,7 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe
customIds.clear(); customIds.clear();
int i = 0; int i = 0;
qInfo()<< "加载图像size: "<<maps.size(); qInfo()<< "加载图像size: "<<maps.size();
for (auto it = maps.begin(); it != maps.end(); ++it,++i) { for (auto it = maps.begin(); it != maps.end(); ++it,++i) {
const QString& key = it->first; const QString& key = it->first;
QString& value = it->second; QString& value = it->second;
...@@ -80,7 +78,7 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe ...@@ -80,7 +78,7 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe
imageData.width = image.cols; imageData.width = image.cols;
imageData.rotation = VIEW_ROTATION_0; imageData.rotation = VIEW_ROTATION_0;
imageData.format = FORMAT_BGR; imageData.format = FORMAT_BGR;
HImageHandle imageSteamHandle; HImageHandle imageSteamHandle;
ret = HF_CreateImageStream(&imageData, &imageSteamHandle); ret = HF_CreateImageStream(&imageData, &imageSteamHandle);
this->configConfidence=confidence; this->configConfidence=confidence;
...@@ -89,25 +87,25 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe ...@@ -89,25 +87,25 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe
HF_ReleaseImageStream(imageSteamHandle); // 释放资源 HF_ReleaseImageStream(imageSteamHandle); // 释放资源
return; return;
} }
HF_MultipleFaceData multipleFaceData = {0}; HF_MultipleFaceData multipleFaceData = {0};
HF_FaceContextRunFaceTrack(ctxHandle, imageSteamHandle, &multipleFaceData); HF_FaceContextRunFaceTrack(ctxHandle, imageSteamHandle, &multipleFaceData);
if (multipleFaceData.detectedNum <= 0) { if (multipleFaceData.detectedNum <= 0) {
qInfo() << QString("initSourceImageMap:未检测到人脸: %1").arg(key); qInfo() << QString("initSourceImageMap:未检测到人脸: %1").arg(key);
HF_ReleaseImageStream(imageSteamHandle); // 释放资源 HF_ReleaseImageStream(imageSteamHandle); // 释放资源
return; return;
} }
HF_FaceFeature feature = {0}; HF_FaceFeature feature = {0};
ret = HF_FaceFeatureExtract(ctxHandle, imageSteamHandle, multipleFaceData.tokens[0], &feature); ret = HF_FaceFeatureExtract(ctxHandle, imageSteamHandle, multipleFaceData.tokens[0], &feature);
if (ret != HSUCCEED) { if (ret != HSUCCEED) {
qInfo() << QString("特征提取出错: %1").arg(ret); qInfo() << QString("特征提取出错: %1").arg(ret);
HF_ReleaseImageStream(imageSteamHandle); // 释放资源 HF_ReleaseImageStream(imageSteamHandle); // 释放资源
return; return;
} }
char* tagName = new char[key.size() + 1]; char* tagName = new char[key.size() + 1];
std::strcpy(tagName, key.toStdString().c_str()); std::strcpy(tagName, key.toStdString().c_str());
HF_FaceFeatureIdentity identity = {0}; HF_FaceFeatureIdentity identity = {0};
...@@ -115,16 +113,16 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe ...@@ -115,16 +113,16 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe
identity.customId = i; identity.customId = i;
customIds.push_back( identity.customId); customIds.push_back( identity.customId);
identity.tag = tagName; identity.tag = tagName;
ret = HF_FeaturesGroupInsertFeature(ctxHandle, identity); ret = HF_FeaturesGroupInsertFeature(ctxHandle, identity);
if (ret != HSUCCEED) { if (ret != HSUCCEED) {
qInfo() << QString("插入失败: %1").arg(ret); qInfo() << QString("插入失败: %1").arg(ret);
HF_ReleaseImageStream(imageSteamHandle); // 释放资源 HF_ReleaseImageStream(imageSteamHandle); // 释放资源
return; return;
} }
delete[] tagName; delete[] tagName;
ret = HF_ReleaseImageStream(imageSteamHandle); ret = HF_ReleaseImageStream(imageSteamHandle);
if (ret == HSUCCEED) { if (ret == HSUCCEED) {
imageSteamHandle = nullptr; imageSteamHandle = nullptr;
...@@ -135,7 +133,7 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe ...@@ -135,7 +133,7 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe
} }
} }
int FaceReconition::featureRemove(){ void FaceReconitionHandle::featureRemove(){
if(customIds.size()>0){ if(customIds.size()>0){
for(auto customId:customIds){ for(auto customId:customIds){
HResult ret= HF_FeaturesGroupFeatureRemove(ctxHandle,customId); HResult ret= HF_FeaturesGroupFeatureRemove(ctxHandle,customId);
...@@ -145,7 +143,7 @@ int FaceReconition::featureRemove(){ ...@@ -145,7 +143,7 @@ int FaceReconition::featureRemove(){
} }
cv::Mat FaceReconition::loadImageFromByteStream(const QString& filePath) { cv::Mat FaceReconitionHandle::loadImageFromByteStream(const QString& filePath) {
try { try {
// 使用 CImg 读取 JPEG 图像 // 使用 CImg 读取 JPEG 图像
...@@ -179,8 +177,9 @@ cv::Mat FaceReconition::loadImageFromByteStream(const QString& filePath) { ...@@ -179,8 +177,9 @@ cv::Mat FaceReconition::loadImageFromByteStream(const QString& filePath) {
void FaceReconition::doesItExistEmployee(const cv::Mat &source,std::list<vides_data::faceRecognitionResult>&faces){ void FaceReconitionHandle::doesItExistEmployee(const cv::Mat &source,std::list<vides_data::faceRecognitionResult>&faces){
//QReadLocker locker(&rwLock); QReadLocker locker(&rwLock);
HResult ret; HResult ret;
HF_ContextCustomParameter parameter = {0}; HF_ContextCustomParameter parameter = {0};
HF_ImageData imageData = {0}; HF_ImageData imageData = {0};
...@@ -189,7 +188,7 @@ void FaceReconition::doesItExistEmployee(const cv::Mat &source,std::list<vides_d ...@@ -189,7 +188,7 @@ void FaceReconition::doesItExistEmployee(const cv::Mat &source,std::list<vides_d
imageData.width = source.cols; imageData.width = source.cols;
imageData.rotation = VIEW_ROTATION_0; imageData.rotation = VIEW_ROTATION_0;
imageData.format = FORMAT_BGR; imageData.format = FORMAT_BGR;
HImageHandle imageSteamHandle; HImageHandle imageSteamHandle;
ret = HF_CreateImageStream(&imageData, &imageSteamHandle); ret = HF_CreateImageStream(&imageData, &imageSteamHandle);
if (ret != HSUCCEED) { if (ret != HSUCCEED) {
...@@ -198,12 +197,12 @@ void FaceReconition::doesItExistEmployee(const cv::Mat &source,std::list<vides_d ...@@ -198,12 +197,12 @@ void FaceReconition::doesItExistEmployee(const cv::Mat &source,std::list<vides_d
} }
HF_MultipleFaceData multipleFaceData = {0}; HF_MultipleFaceData multipleFaceData = {0};
HF_FaceContextRunFaceTrack(ctxHandle, imageSteamHandle, &multipleFaceData); HF_FaceContextRunFaceTrack(ctxHandle, imageSteamHandle, &multipleFaceData);
if (multipleFaceData.detectedNum <= 0) { if (multipleFaceData.detectedNum <= 0) {
qDebug()<<QString("search 未检测到人脸"); qDebug()<<QString("search 未检测到人脸");
return ; return ;
} }
std::vector<std::vector<float>> features; std::vector<std::vector<float>> features;
// 被搜索的目标这边推荐使用拷贝式的接口来获取特征向量 // 被搜索的目标这边推荐使用拷贝式的接口来获取特征向量
HInt32 featureNum; HInt32 featureNum;
...@@ -233,7 +232,7 @@ void FaceReconition::doesItExistEmployee(const cv::Mat &source,std::list<vides_d ...@@ -233,7 +232,7 @@ void FaceReconition::doesItExistEmployee(const cv::Mat &source,std::list<vides_d
qInfo()<<QString("搜索失败: %1").arg(ret); qInfo()<<QString("搜索失败: %1").arg(ret);
return ; return ;
} }
qDebug()<<QString("搜索置信度: %1").arg(confidence); qDebug()<<QString("搜索置信度: %1").arg(confidence);
qDebug()<<QString("匹配到的tag: %1").arg(searchIdentity.tag); qDebug()<<QString("匹配到的tag: %1").arg(searchIdentity.tag);
qDebug()<<QString("匹配到的customId: %1").arg(searchIdentity.customId); qDebug()<<QString("匹配到的customId: %1").arg(searchIdentity.customId);
...@@ -248,7 +247,7 @@ void FaceReconition::doesItExistEmployee(const cv::Mat &source,std::list<vides_d ...@@ -248,7 +247,7 @@ void FaceReconition::doesItExistEmployee(const cv::Mat &source,std::list<vides_d
newface.height=multipleFaceData.rects[rect].height; newface.height=multipleFaceData.rects[rect].height;
faces.push_back(newface); faces.push_back(newface);
} }
rect++; rect++;
} }
ret = HF_ReleaseImageStream(imageSteamHandle); ret = HF_ReleaseImageStream(imageSteamHandle);
......
#ifndef FACERECOGNITION_H #ifndef FACERECONITIONHANDLE_H
#define FACERECOGNITION_H #define FACERECONITIONHANDLE_H
#include "hyperface.h" #include "hyperface.h"
#include "herror.h" #include "herror.h"
#include "LogHandle.h" #include "LogHandle.h"
#include "VidesData.h" #include "VidesData.h"
#include <opencv2/opencv.hpp> #include <opencv2/opencv.hpp>
#include <QReadWriteLock>
#include<QCoreApplication> #include<QCoreApplication>
class FaceReconition class FaceReconitionHandle
{ {
private: private:
static FaceReconition* instance;
HContextHandle ctxHandle=nullptr; HContextHandle ctxHandle=nullptr;
float configConfidence; float configConfidence;
std::vector<int32_t>customIds; std::vector<int32_t>customIds;
FaceReconition(); QReadWriteLock rwLock;
~FaceReconition();
public: public:
static FaceReconition& getInstance() FaceReconitionHandle();
{ ~FaceReconitionHandle();
static FaceReconition instance;
return instance;
}
cv::Mat loadImage(const QString &path); cv::Mat loadImage(const QString &path);
cv::Mat loadImageFromByteStream(const QString& filePath); cv::Mat loadImageFromByteStream(const QString& filePath);
...@@ -39,8 +33,7 @@ public: ...@@ -39,8 +33,7 @@ public:
void initSourceImageMap(std::map<QString,QString>&maps,int numberFaces,float confidence); void initSourceImageMap(std::map<QString,QString>&maps,int numberFaces,float confidence);
int featureRemove(); void featureRemove();
};
#endif // FACERECOGNITION_H };
#endif // FACERECONITIONHANDLE_H
...@@ -354,7 +354,11 @@ vides_data::response *HttpService::httpPostFacePopulation(QByteArray &img,int &h ...@@ -354,7 +354,11 @@ vides_data::response *HttpService::httpPostFacePopulation(QByteArray &img,int &h
resp->code=map["code"].toInt(); resp->code=map["code"].toInt();
resp->msg=map["message"].toString(); resp->msg=map["message"].toString();
}else{ }else{
qDebug()<<"httpPostFacePopulation===>";
qDebug()<<m_httpClient.errorCode(); qDebug()<<m_httpClient.errorCode();
qDebug()<<m_httpClient.errorString();
qDebug()<<"httpPostFacePopulation===>end";
resp->code=2; resp->code=2;
resp->msg=OPERATION_FAILED; resp->msg=OPERATION_FAILED;
} }
...@@ -502,6 +506,8 @@ vides_data::response *HttpService::httpDownload( const QString &filePath,QString ...@@ -502,6 +506,8 @@ vides_data::response *HttpService::httpDownload( const QString &filePath,QString
resp->msg=map["message"].toString(); resp->msg=map["message"].toString();
}else{ }else{
qDebug()<<m_httpClient.errorCode(); qDebug()<<m_httpClient.errorCode();
qDebug()<<m_httpClient.errorCode();
resp->code=2; resp->code=2;
resp->msg=OPERATION_FAILED; resp->msg=OPERATION_FAILED;
} }
......
...@@ -2,31 +2,48 @@ ...@@ -2,31 +2,48 @@
#define HUMANDETECTION_H #define HUMANDETECTION_H
#include "VidesData.h" #include "VidesData.h"
#include "so_human_sdk.h" #include "so_human_sdk.h"
#include "ScopeSemaphoreExit.h"
#include <signal.h>
#include <QDateTime>
#include <opencv2/opencv.hpp> #include <opencv2/opencv.hpp>
#include <QDebug> #include <QDebug>
class HumanDetection #include <atomic>
{ #include<QThread>
class HumanDetection:public QObject {
Q_OBJECT
public: public:
HumanDetection(); HumanDetection(const QString &modelPaths,
float carShapeConfidence);
~HumanDetection(); ~HumanDetection();
void initDetector();
int findHuManCar(const cv::Mat &source,int res,TCV_HumanDetector *detector,std::vector<vides_data::ParkingArea> &currentPlate);
static HumanDetection& getInstance() int findHuManCar(const cv::Mat &source,int res,std::map<int,int>&reMap,
{ std::vector<vides_data::ParkingArea> &currentPlate);
static HumanDetection instance;
return instance; void setHuManParameter(float &height_reference,int &uniformColor);
}
void setHeightReference(float &height_reference);
void draw_human_on_image(const cv::Mat& image, const TCV_ObjectLocation* boxes, int size); void draw_human_on_image(const cv::Mat& image, const TCV_ObjectLocation* boxes, int size);
qint64 getThreadTime() const;
bool getIsRunning() const;
void onTimeout();
private: private:
static HumanDetection* instance;
//高度基准 //高度基准
float height_reference; float heightReference;
int uniformColor;
TCV_HumanDetector *detector;
std::atomic<qint64> thread_time;
std::atomic<bool> isRunning{false};
}; };
#endif // HUMANDETECTION_H #endif // HUMANDETECTION_H
#include "HumanDetectionManage.h"
// Builds a manager whose semaphore admits at most `humanDetectionLen`
// concurrent detection requests (one slot per pooled worker).
HumanDetectionManage::HumanDetectionManage(int humanDetectionLen)
    : humanDetectionLen(humanDetectionLen),
      semaphore(humanDetectionLen) {
}
void HumanDetectionManage::initHumanDetectionManage(const QString &modelPaths,
float carShapeConfidence,float &height_reference,int &uniformColor) {
for (int i = 0; i < humanDetectionLen; ++i) {
HumanDetection* human=new HumanDetection(modelPaths,carShapeConfidence);
human->setHuManParameter(height_reference,uniformColor);
humanDetections.emplace_back(human);
}
}
// Destroys every pooled worker through the shared Common helper.
HumanDetectionManage::~HumanDetectionManage(){
    Common &common = Common::getInstance();
    // Pointer is passed by copy, matching the original deleteObj usage.
    for (auto *worker : humanDetections) {
        common.deleteObj(worker);
    }
}
// Selects the idle worker that has waited longest since its last run
// (measured against the current epoch-seconds clock); ties are broken by a
// uniform random draw so load spreads evenly across the pool.
// Returns nullptr when every worker is currently running.
HumanDetection* HumanDetectionManage::schedulingAlgorithm(QString sSn) {
    const qint64 now = QDateTime::currentSecsSinceEpoch();

    // Idle workers whose wait time equals the current maximum.
    std::vector<HumanDetection*> candidates;
    qint64 longestWait = 0;

    for (HumanDetection* worker : humanDetections) {
        if (worker->getIsRunning()) {
            continue;  // busy workers are never scheduled
        }
        const qint64 waited = std::abs(now - worker->getThreadTime());
        if (waited > longestWait) {
            // New maximum: previous candidates are no longer the stalest.
            longestWait = waited;
            candidates.clear();
            candidates.push_back(worker);
        } else if (waited == longestWait) {
            candidates.push_back(worker);
        }
    }

    if (candidates.empty()) {
        return nullptr;  // all workers busy
    }
    if (candidates.size() == 1) {
        return candidates.front();
    }

    // Random tie-break among equally stale workers.
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_int_distribution<> pick(0, static_cast<int>(candidates.size()) - 1);
    return candidates[pick(gen)];
}
// Acquires a pool slot, picks a worker via the scheduler, and runs the
// detection on it. Returns the worker's detection result, or -2 when no
// worker is available to schedule.
int HumanDetectionManage::executeFindHuManCar(const cv::Mat &source, int res,
    std::vector<vides_data::ParkingArea> &currentPlate,std::map<int,int>&resMap,QString sSn) {
    semaphore.acquire();
    // The slot is released automatically on every exit path.
    ScopeSemaphoreExit guard([this]() {
        semaphore.release();
    });

    HumanDetection* worker = schedulingAlgorithm(sSn);
    if (worker == nullptr) {
        qDebug() << "没有可用的HumanDetection对象可以调度";
        return -2;
    }
    qInfo() << "调度算法抢到===>sn"<<sSn<<"res"<<res;
    return worker->findHuManCar(source, res,resMap, currentPlate);
}
#ifndef HUMANDETECTIONMANAGE_H
#define HUMANDETECTIONMANAGE_H
#include "HumanDetection.h"
#include "Common.h"
#include "VidesData.h"
#include "ScopeSemaphoreExit.h"
#include <QWaitCondition>
#include <QMutex>
#include <QThread>
#include <random>
#include <QSemaphore>
#include <vector>
#include <opencv2/opencv.hpp>
// Owns a fixed-size pool of HumanDetection workers and schedules detection
// requests onto the least-recently-used idle worker, throttled by a
// counting semaphore sized to the pool.
class HumanDetectionManage{
public:
// humanDetectionLen: number of pooled workers == semaphore slot count.
HumanDetectionManage(int humanDetectionLen);
~HumanDetectionManage();
// Meyers singleton. NOTE(review): the parameter only takes effect on the
// very first call — the static local is constructed once, so later calls
// with a different humanDetectionLen silently reuse the original size.
static HumanDetectionManage& getInstance(int humanDetectionLen)
{
static HumanDetectionManage instance(humanDetectionLen);
return instance;
}
// Creates humanDetectionLen workers, each configured with the given model
// path and detection thresholds. Must be called before executeFindHuManCar.
void initHumanDetectionManage(const QString &modelPaths,
float carShapeConfidence,float &height_reference,int &uniformColor);
// Blocks on the semaphore, schedules an idle worker and runs detection.
// Returns the worker's result, or -2 when no worker could be scheduled.
int executeFindHuManCar(const cv::Mat &source,int res,std::vector<vides_data::ParkingArea> &currentPlate,
std::map<int,int>&resMap, QString sSn);
// Picks the longest-idle worker (random tie-break); nullptr if all busy.
HumanDetection *schedulingAlgorithm(QString sSn);
private:
// NOTE(review): appears unused alongside the Meyers singleton above —
// likely a leftover from an older singleton implementation; confirm.
static HumanDetectionManage* instance;
// Pooled workers, created in initHumanDetectionManage, deleted in dtor.
std::vector<HumanDetection*>humanDetections;
int humanDetectionLen;
// Limits concurrent executeFindHuManCar calls to the pool size.
QSemaphore semaphore;
QWaitCondition waitCondition;
QMutex mutex;
};
#endif // HUMANDETECTIONMANAGE_H
...@@ -6,27 +6,11 @@ ...@@ -6,27 +6,11 @@
LicensePlateRecognition::LicensePlateRecognition() {} LicensePlateRecognition::LicensePlateRecognition() {}
LicensePlateRecognition::~LicensePlateRecognition(){ LicensePlateRecognition::~LicensePlateRecognition(){
HLPR_ReleaseContext(ctx);
} }
LicensePlateRecognition* LicensePlateRecognition::instance = nullptr; LicensePlateRecognition* LicensePlateRecognition::instance = nullptr;
//void LicensePlateRecognition::initHlprContext(const QString &modelPaths, const QString &carCascade, float carConfidence){
// HLPR_ContextConfiguration configuration = {0};
// QByteArray && by_mpath=modelPaths.toUtf8();
// char* m_path=by_mpath.data();
// configuration.models_path = m_path;
// configuration.max_num = 5;
// configuration.det_level = DETECT_LEVEL_LOW;
// configuration.use_half = false;
// configuration.nms_threshold = 0.5f;
// configuration.rec_confidence_threshold = carConfidence;
// configuration.box_conf_threshold = 0.30f;
// configuration.threads = 1;
// this->carCascadeUrl=carCascade;
// ctx = HLPR_CreateContext(&configuration);
//}
void LicensePlateRecognition::oldLicensePlateNumber(const cv::Mat &source,const QString &modelPaths,QString & lpNumber){ void LicensePlateRecognition::oldLicensePlateNumber(const cv::Mat &source,const QString &modelPaths,QString & lpNumber){
HLPR_ImageData data = {0}; HLPR_ImageData data = {0};
data.data = source.data; data.data = source.data;
...@@ -112,8 +96,26 @@ void LicensePlateRecognition::filterLicensePlateConfidenceMax(vides_data::reques ...@@ -112,8 +96,26 @@ void LicensePlateRecognition::filterLicensePlateConfidenceMax(vides_data::reques
} }
} }
} }
// Lazily creates the shared HyperLPR recognition context on first call.
// Subsequent calls are no-ops, so a different carConfidence passed later
// is silently ignored. NOTE(review): the null-check is unsynchronized —
// confirm this is only ever called from one thread (or before workers
// start), otherwise two contexts could race into existence.
void LicensePlateRecognition::initHlprContext(const QString &modelPaths, float carConfidence){
if(ctx==nullptr){
HLPR_ContextConfiguration configuration = {0};
// by_mpath lives until the end of this function, so m_path stays valid
// for the HLPR_CreateContext call below.
QByteArray && by_mpath=modelPaths.toUtf8();
char* m_path=by_mpath.data();
configuration.models_path = m_path;
// Detect at most 5 plates per frame.
configuration.max_num = 5;
configuration.det_level = DETECT_LEVEL_LOW;
configuration.use_half = false;
configuration.nms_threshold = 0.5f;
// Caller-supplied recognition confidence threshold (first call wins).
configuration.rec_confidence_threshold = carConfidence;
configuration.box_conf_threshold = 0.30f;
configuration.threads = 1;
ctx = HLPR_CreateContext(&configuration);
}
}
void LicensePlateRecognition::licensePlateNumber(const cv::Mat &source, QString &lpNumber,vides_data::requestLicensePlate &plate, void LicensePlateRecognition::licensePlateNumber(const cv::Mat &source, QString &lpNumber,vides_data::requestLicensePlate &plate,
qint64 currentTime,P_HLPR_Context ctx) { qint64 currentTime) {
// 执行一帧图像数据检测行人 // 执行一帧图像数据检测行人
......
...@@ -23,23 +23,20 @@ public: ...@@ -23,23 +23,20 @@ public:
} }
//识别车牌号 //识别车牌号
void licensePlateNumber(const cv::Mat &source,QString & lpNumber, vides_data::requestLicensePlate &plate, void licensePlateNumber(const cv::Mat &source,QString & lpNumber, vides_data::requestLicensePlate &plate,
qint64 currentTime,P_HLPR_Context ctx); qint64 currentTime);
void filterLicensePlateConfidenceMax(vides_data::requestLicensePlate &plate,vides_data::LicensePlate &max); void filterLicensePlateConfidenceMax(vides_data::requestLicensePlate &plate,vides_data::LicensePlate &max);
void oldLicensePlateNumber(const cv::Mat &source,const QString &modelPaths,QString & lpNumber); void oldLicensePlateNumber(const cv::Mat &source,const QString &modelPaths,QString & lpNumber);
// void initHlprContext(const QString &modelPaths,const QString &carCascade,float carConfidence); void initHlprContext(const QString &modelPaths,float carConfidence);
void replaceWith1And0( QString &code); void replaceWith1And0( QString &code);
private: private:
static LicensePlateRecognition* instance; static LicensePlateRecognition* instance;
//P_HLPR_Context ctx ;
float carConfidence;
std::mutex carMutex;
P_HLPR_Context ctx=nullptr ;
LicensePlateRecognition(); LicensePlateRecognition();
......
...@@ -211,56 +211,58 @@ int MediaFaceImage::ToFile(const char* pFileName, const void* pData, int nLength ...@@ -211,56 +211,58 @@ int MediaFaceImage::ToFile(const char* pFileName, const void* pData, int nLength
fclose(fp); fclose(fp);
return 0; return 0;
} }
int MediaFaceImage::FaceImageCallBack(XSDK_HANDLE hMedia, int nChannel, cv::Mat &image) { //int MediaFaceImage::FaceImageCallBack(XSDK_HANDLE hMedia, int nChannel, cv::Mat &image) {
const int BufferSize = 1024 * 1024 * 2; // 定义缓冲区大小 // const int BufferSize = 1024 * 1024 * 2; // 定义缓冲区大小
// image.release();
// // 使用智能指针管理资源
// std::unique_ptr<unsigned char[]> pOutBuffer(new unsigned char[BufferSize]);
// int pInOutBufferSize = 0;
// int ret = XSDK_DevSnapSyn(hMedia, nChannel, "", pOutBuffer.get(), &pInOutBufferSize);
// if (ret < 0 || pInOutBufferSize <= 0) {
// qInfo() << "同步设备端抓图失败";
// return -1;
// }
// 使用智能指针管理资源 // // 使用vector管理buffer
std::unique_ptr<unsigned char[]> pOutBuffer(new unsigned char[BufferSize]); // std::vector<uchar> buffer(pInOutBufferSize);
// memcpy(buffer.data(), pOutBuffer.get(), pInOutBufferSize);
// image =std::move(cv::imdecode(buffer, cv::IMREAD_UNCHANGED));
// return pInOutBufferSize; // pOutBuffer由智能指针管理,此处无需手动释放
//}
int MediaFaceImage::FaceImageCallBack(XSDK_HANDLE hMedia, int nChannel, cv::Mat &image) {
const int BufferSize = 1024 * 1024 * 2; // 缓冲区大小
image.release(); // 释放之前的图像
std::unique_ptr<unsigned char[]> pOutBuffer(new unsigned char[BufferSize]); // 智能指针管理内存
int pInOutBufferSize = 0; int pInOutBufferSize = 0;
int ret = XSDK_DevSnapSyn(hMedia, nChannel, "", pOutBuffer.get(), &pInOutBufferSize); int ret = XSDK_DevSnapSyn(hMedia, nChannel, "", pOutBuffer.get(), &pInOutBufferSize);
if (ret < 0 || pInOutBufferSize <= 0) { if (ret < 0 || pInOutBufferSize <= 0) {
qInfo() << "同步设备端抓图失败"; qInfo() << "同步设备端抓图失败";
return -1; return -1;
} }
// 使用vector管理buffer // 使用 std::vector 管理缓冲区数据
std::vector<uchar> buffer(pInOutBufferSize); std::vector<uchar> buffer(pInOutBufferSize);
memcpy(buffer.data(), pOutBuffer.get(), pInOutBufferSize); memcpy(buffer.data(), pOutBuffer.get(), pInOutBufferSize);
image = cv::imdecode(buffer, cv::IMREAD_UNCHANGED);
return pInOutBufferSize; // pOutBuffer由智能指针管理,此处无需手动释放 try {
} cv::Mat decodedImage = cv::imdecode(buffer, cv::IMREAD_UNCHANGED);
//int MediaFaceImage::FaceImageCallBack(XSDK_HANDLE hMedia, int nChannel, cv::Mat &image) if (decodedImage.empty()) {
//{ qInfo() << "图像解码失败";
// // static const int BufferSize = 1024 * 1024 * 2; return -1;
// // static unsigned char pOutBuffer[BufferSize]; }
// const int BufferSize = 1024 * 1024 * 2; image = std::move(decodedImage);
// unsigned char* pOutBuffer = new unsigned char[BufferSize]; } catch (const cv::Exception& e) {
qInfo() << "图像解码过程中捕获异常:" << e.what();
// int pInOutBufferSize = 0; return -1;
// int ret = XSDK_DevSnapSyn(hMedia, nChannel, "", pOutBuffer, &pInOutBufferSize); }
// if (ret < 0 || pInOutBufferSize<=0 ) {
// qInfo() << "同步设备端抓图失败";
// if (pOutBuffer)
// {
// delete[]pOutBuffer;
// pOutBuffer = nullptr;;
// }
// return -1;
// }
// std::vector<uchar> buffer(pInOutBufferSize); return pInOutBufferSize;
// memcpy(buffer.data(), pOutBuffer, pInOutBufferSize); }
// image =std::move(cv::imdecode(buffer, cv::IMREAD_UNCHANGED));;
// if (pOutBuffer)
// {
// delete[]pOutBuffer;
// pOutBuffer = nullptr;;
// }
// return pInOutBufferSize;
//}
int MediaFaceImage::CameraImage(XSDK_HANDLE hMedia,int nChannel,std::vector<uchar> &buffer){ int MediaFaceImage::CameraImage(XSDK_HANDLE hMedia,int nChannel,std::vector<uchar> &buffer){
static const int BufferSize = 1024 * 1024 * 2; // 2MB buffer size static const int BufferSize = 1024 * 1024 * 2; // 2MB buffer size
......
...@@ -9,7 +9,8 @@ ParkingSpaceInfo::ParkingSpaceInfo(){ ...@@ -9,7 +9,8 @@ ParkingSpaceInfo::ParkingSpaceInfo(){
} }
ParkingSpaceInfo::~ParkingSpaceInfo(){ ParkingSpaceInfo::~ParkingSpaceInfo(){
qInfo() << "ParkingSpaceInfo:关闭";
} }
void ParkingSpaceInfo::addQueue(RecognizedInfo &info){ void ParkingSpaceInfo::addQueue(RecognizedInfo &info){
QMutexLocker locker(&queueMutex); QMutexLocker locker(&queueMutex);
......
...@@ -8,6 +8,7 @@ TaskRunnable::TaskRunnable(std::function<void()> newTask, int hDevice, int chann ...@@ -8,6 +8,7 @@ TaskRunnable::TaskRunnable(std::function<void()> newTask, int hDevice, int chann
if(runFunction==SdkCallbackFunction){ if(runFunction==SdkCallbackFunction){
this->callbackFunction = newTask; this->callbackFunction = newTask;
} }
this->setAutoDelete(true); this->setAutoDelete(true);
} }
TaskRunnable::~TaskRunnable(){ TaskRunnable::~TaskRunnable(){
......
#include "TimeoutException.h"

// Always reports the same fixed diagnostic: the wrapped call ran out of time.
TimeoutException::TimeoutException()
    : std::runtime_error("Function execution timed out") {
}

// No resources of its own — the base class handles the message storage.
TimeoutException::~TimeoutException() = default;
#ifndef TIMEOUTEXCEPTION_H
#define TIMEOUTEXCEPTION_H

#include <stdexcept>

// Exception raised when a blocking call exceeds its allotted time budget.
// Derives from std::runtime_error so callers may catch it generically
// (std::exception / std::runtime_error) or specifically by this type.
class TimeoutException : public std::runtime_error {
public:
    // Constructs the exception with a fixed "timed out" message (defined in the .cpp).
    TimeoutException();
    // std::exception's destructor is virtual; mark the override explicitly.
    ~TimeoutException() override;
};

#endif // TIMEOUTEXCEPTION_H
...@@ -20,6 +20,8 @@ namespace vides_data{ ...@@ -20,6 +20,8 @@ namespace vides_data{
constexpr const char *HEADER_TYPE_KAY="Content-Type"; constexpr const char *HEADER_TYPE_KAY="Content-Type";
constexpr const char *HEADER_TYPE_VALUE="application/json"; constexpr const char *HEADER_TYPE_VALUE="application/json";
constexpr const char *PROFLIE_TEST= "test"; constexpr const char *PROFLIE_TEST= "test";
constexpr const char *PROFLIE_DEV= "dev";
struct response struct response
{ {
int code; int code;
......
...@@ -12,46 +12,46 @@ TEMPLATE = app ...@@ -12,46 +12,46 @@ TEMPLATE = app
# depend on your compiler). Please consult the documentation of the # depend on your compiler). Please consult the documentation of the
# deprecated API in order to know how to port your code away from it. # deprecated API in order to know how to port your code away from it.
DEFINES += QT_DEPRECATED_WARNINGS DEFINES += QT_DEPRECATED_WARNINGS
DEFINES += APP_VERSION=\\\"1.0.2\\\" DEFINES += APP_VERSION=\\\"1.0.3\\\"
#QMAKE_LIBDIR += /usr/local/lib QMAKE_LIBDIR += /usr/local/lib
#INCLUDEPATH+=/usr/local/include/opencv4 INCLUDEPATH+=/usr/local/include/opencv4
#INCLUDEPATH+=/usr/local/include/hyperface INCLUDEPATH+=/usr/local/include/hyperface
#INCLUDEPATH+=/usr/local/include/hyper INCLUDEPATH+=/usr/local/include/hyper
#INCLUDEPATH+=/usr/local/include/XNetSDK INCLUDEPATH+=/usr/local/include/XNetSDK
#INCLUDEPATH+=/usr/local/include/human INCLUDEPATH+=/usr/local/include/human
#INCLUDEPATH+=/usr/local/include/CImg INCLUDEPATH+=/usr/local/include/CImg
unix:contains(QMAKE_HOST.arch, x86_64) { #unix:contains(QMAKE_HOST.arch, x86_64) {
QMAKE_LIBDIR += /home/mark/Public/x86_opencv/lib # QMAKE_LIBDIR += /home/mark/Public/x86_opencv/lib
} #}
unix:contains(QMAKE_HOST.arch, arm) { #unix:contains(QMAKE_HOST.arch, arm) {
QMAKE_LIBDIR += /usr/local/lib # QMAKE_LIBDIR += /usr/local/lib
} #}
# 根据编译器类型选择库路径和头文件路径 ## 根据编译器类型选择库路径和头文件路径
unix: { #unix: {
# x86 架构 # # x86 架构
contains(QMAKE_HOST.arch, x86_64) { # contains(QMAKE_HOST.arch, x86_64) {
INCLUDEPATH+=/home/mark/Public/x86_opencv/include/opencv4 # INCLUDEPATH+=/home/mark/Public/x86_opencv/include/opencv4
INCLUDEPATH+=/home/mark/Public/x86_opencv/include/hyperface # INCLUDEPATH+=/home/mark/Public/x86_opencv/include/hyperface
INCLUDEPATH+=/home/mark/Public/x86_opencv/include/hyper # INCLUDEPATH+=/home/mark/Public/x86_opencv/include/hyper
INCLUDEPATH+=/home/mark/Public/x86_opencv/include/XNetSDK # INCLUDEPATH+=/home/mark/Public/x86_opencv/include/XNetSDK
INCLUDEPATH+=/home/mark/Public/x86_opencv/include/human # INCLUDEPATH+=/home/mark/Public/x86_opencv/include/human
INCLUDEPATH+=/home/mark/Public/x86_opencv/include/CImg # INCLUDEPATH+=/home/mark/Public/x86_opencv/include/CImg
} # }
# ARM 架构 # # ARM 架构
contains(QMAKE_HOST.arch, arm) { # contains(QMAKE_HOST.arch, arm) {
INCLUDEPATH+=/usr/local/include/opencv4 # INCLUDEPATH+=/usr/local/include/opencv4
INCLUDEPATH+=/usr/local/include/hyperface # INCLUDEPATH+=/usr/local/include/hyperface
INCLUDEPATH+=/usr/local/include/hyper # INCLUDEPATH+=/usr/local/include/hyper
INCLUDEPATH+=/usr/local/include/XNetSDK # INCLUDEPATH+=/usr/local/include/XNetSDK
INCLUDEPATH+=/usr/local/include/human # INCLUDEPATH+=/usr/local/include/human
} # }
} #}
# You can also make your code fail to compile if it uses deprecated APIs. # You can also make your code fail to compile if it uses deprecated APIs.
# In order to do so, uncomment the following line. # In order to do so, uncomment the following line.
...@@ -83,7 +83,6 @@ LIBS += -lopencv_core \ ...@@ -83,7 +83,6 @@ LIBS += -lopencv_core \
#-lz #-lz
SOURCES += \ SOURCES += \
Common.cpp \ Common.cpp \
FaceReconition.cpp \
LogHandler.cpp \ LogHandler.cpp \
main.cpp \ main.cpp \
mainwindow.cpp \ mainwindow.cpp \
...@@ -96,11 +95,12 @@ SOURCES += \ ...@@ -96,11 +95,12 @@ SOURCES += \
CameraHandle.cpp \ CameraHandle.cpp \
ParkingSpaceInfo.cpp \ ParkingSpaceInfo.cpp \
HumanDetection.cpp \ HumanDetection.cpp \
ScopeSemaphoreExit.cpp ScopeSemaphoreExit.cpp \
FaceReconitionHandle.cpp \
HumanDetectionManage.cpp
HEADERS += \ HEADERS += \
Common.h \ Common.h \
FaceRecognition.h \
LogHandle.h \ LogHandle.h \
mainwindow.h \ mainwindow.h \
LicensePlateRecognition.h \ LicensePlateRecognition.h \
...@@ -113,7 +113,9 @@ HEADERS += \ ...@@ -113,7 +113,9 @@ HEADERS += \
CameraHandle.h \ CameraHandle.h \
ParkingSpaceInfo.h \ ParkingSpaceInfo.h \
HumanDetection.h \ HumanDetection.h \
ScopeSemaphoreExit.h ScopeSemaphoreExit.h \
FaceReconitionHandle.h \
HumanDetectionManage.h
#FORMS += \ #FORMS += \
# mainwindow.ui # mainwindow.ui
......
...@@ -7,6 +7,8 @@ MainWindow::MainWindow() ...@@ -7,6 +7,8 @@ MainWindow::MainWindow()
{ {
sp_this=this; sp_this=this;
LogHandler::Get().installMessageHandler(); LogHandler::Get().installMessageHandler();
QString inifile=QCoreApplication::applicationDirPath()+"/gameras.ini"; QString inifile=QCoreApplication::applicationDirPath()+"/gameras.ini";
...@@ -16,6 +18,9 @@ MainWindow::MainWindow() ...@@ -16,6 +18,9 @@ MainWindow::MainWindow()
modelPaths=qSetting->value("licensePlateRecognition/model_paths").toString(); modelPaths=qSetting->value("licensePlateRecognition/model_paths").toString();
initCommon(); initCommon();
QThreadPool* threadPool = QThreadPool::globalInstance();
threadPool->setMaxThreadCount(12);
deleteLogFileTimer =new QTimer(this); deleteLogFileTimer =new QTimer(this);
connect(deleteLogFileTimer, &QTimer::timeout, this, &MainWindow::deleteLogFile); connect(deleteLogFileTimer, &QTimer::timeout, this, &MainWindow::deleteLogFile);
...@@ -29,20 +34,35 @@ MainWindow::MainWindow() ...@@ -29,20 +34,35 @@ MainWindow::MainWindow()
initFaceFaceRecognition(); initFaceFaceRecognition();
FaceReconition &faceRecognition = FaceReconition::getInstance(); // FaceReconition &faceRecognition = FaceReconition::getInstance();
float confidence=qSetting->value("devices/confidence").toFloat(); // float confidence=qSetting->value("devices/confidence").toFloat();
int faceNumbers=qSetting->value("devices/faceNumbers").toInt(); // int faceNumbers=qSetting->value("devices/faceNumbers").toInt();
if(localImageMap.size()>0){ // if(localImageMap.size()>0){
faceRecognition.initSourceImageMap(localImageMap,faceNumbers,confidence); // faceRecognition.initSourceImageMap(localImageMap,faceNumbers,confidence);
} // }
//LicensePlateRecognition &licensePlateRecogn =LicensePlateRecognition::getInstance(); float heightReference=qSetting->value("devices/height_reference").toFloat();
//licensePlateRecogn.initHlprContext(modelPaths,qSetting->value("licensePlateRecognition/car_cascade_path").toString(),carConfidence); int uniformColor=qSetting->value("devices/uniformColor").toInt();
int humanDetectionLen=qSetting->value("devices/humanDetectionLen").toInt();
float carShapeConfidence=qSetting->value("devices/carShapeConfidence").toFloat();
Common & instace= Common::getInstance();
instace.setHumanDetectionLen(humanDetectionLen);
float carConfidence=qSetting->value("devices/carConfidence").toFloat();
LicensePlateRecognition &licensePlateRecogn =LicensePlateRecognition::getInstance();
licensePlateRecogn.initHlprContext(modelPaths,carConfidence);
HumanDetectionManage &humanDetectionManage= HumanDetectionManage::getInstance(humanDetectionLen);
humanDetectionManage.initHumanDetectionManage(modelPaths,carShapeConfidence,heightReference,uniformColor);
QString httpurl; QString httpurl;
QString profile=qSetting->value("cloudservice/profile","test").toString(); QString profile=qSetting->value("cloudservice/profile","test").toString();
if(strcmp(profile.toUtf8().data(),vides_data::PROFLIE_TEST)==0){ if(strcmp(profile.toUtf8().data(),vides_data::PROFLIE_TEST)==0 ){
httpurl=qSetting->value("cloudservice/test_http").toString(); httpurl=qSetting->value("cloudservice/test_http").toString();
}else if(strcmp(profile.toUtf8().data(),vides_data::PROFLIE_DEV)==0 ) {
httpurl=qSetting->value("cloudservice/dev_http").toString();
}else{ }else{
httpurl=qSetting->value("cloudservice/pro_http").toString(); httpurl=qSetting->value("cloudservice/pro_http").toString();
} }
...@@ -65,6 +85,7 @@ MainWindow::MainWindow() ...@@ -65,6 +85,7 @@ MainWindow::MainWindow()
},Qt::QueuedConnection); },Qt::QueuedConnection);
this->startCamera(httpurl); this->startCamera(httpurl);
batchUpdatesCameraImageMap();
// 设置定时器间隔 // 设置定时器间隔
dePermissionSynTimer->setInterval(dePermissionTimer); dePermissionSynTimer->setInterval(dePermissionTimer);
...@@ -272,22 +293,47 @@ void MainWindow::updateLocalFace(const QString &httpurl) { ...@@ -272,22 +293,47 @@ void MainWindow::updateLocalFace(const QString &httpurl) {
} }
} }
FaceReconition &faceRecognition = FaceReconition::getInstance();
if (isChanged) { if (isChanged) {
if (cloudImageMap.empty()) { if (cloudImageMap.empty()) {
// 如果云端映射现在为空,移除所有特征 // 如果云端映射现在为空,移除所有特征
faceRecognition.featureRemove(); //faceRecognition.featureRemove();
batchUpdatesFeatureRemove();
} else { } else {
float confidence=qSetting->value("devices/confidence").toFloat(); //float confidence=qSetting->value("devices/confidence").toFloat();
int faceNumbers=qSetting->value("devices/faceNumbers").toInt(); //int faceNumbers=qSetting->value("devices/faceNumbers").toInt();
qDebug()<<"startMap != endMap-->"; qDebug()<<"startMap != endMap-->";
faceRecognition.initSourceImageMap(localImageMap,faceNumbers, confidence); // faceRecognition.initSourceImageMap(localImageMap,faceNumbers, confidence);
batchUpdatesCameraImageMap();
} }
} }
for (vides_data::responseFaceReconition* data : datas)
{
instance.deleteObj(data);
}
datas.clear(); // 清空列表
instance.deleteObj(res); instance.deleteObj(res);
} }
// Pushes the current local face-image set to every registered camera handle so
// each CameraHandle can rebuild its recognition feature map.
// Reads devices/confidence and devices/faceNumbers from the ini settings.
void MainWindow::batchUpdatesCameraImageMap(){
    // Nothing to distribute — skip reading settings and iterating.
    // (The original re-checked localImageMap inside the loop; the condition is
    // loop-invariant, so hoist it to a single early return.)
    if (localImageMap.size() <= 0) {
        return;
    }
    float confidence = qSetting->value("devices/confidence").toFloat();
    int faceNumbers = qSetting->value("devices/faceNumbers").toInt();
    for (auto iter = faceDetectionParkingPushs.begin(); iter != faceDetectionParkingPushs.end(); ++iter) {
        CameraHandle *value = iter->second;
        if (value != nullptr) {  // guard against a null handle left in the map
            value->notificationUpdateImageMap(localImageMap, faceNumbers, confidence);
        }
    }
}
// Asks every registered camera handle to drop its cached face features.
// Invoked when the cloud-side image map becomes empty (see updateLocalFace).
void MainWindow::batchUpdatesFeatureRemove(){
    for (auto iter = faceDetectionParkingPushs.begin(); iter != faceDetectionParkingPushs.end(); ++iter) {
        CameraHandle *value = iter->second;
        // NOTE(review): gating removal on localImageMap being non-empty looks
        // suspicious — if the local map was already cleared, stale features on
        // the cameras would never be removed. Behavior preserved; confirm intent.
        if (value != nullptr && localImageMap.size() > 0) {
            value->featureRemove();
        }
    }
}
void MainWindow::findLocalSerialNumber(QString &serialNumber){ void MainWindow::findLocalSerialNumber(QString &serialNumber){
if(vides_data::isVirtualMachine()){ if(vides_data::isVirtualMachine()){
serialNumber = QSysInfo::machineUniqueId(); serialNumber = QSysInfo::machineUniqueId();
...@@ -357,10 +403,17 @@ void MainWindow::startCamera(const QString &httpurl){ ...@@ -357,10 +403,17 @@ void MainWindow::startCamera(const QString &httpurl){
std::map<QString,vides_data::localDeviceStatus*> localDevices; std::map<QString,vides_data::localDeviceStatus*> localDevices;
mediaFaceImage->SdkSearchDevicesSyn(localDevices); mediaFaceImage->SdkSearchDevicesSyn(localDevices);
if(localDevices.size()<=0){ if(localDevices.size()<=0){
httpService.setHttpUrl(httpurl);
vides_data::response *res=httpService.httpPostDeviceStatus(reStatus);
if(res->code!=0){
qInfo()<<"盒子状态上报失败 code:"<<res->code<<"msg:"<<res->msg;
}
instace.deleteObj(re); instace.deleteObj(re);
instace.deleteObj(res);
return ; return ;
} }
int alg=devices.algorithm; int alg=devices.algorithm;
for (const auto& device : devices.list) { for (const auto& device : devices.list) {
if(localDevices.count(device.sSn)>0 ){ if(localDevices.count(device.sSn)>0 ){
vides_data::localDeviceStatus* localDevice= localDevices.at(device.sSn); vides_data::localDeviceStatus* localDevice= localDevices.at(device.sSn);
...@@ -407,6 +460,7 @@ void MainWindow::startCamera(const QString &httpurl){ ...@@ -407,6 +460,7 @@ void MainWindow::startCamera(const QString &httpurl){
} }
} }
} }
this->deleteCloudNotCamer(localDevices, devices.list); this->deleteCloudNotCamer(localDevices, devices.list);
for (auto& pair : localDevices) { for (auto& pair : localDevices) {
...@@ -417,6 +471,7 @@ void MainWindow::startCamera(const QString &httpurl){ ...@@ -417,6 +471,7 @@ void MainWindow::startCamera(const QString &httpurl){
// 清空 localDevices 容器 // 清空 localDevices 容器
localDevices.clear(); localDevices.clear();
} }
httpService.setHttpUrl(httpurl); httpService.setHttpUrl(httpurl);
vides_data::response *res=httpService.httpPostDeviceStatus(reStatus); vides_data::response *res=httpService.httpPostDeviceStatus(reStatus);
if(res->code!=0){ if(res->code!=0){
...@@ -425,6 +480,7 @@ void MainWindow::startCamera(const QString &httpurl){ ...@@ -425,6 +480,7 @@ void MainWindow::startCamera(const QString &httpurl){
instace.deleteObj(res); instace.deleteObj(res);
updateLocalFace(httpurl); updateLocalFace(httpurl);
instace.deleteObj(re); instace.deleteObj(re);
...@@ -760,15 +816,11 @@ __uint8_t MainWindow::intToUint8t(int algorithm){ ...@@ -760,15 +816,11 @@ __uint8_t MainWindow::intToUint8t(int algorithm){
void MainWindow::initCameras(vides_data::cameraParameters &parameter,const std::list<vides_data::responseArea>&areas,int algorithm,std::list<vides_data::requestCameraInfo>&camera_info_list){ void MainWindow::initCameras(vides_data::cameraParameters &parameter,const std::list<vides_data::responseArea>&areas,int algorithm,std::list<vides_data::requestCameraInfo>&camera_info_list){
MediaFaceImage* mediaFaceImage= MediaFaceImage::getInstance(); MediaFaceImage* mediaFaceImage= MediaFaceImage::getInstance();
float carConfidence=qSetting->value("devices/carConfidence").toFloat();
int image_save=qSetting->value("devices/image_save").toInt(); int image_save=qSetting->value("devices/image_save").toInt();
float heightReference=qSetting->value("devices/height_reference").toFloat();
float carShapeConfidence=qSetting->value("devices/carShapeConfidence").toFloat();
CameraHandle * cameraHandle =new CameraHandle(parameter.sDevId,parameter.httpUrl,parameter.sSn,parameter.channel,modelPaths,carConfidence,carShapeConfidence,image_save); CameraHandle * cameraHandle =new CameraHandle(parameter.sDevId,parameter.httpUrl,parameter.sSn,parameter.channel,image_save);
int sdk_handle=cameraHandle->sdkDevLoginSyn(parameter.sDevId,parameter.nDevPort,parameter.sUserName,parameter.sPassword,10000); int sdk_handle=cameraHandle->sdkDevLoginSyn(parameter.sDevId,parameter.nDevPort,parameter.sUserName,parameter.sPassword,3000);
qDebug()<<"句柄为2:"<<sdk_handle; qDebug()<<"句柄为2:"<<sdk_handle;
if(sdk_handle<=0){ if(sdk_handle<=0){
qInfo() << "登录失败"; qInfo() << "登录失败";
...@@ -783,10 +835,10 @@ void MainWindow::initCameras(vides_data::cameraParameters &parameter,const std:: ...@@ -783,10 +835,10 @@ void MainWindow::initCameras(vides_data::cameraParameters &parameter,const std::
uint64 face_frequency=qSetting->value("devices/face_frequency").toULongLong(); uint64 face_frequency=qSetting->value("devices/face_frequency").toULongLong();
cameraHandle->initSdkRealTimeDevSnapSyn(sdk_handle,synTime,face_frequency); cameraHandle->initSdkRealTimeDevSnapSyn(sdk_handle,synTime,face_frequency);
cameraHandle->initAlgorithmParameter(heightReference);
QString pwd="admin2024"; // QString pwd="admin2024";
QString sid="MERCURY_8C4F"; // QString sid="MERCURY_8C4F";
cameraHandle->sdkWifi(pwd,sid); // cameraHandle->sdkWifi(pwd,sid);
vides_data::requestCameraInfo camera_info; vides_data::requestCameraInfo camera_info;
camera_info.sSn=parameter.sSn; camera_info.sSn=parameter.sSn;
camera_info.ip_addr=parameter.sDevId; camera_info.ip_addr=parameter.sDevId;
......
...@@ -2,14 +2,12 @@ ...@@ -2,14 +2,12 @@
#define MAINWINDOW_H #define MAINWINDOW_H
#include "Common.h" #include "Common.h"
#include "FaceRecognition.h"
#include "LicensePlateRecognition.h" #include "LicensePlateRecognition.h"
#include "hyper_lpr_sdk.h"
#include "CameraHandle.h" #include "CameraHandle.h"
#include "HttpService.h" #include "HttpService.h"
#include "VidesData.h" #include "VidesData.h"
#include "MediaFaceImage.h" #include "MediaFaceImage.h"
#include "HumanDetection.h" #include "HumanDetectionManage.h"
#include <algorithm> #include <algorithm>
#include <QString> #include <QString>
#include <QTextCodec> #include <QTextCodec>
...@@ -46,6 +44,7 @@ public: ...@@ -46,6 +44,7 @@ public:
void initCameras(vides_data::cameraParameters &parameter, void initCameras(vides_data::cameraParameters &parameter,
const std::list<vides_data::responseArea>&areas,int algorithm,std::list<vides_data::requestCameraInfo>&camera_info_list); const std::list<vides_data::responseArea>&areas,int algorithm,std::list<vides_data::requestCameraInfo>&camera_info_list);
__uint8_t intToUint8t(int algorithm); __uint8_t intToUint8t(int algorithm);
static MainWindow * sp_this; static MainWindow * sp_this;
...@@ -78,7 +77,12 @@ public: ...@@ -78,7 +77,12 @@ public:
// 过滤函数 // 过滤函数
void deleteCloudNotCamer (const std::map<QString,vides_data::localDeviceStatus*>& localDevices, void deleteCloudNotCamer (const std::map<QString,vides_data::localDeviceStatus*>& localDevices,
const std::list<vides_data::responseDeviceStatus>& devices); const std::list<vides_data::responseDeviceStatus>& devices);
void batchUpdatesCameraImageMap();
void batchUpdatesFeatureRemove();
~MainWindow(); ~MainWindow();
signals: signals:
void shutdownSignals(QString sDevId, int nDevPort); void shutdownSignals(QString sDevId, int nDevPort);
...@@ -116,5 +120,6 @@ private: ...@@ -116,5 +120,6 @@ private:
QString modelPaths; QString modelPaths;
std::map<QString,CameraHandle*>faceDetectionParkingPushs; std::map<QString,CameraHandle*>faceDetectionParkingPushs;
}; };
#endif // MAINWINDOW_H #endif // MAINWINDOW_H
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment