Commit 6b53f770 by “liusq”

Human-shape detection scheduler is responsible for allocating threads

parent 5fe7062a
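This change set replaces the HumanDetection singleton with a pool of detector instances owned by the new HumanDetectionManage class, which hands each frame to the worker that has been idle the longest. As a minimal sketch of how the new pieces fit together (based only on signatures added in this commit; poolSize and the other configuration values are illustrative placeholders, and modelPaths, frame and sSn are assumed to be in scope):

    int poolSize = 2;                                // devices/humanDetectionLen in the .ini
    float carShapeConfidence = 0.7f;                 // illustrative value
    float heightReference = 250.0f;                  // illustrative value
    int uniformColor = 12;                           // tens and ones digits encode accepted uniform colors
    HumanDetectionManage &manage = HumanDetectionManage::getInstance(poolSize);
    manage.initHumanDetectionManage(modelPaths, carShapeConfidence, heightReference, uniformColor);
    std::vector<vides_data::ParkingArea> areas;
    std::map<int, int> counts;                       // filled with 0x00 (all) and 0x02 (no-uniform) counts
    int num = manage.executeFindHuManCar(frame, 0x00, areas, counts, sSn);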
#ifndef CAMERAHANDLE_H
#define CAMERAHANDLE_H
#include "RecognitionInfo.h"
#include "FaceRecognition.h"
#include "FaceReconitionHandle.h"
#include "HttpService.h"
#include "LicensePlateRecognition.h"
#include "Json_Header/AlarmInfo.h"
......@@ -16,7 +16,6 @@
#include "Json_Header/OPMachine.h"
#include "mainwindow.h"
#include "ParkingSpaceInfo.h"
#include "so_human_sdk.h"
#include "hyper_lpr_sdk.h"
#include <QPolygon>
#include <QPainterPath>
......@@ -41,13 +40,12 @@ enum CAR_INFORMATION {
class CameraHandle: public QObject {
Q_OBJECT
public:
CameraHandle(QString &url,QString &httpUrl,QString &sSn, int &channel,
const QString &modelPaths,
float carConfidence,float carShapeConfidence, int imageSave);
CameraHandle(QString &url,QString &httpUrl,QString &sSn, int &channel, int imageSave);
CameraHandle();
~CameraHandle();
int sdkDevLoginSyn(QString sDevId, int nDevPort, QString sUserName, QString sPassword, int nTimeout);
//int SdkMediaGetFaceImage(int hDevice, int nSeq, int nTimeout);
int sdkDevSetAlarmListener(XSDK_HANDLE hDevice, int bListener);
int getHdevice();
......@@ -56,10 +54,13 @@ public:
void clearCameraHandle();
void initAlgorithmParameter(float &height_reference);
// void rebindTimer(int hDevice);
void initSdkRealTimeDevSnapSyn(int hDevice,int syn_timer,uint64 face_frequency);
void notificationUpdateImageMap(std::map<QString,QString>&maps,int numberFaces,float confidence);
void featureRemove();
void updateImage(const cv::Mat & frame,qint64 currentTime);
void matToBase64(const cv::Mat &image, QByteArray &base64Data);
......@@ -158,6 +159,8 @@ private :
std::map<QString, QString> currentData;
FaceReconitionHandle *faceReconitionHandle;
//Each area ID maps to one area's info
std::map<int,ParkingSpaceInfo*>parkMap;
//All parking-space areas monitored by the current camera
......@@ -172,10 +175,6 @@ private :
int offlineCount=0;
TCV_HumanDetector *detector;
P_HLPR_Context ctx ;
QSemaphore semaphore;
int image_save;
......
......@@ -76,6 +76,13 @@ float Common::getCarConfidenceMin() const{
void Common::setCarConfidenceMin(float carConfidenceMin){
this->carConfidenceMin=carConfidenceMin;
}
int Common::getHumanDetectionLen() const{
return humanDetectionLen;
}
void Common::setHumanDetectionLen(int humanDetectionLen){
this->humanDetectionLen=humanDetectionLen;
}
QString Common::GetLocalIp() {
QString ipAddress;
QList<QHostAddress> list = QNetworkInterface::allAddresses();
......
......@@ -50,6 +50,14 @@ public:
float getCarConfidenceMin() const;
void setCarConfidenceMin(float carConfidenceMin);
int getHumanDetectionLen() const;
void setHumanDetectionLen(int humanDetectionLen);
template <typename T>
const T& clamp(const T& v, const T& lo, const T& hi)
{
return (v < lo) ? lo : (hi < v) ? hi : v;
}
template<typename T>
void deleteObj(T*& obj) {
if(obj != nullptr) {
......@@ -64,6 +72,7 @@ private:
QString images;
float carConfidenceMax;
float carConfidenceMin;
int humanDetectionLen;
Common();
~Common();
......
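The Common::clamp template added above is a stand-in for std::clamp (available only from C++17), bounding a value to [lo, hi]. A hypothetical call, with rawConfidence as an illustrative variable rather than code from this commit:

    float confidence = Common::getInstance().clamp(rawConfidence, 0.0f, 1.0f);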
//#ifndef FACEDETECTIONPARKINGPUSHIMPL_H
//#define FACEDETECTIONPARKINGPUSHIMPL_H
//#include "XSDKPublic.h"
//#include "FaceDetectionParkingPush.h"
//#include "XNetSDKDefine.h"
//#include "Common.h"
//#include "CameraThread.h"
//#include "MediaFaceImage.h"
//class FaceDetectionParkingPushImpl {
//public:
// FaceDetectionParkingPushImpl(FaceDetectionParkingPush* parent,QString &framePath, QString &url);
// int SdkInit(QString &szConfigPath, QString &szTempPath);
// XSDK_HANDLE SdkDevLoginSyn(QString sDevId, int nDevPort, QString sUserName, QString sPassword, int nTimeout);
// XSDK_HANDLE SdkMediaGetFaceImage(XSDK_HANDLE hDevice, int nSeq, int nTimeout);
// int callbackFunction(XSDK_HANDLE hObject, int nMsgId, int nParam1, int nParam2, int nParam3, const char* szString, void* pObject, int64 lParam, int nSeq, void* pUserData, void* pMsg);
// CameraThread *getCameraThread();
//private:
// SXSDKInitParam *pParam;
// SXSDKLoginParam *loginParam;
// SXMediaFaceImageReq *sxMediaFaceImageReq;
// CameraThread *cameraThread;
// QString framePath;
// QString url;
// FaceDetectionParkingPush* parent;
//};
//#endif // FACEDETECTIONPARKINGPUSHIMPL_H
#include "FaceRecognition.h"
#include "FaceReconitionHandle.h"
#include <QImage>
#include <QThread>
#include <iostream>
#define cimg_display 0
#include "CImg.h"
using namespace cimg_library;
FaceReconitionHandle::FaceReconitionHandle() {
}
FaceReconition::FaceReconition() {}
FaceReconition::~FaceReconition(){
FaceReconitionHandle::~FaceReconitionHandle(){
if (ctxHandle != nullptr) {
HF_ReleaseFaceContext(ctxHandle);
ctxHandle = nullptr;
}
}
FaceReconition* FaceReconition::instance = nullptr;
cv::Mat FaceReconition::loadImage(const QString &path) {
cv::Mat FaceReconitionHandle::loadImage(const QString &path) {
// Try loading the image directly with OpenCV
std::string stdPath = path.toStdString();
cv::Mat image = cv::imread(stdPath, cv::IMREAD_COLOR);
......@@ -35,8 +33,8 @@ cv::Mat FaceReconition::loadImage(const QString &path) {
return loadImageFromByteStream(path);
}
void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numberFaces,float confidence){
//QWriteLocker locker(&rwLock);
void FaceReconitionHandle::initSourceImageMap(std::map<QString,QString>&maps,int numberFaces,float confidence){
QWriteLocker locker(&rwLock);
featureRemove();
HResult ret;
// Initialize the context
......@@ -135,7 +133,7 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe
}
}
int FaceReconition::featureRemove(){
void FaceReconitionHandle::featureRemove(){
if(customIds.size()>0){
for(auto customId:customIds){
HResult ret= HF_FeaturesGroupFeatureRemove(ctxHandle,customId);
......@@ -145,7 +143,7 @@ int FaceReconition::featureRemove(){
}
cv::Mat FaceReconition::loadImageFromByteStream(const QString& filePath) {
cv::Mat FaceReconitionHandle::loadImageFromByteStream(const QString& filePath) {
try {
// Read the JPEG image with CImg
......@@ -179,8 +177,9 @@ cv::Mat FaceReconition::loadImageFromByteStream(const QString& filePath) {
void FaceReconition::doesItExistEmployee(const cv::Mat &source,std::list<vides_data::faceRecognitionResult>&faces){
//QReadLocker locker(&rwLock);
void FaceReconitionHandle::doesItExistEmployee(const cv::Mat &source,std::list<vides_data::faceRecognitionResult>&faces){
QReadLocker locker(&rwLock);
HResult ret;
HF_ContextCustomParameter parameter = {0};
HF_ImageData imageData = {0};
......
#ifndef FACERECOGNITION_H
#define FACERECOGNITION_H
#ifndef FACERECONITIONHANDLE_H
#define FACERECONITIONHANDLE_H
#include "hyperface.h"
#include "herror.h"
#include "LogHandle.h"
#include "VidesData.h"
#include <opencv2/opencv.hpp>
#include <QReadWriteLock>
#include<QCoreApplication>
class FaceReconition
class FaceReconitionHandle
{
private:
static FaceReconition* instance;
HContextHandle ctxHandle=nullptr;
float configConfidence;
std::vector<int32_t>customIds;
FaceReconition();
~FaceReconition();
QReadWriteLock rwLock;
public:
static FaceReconition& getInstance()
{
static FaceReconition instance;
return instance;
}
FaceReconitionHandle();
~FaceReconitionHandle();
cv::Mat loadImage(const QString &path);
cv::Mat loadImageFromByteStream(const QString& filePath);
......@@ -39,8 +33,7 @@ public:
void initSourceImageMap(std::map<QString,QString>&maps,int numberFaces,float confidence);
int featureRemove();
void featureRemove();
};
#endif // FACERECOGNITION_H
#endif // FACERECONITIONHANDLE_H
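The diff above completes the refactor from a process-wide FaceReconition singleton to a per-camera FaceReconitionHandle: each CameraHandle now owns a faceReconitionHandle member, the QReadWriteLock guards that were previously commented out are actually taken (write lock in initSourceImageMap, read lock in doesItExistEmployee), and featureRemove now returns void instead of an unused int.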
......@@ -354,7 +354,11 @@ vides_data::response *HttpService::httpPostFacePopulation(QByteArray &img,int &h
resp->code=map["code"].toInt();
resp->msg=map["message"].toString();
}else{
qDebug()<<"httpPostFacePopulation===>";
qDebug()<<m_httpClient.errorCode();
qDebug()<<m_httpClient.errorString();
qDebug()<<"httpPostFacePopulation===>end";
resp->code=2;
resp->msg=OPERATION_FAILED;
}
......@@ -502,6 +506,8 @@ vides_data::response *HttpService::httpDownload( const QString &filePath,QString
resp->msg=map["message"].toString();
}else{
qDebug()<<m_httpClient.errorCode();
qDebug()<<m_httpClient.errorString();
resp->code=2;
resp->msg=OPERATION_FAILED;
}
......
#include "Common.h"
#include "HumanDetection.h"
HumanDetection* HumanDetection::instance = nullptr;
HumanDetection::HumanDetection() : height_reference(250.0f) {
#include <QDateTime>
HumanDetection::HumanDetection(const QString &modelPaths,
float carShapeConfidence) : heightReference(250.0f),thread_time(0) {
QByteArray && by_mpath=modelPaths.toUtf8();
char* m_path=by_mpath.data();
detector = TCV_CreateHumanDetector(m_path,1);
TCV_HumanDetectorSetHumanThreshold(detector,0.5f);
TCV_HumanDetectorSetCarThreshold(detector,carShapeConfidence);
}
HumanDetection::~HumanDetection(){
if(detector!=nullptr){
TCV_ReleaseHumanDetector(detector);
detector=nullptr;
}
}
void HumanDetection::draw_human_on_image(const cv::Mat& image, const TCV_ObjectLocation* boxes, int size) {
for (int i = 0; i < size; ++i) {
const auto& box = boxes[i];
......@@ -59,109 +70,36 @@ void HumanDetection::draw_human_on_image(const cv::Mat& image, const TCV_ObjectL
}
}
void HumanDetection::setHuManParameter(float &height_reference,int &uniformColor){
this->heightReference=height_reference;
this->uniformColor=uniformColor;
}
qint64 HumanDetection::getThreadTime()const{
return thread_time.load(std::memory_order_relaxed);
}
void HumanDetection::setHeightReference(float &height_reference){
this->height_reference=height_reference;
bool HumanDetection::getIsRunning()const{
return isRunning.load(std::memory_order_relaxed);
}
//0 human, 1 vehicle, 2 work uniform
int HumanDetection::findHuManCar(const cv::Mat &source, int res,std::map<int,int>&reMap, std::vector<vides_data::ParkingArea> &currentPlate) {
isRunning.store(true, std::memory_order_relaxed);
thread_time.store(QDateTime::currentMSecsSinceEpoch(), std::memory_order_relaxed);
//int HumanDetection::findHuManCar(const cv::Mat &source,int res,TCV_HumanDetector *detector,
// std::vector<vides_data::ParkingArea> &currentPlate){
// TCV_CameraStream *stream = TCV_CreateCameraStream();
// TCV_CameraStreamSetData(stream, source.data, source.cols, source.rows);
// TCV_CameraStreamSetRotationMode(stream, TCV_CAMERA_ROTATION_0);
// TCV_CameraStreamSetStreamFormat(stream, TCV_STREAM_BGR);
// // 0 = human, 1 = vehicle
// // Run object detection on one frame
// TCV_HumanDetectorProcessFrame(detector, stream);
// int num=0;
// if(res==0x00 || res==0x02){
// num= TCV_HumanDetectorGetNumOfHuman(detector);
// if (num > 0 && res==0x02) {
// // Create an array to receive the detection results
// TCV_ObjectLocation result[num];
// // Extract the pedestrian detection results
// TCV_HumanDetectorGetHumanLocation(detector, result, num);
// int num_uniforms = 0;
// // Work uniforms
// for (int i = 0; i < num; ++i) {
// if (result[i].uniform == 0 && std::abs(result[i].y2 - result[i].y1)>=height_reference) {
// vides_data::ParkingArea area;
// area.topLeftCornerX=result[i].x1;
// area.topLeftCornerY=result[i].y1;
// area.bottomLeftCornerX=result[i].x1;
// area.bottomLeftCornerY=result[i].y2;
// area.topRightCornerX=result[i].x2;
// area.topRightCornerY=result[i].y1;
// area.bottomRightCornerX=result[i].x2;
// area.bottomRightCornerY=result[i].y2;
// currentPlate.push_back(area);
// ++num_uniforms;
// }
// }
// num=num_uniforms;
// }
// if( num > 0 && res==0x00){
// // Create an array to receive the detection results
// TCV_ObjectLocation result[num];
// // Extract the pedestrian detection results
// TCV_HumanDetectorGetHumanLocation(detector, result, num);
// int human_size = 0;
// // Work uniforms
// for (int i = 0; i < num; ++i) {
// if (std::abs(result[i].y2 - result[i].y1)>=height_reference) {
// vides_data::ParkingArea area;
// area.topLeftCornerX=result[i].x1;
// area.topLeftCornerY=result[i].y1;
// area.bottomLeftCornerX=result[i].x1;
// area.bottomLeftCornerY=result[i].y2;
// area.topRightCornerX=result[i].x2;
// area.topRightCornerY=result[i].y1;
// area.bottomRightCornerX=result[i].x2;
// area.bottomRightCornerY=result[i].y2;
// currentPlate.push_back(area);
// ++human_size;
// }
// }
// num=human_size;
// }
// qDebug() << (res == 0 ? "findHuManCar number of people detected:" : "findHuManCar people without uniforms:") << num;
// }else if (res==0x01) {
// num=TCV_HumanDetectorGetNumOfCar(detector);
// TCV_ObjectLocation resultCar[num];
// TCV_HumanDetectorGetCarLocation(detector,resultCar,num);
// for (int i = 0; i < num; ++i) {
// vides_data::ParkingArea area;
// area.topLeftCornerX=resultCar[i].x1;
// area.topLeftCornerY=resultCar[i].y1;
// area.bottomLeftCornerX=resultCar[i].x1;
// area.bottomLeftCornerY=resultCar[i].y2;
// area.topRightCornerX=resultCar[i].x2;
// area.topRightCornerY=resultCar[i].y1;
// area.bottomRightCornerX=resultCar[i].x2;
// area.bottomRightCornerY=resultCar[i].y2;
// currentPlate.push_back(area);
// qDebug() << "score 检测到的汽车数量匹配度:" << resultCar[i].score;
// }
// qDebug() << "findHuManCar 检测到的汽车数量:" << num;
// }else {
// qDebug() << "参数错误";
// }
// TCV_ReleaseCameraStream(stream);
// return num;
//}
int HumanDetection::findHuManCar(const cv::Mat &source, int res, TCV_HumanDetector *detector, std::vector<vides_data::ParkingArea> &currentPlate) {
TCV_CameraStream *stream = TCV_CreateCameraStream();
ScopeSemaphoreExit streamGuard([this, stream]() {
// Release the camera stream
TCV_ReleaseCameraStream(stream);
isRunning.store(false, std::memory_order_relaxed);
});
TCV_CameraStreamSetData(stream, source.data, source.cols, source.rows);
TCV_CameraStreamSetRotationMode(stream, TCV_CAMERA_ROTATION_0);
TCV_CameraStreamSetStreamFormat(stream, TCV_STREAM_BGR);
......@@ -169,16 +107,19 @@ int HumanDetection::findHuManCar(const cv::Mat &source, int res, TCV_HumanDetect
int num = 0;
if (res == 0x00 || res == 0x02) {
num = TCV_HumanDetectorGetNumOfHuman(detector);
if (num == 0) return num; // No pedestrian detections; return early
std::vector<TCV_ObjectLocation> results(num);
TCV_HumanDetectorGetHumanLocation(detector, results.data(), num);
int count = 0;
int count_no_uniform = 0; // Pedestrians without work uniforms
int count_all = 0; // All pedestrians meeting the criteria
for (const auto &person : results) {
if ((res == 0x02 && person.uniform == 0) || res == 0x00) {
if (std::abs(person.y2 - person.y1) >= height_reference) {
int tenPlace = uniformColor / 10; // Tens digit
int onePlace = uniformColor % 10; // Ones digit
if (std::abs(person.y2 - person.y1) >= heightReference) {
vides_data::ParkingArea area;
area.topLeftCornerX=person.x1;
area.topLeftCornerY=person.y1;
......@@ -189,14 +130,20 @@ int HumanDetection::findHuManCar(const cv::Mat &source, int res, TCV_HumanDetect
area.bottomRightCornerX=person.x2;
area.bottomRightCornerY=person.y2;
currentPlate.push_back(area);
++count;
++count_all;
//Work-uniform check
if(person.uniform != tenPlace && person.uniform != onePlace){
++count_no_uniform;
}
}
}
num = count; // Update num to the actual count
reMap[0x02] = count_no_uniform; // Number of pedestrians without work uniforms
reMap[0x00] = count_all; // Number of all qualifying pedestrians
num = res == 0x00 ? count_all : count_no_uniform; // Update num to the actual count
qDebug() << (res == 0 ? "findHuManCar number of people detected:" : "findHuManCar people without uniforms:") << num;
} else if (res == 0x01) {
}
else if (res == 0x01) {
num = TCV_HumanDetectorGetNumOfCar(detector);
if (num == 0) return num; // No vehicle detections; return early
......@@ -220,7 +167,5 @@ int HumanDetection::findHuManCar(const cv::Mat &source, int res, TCV_HumanDetect
} else {
qDebug() << "参数错误";
}
TCV_ReleaseCameraStream(stream);
return num;
}
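Note on the rewritten findHuManCar above: a single detection pass now fills reMap with both pedestrian counts (reMap[0x00] holds everyone above the height threshold, reMap[0x02] holds those whose uniform field matches neither decimal digit of uniformColor), and the return value merely selects one of the two according to res. For example, with uniformColor = 12, a person with uniform == 1 or uniform == 2 counts as uniformed; any other value increments count_no_uniform.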
......@@ -2,29 +2,46 @@
#define HUMANDETECTION_H
#include "VidesData.h"
#include "so_human_sdk.h"
#include "ScopeSemaphoreExit.h"
#include <signal.h>
#include <QDateTime>
#include <opencv2/opencv.hpp>
#include <QDebug>
class HumanDetection
{
#include <atomic>
#include<QThread>
class HumanDetection:public QObject {
Q_OBJECT
public:
HumanDetection();
HumanDetection(const QString &modelPaths,
float carShapeConfidence);
~HumanDetection();
void initDetector();
int findHuManCar(const cv::Mat &source,int res,TCV_HumanDetector *detector,std::vector<vides_data::ParkingArea> &currentPlate);
static HumanDetection& getInstance()
{
static HumanDetection instance;
return instance;
}
void setHeightReference(float &height_reference);
int findHuManCar(const cv::Mat &source,int res,std::map<int,int>&reMap,
std::vector<vides_data::ParkingArea> &currentPlate);
void setHuManParameter(float &height_reference,int &uniformColor);
void draw_human_on_image(const cv::Mat& image, const TCV_ObjectLocation* boxes, int size);
qint64 getThreadTime() const;
bool getIsRunning() const;
void onTimeout();
private:
static HumanDetection* instance;
//Height baseline
float height_reference;
float heightReference;
int uniformColor;
TCV_HumanDetector *detector;
std::atomic<qint64> thread_time;
std::atomic<bool> isRunning{false};
};
......
#include "HumanDetectionManage.h"
HumanDetectionManage::HumanDetectionManage(int humanDetectionLen):semaphore(humanDetectionLen){
this->humanDetectionLen=humanDetectionLen;
}
void HumanDetectionManage::initHumanDetectionManage(const QString &modelPaths,
float carShapeConfidence,float &height_reference,int &uniformColor) {
for (int i = 0; i < humanDetectionLen; ++i) {
HumanDetection* human=new HumanDetection(modelPaths,carShapeConfidence);
human->setHuManParameter(height_reference,uniformColor);
humanDetections.emplace_back(human);
}
}
HumanDetectionManage::~HumanDetectionManage(){
Common & instace= Common::getInstance();
for (auto task:humanDetections) {
instace.deleteObj(task);
}
}
HumanDetection* HumanDetectionManage::schedulingAlgorithm(QString sSn) {
// Use the current time as the baseline (milliseconds, to match thread_time set in findHuManCar)
qint64 currentTime = QDateTime::currentMSecsSinceEpoch();
// Vector holding all schedulable objects
std::vector<HumanDetection*> schedulableObjects;
qint64 maxWaitTime = 0;
// Number of objects sharing the maximum wait time
int maxWaitTimeCount = 0;
// Walk humanDetections and collect every idle HumanDetection object with the same (longest) wait time
for (HumanDetection* human : humanDetections) {
if (human->getIsRunning()) continue;
// Wait time since this object last ran
qint64 waitTime = std::abs(currentTime - human->getThreadTime());
if (waitTime > maxWaitTime) {
schedulableObjects.clear();
schedulableObjects.push_back(human);
maxWaitTime = waitTime;
maxWaitTimeCount = 1;
} else if (waitTime == maxWaitTime) {
schedulableObjects.push_back(human);
maxWaitTimeCount++;
}
}
// If exactly one object has the maximum wait time, return it directly
if (maxWaitTimeCount == 1) {
return schedulableObjects.at(0);
}
if (schedulableObjects.empty()) {
return nullptr; // No schedulable object available
}
// Randomly select one of the schedulable objects
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> dis(0, schedulableObjects.size() - 1);
return schedulableObjects[dis(gen)];
}
int HumanDetectionManage::executeFindHuManCar(const cv::Mat &source, int res,
std::vector<vides_data::ParkingArea> &currentPlate,std::map<int,int>&resMap,QString sSn) {
semaphore.acquire();
ScopeSemaphoreExit guard([this]() {
semaphore.release(); // Release the semaphore
});
HumanDetection* selectedHumanDetection = schedulingAlgorithm(sSn);
if (selectedHumanDetection!=nullptr) {
// Invoke findHuManCar on the selected object
qInfo() << "Scheduler acquired ===> sn"<<sSn<<"res"<<res;
int detectionResult = selectedHumanDetection->findHuManCar(source, res,resMap, currentPlate);
return detectionResult;
} else {
qDebug() << "没有可用的HumanDetection对象可以调度";
return -2;
}
}
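Design notes on the scheduler above: schedulingAlgorithm considers only detectors that are not currently running, picks the one whose last start time (thread_time) is furthest in the past, and breaks ties randomly so no single camera monopolizes a worker. executeFindHuManCar bounds concurrency with a QSemaphore sized to the pool and releases the slot through the ScopeSemaphoreExit RAII guard, so it is returned even on an early exit.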
#ifndef HUMANDETECTIONMANAGE_H
#define HUMANDETECTIONMANAGE_H
#include "HumanDetection.h"
#include "Common.h"
#include "VidesData.h"
#include "ScopeSemaphoreExit.h"
#include <QWaitCondition>
#include <QMutex>
#include <QThread>
#include <random>
#include <QSemaphore>
#include <vector>
#include <opencv2/opencv.hpp>
class HumanDetectionManage{
public:
HumanDetectionManage(int humanDetectionLen);
~HumanDetectionManage();
static HumanDetectionManage& getInstance(int humanDetectionLen)
{
static HumanDetectionManage instance(humanDetectionLen);
return instance;
}
void initHumanDetectionManage(const QString &modelPaths,
float carShapeConfidence,float &height_reference,int &uniformColor);
int executeFindHuManCar(const cv::Mat &source,int res,std::vector<vides_data::ParkingArea> &currentPlate,
std::map<int,int>&resMap, QString sSn);
HumanDetection *schedulingAlgorithm(QString sSn);
private:
static HumanDetectionManage* instance;
std::vector<HumanDetection*>humanDetections;
int humanDetectionLen;
QSemaphore semaphore;
QWaitCondition waitCondition;
QMutex mutex;
};
#endif // HUMANDETECTIONMANAGE_H
......@@ -6,27 +6,11 @@
LicensePlateRecognition::LicensePlateRecognition() {}
LicensePlateRecognition::~LicensePlateRecognition(){
HLPR_ReleaseContext(ctx);
}
LicensePlateRecognition* LicensePlateRecognition::instance = nullptr;
//void LicensePlateRecognition::initHlprContext(const QString &modelPaths, const QString &carCascade, float carConfidence){
// HLPR_ContextConfiguration configuration = {0};
// QByteArray && by_mpath=modelPaths.toUtf8();
// char* m_path=by_mpath.data();
// configuration.models_path = m_path;
// configuration.max_num = 5;
// configuration.det_level = DETECT_LEVEL_LOW;
// configuration.use_half = false;
// configuration.nms_threshold = 0.5f;
// configuration.rec_confidence_threshold = carConfidence;
// configuration.box_conf_threshold = 0.30f;
// configuration.threads = 1;
// this->carCascadeUrl=carCascade;
// ctx = HLPR_CreateContext(&configuration);
//}
void LicensePlateRecognition::oldLicensePlateNumber(const cv::Mat &source,const QString &modelPaths,QString & lpNumber){
HLPR_ImageData data = {0};
data.data = source.data;
......@@ -112,8 +96,26 @@ void LicensePlateRecognition::filterLicensePlateConfidenceMax(vides_data::reques
}
}
}
void LicensePlateRecognition::initHlprContext(const QString &modelPaths, float carConfidence){
if(ctx==nullptr){
HLPR_ContextConfiguration configuration = {0};
QByteArray && by_mpath=modelPaths.toUtf8();
char* m_path=by_mpath.data();
configuration.models_path = m_path;
configuration.max_num = 5;
configuration.det_level = DETECT_LEVEL_LOW;
configuration.use_half = false;
configuration.nms_threshold = 0.5f;
configuration.rec_confidence_threshold = carConfidence;
configuration.box_conf_threshold = 0.30f;
configuration.threads = 1;
ctx = HLPR_CreateContext(&configuration);
}
}
void LicensePlateRecognition::licensePlateNumber(const cv::Mat &source, QString &lpNumber,vides_data::requestLicensePlate &plate,
qint64 currentTime,P_HLPR_Context ctx) {
qint64 currentTime) {
// Run detection on one frame of image data
......
......@@ -23,23 +23,20 @@ public:
}
//识别车牌号
void licensePlateNumber(const cv::Mat &source,QString & lpNumber, vides_data::requestLicensePlate &plate,
qint64 currentTime,P_HLPR_Context ctx);
qint64 currentTime);
void filterLicensePlateConfidenceMax(vides_data::requestLicensePlate &plate,vides_data::LicensePlate &max);
void oldLicensePlateNumber(const cv::Mat &source,const QString &modelPaths,QString & lpNumber);
// void initHlprContext(const QString &modelPaths,const QString &carCascade,float carConfidence);
void initHlprContext(const QString &modelPaths,float carConfidence);
void replaceWith1And0( QString &code);
private:
static LicensePlateRecognition* instance;
//P_HLPR_Context ctx ;
float carConfidence;
std::mutex carMutex;
private:
static LicensePlateRecognition* instance;
P_HLPR_Context ctx=nullptr ;
LicensePlateRecognition();
......
......@@ -211,56 +211,58 @@ int MediaFaceImage::ToFile(const char* pFileName, const void* pData, int nLength
fclose(fp);
return 0;
}
int MediaFaceImage::FaceImageCallBack(XSDK_HANDLE hMedia, int nChannel, cv::Mat &image) {
const int BufferSize = 1024 * 1024 * 2; // Buffer size
//int MediaFaceImage::FaceImageCallBack(XSDK_HANDLE hMedia, int nChannel, cv::Mat &image) {
// const int BufferSize = 1024 * 1024 * 2; // Buffer size
// image.release();
// // Manage resources with a smart pointer
// std::unique_ptr<unsigned char[]> pOutBuffer(new unsigned char[BufferSize]);
// int pInOutBufferSize = 0;
// int ret = XSDK_DevSnapSyn(hMedia, nChannel, "", pOutBuffer.get(), &pInOutBufferSize);
// if (ret < 0 || pInOutBufferSize <= 0) {
// qInfo() << "同步设备端抓图失败";
// return -1;
// }
// Manage resources with a smart pointer
std::unique_ptr<unsigned char[]> pOutBuffer(new unsigned char[BufferSize]);
// // Manage the buffer with a vector
// std::vector<uchar> buffer(pInOutBufferSize);
// memcpy(buffer.data(), pOutBuffer.get(), pInOutBufferSize);
// image =std::move(cv::imdecode(buffer, cv::IMREAD_UNCHANGED));
// return pInOutBufferSize; // pOutBuffer is owned by the smart pointer; no manual release needed
//}
int MediaFaceImage::FaceImageCallBack(XSDK_HANDLE hMedia, int nChannel, cv::Mat &image) {
const int BufferSize = 1024 * 1024 * 2; // Buffer size
image.release(); // Release any previously held image
std::unique_ptr<unsigned char[]> pOutBuffer(new unsigned char[BufferSize]); // Memory managed by a smart pointer
int pInOutBufferSize = 0;
int ret = XSDK_DevSnapSyn(hMedia, nChannel, "", pOutBuffer.get(), &pInOutBufferSize);
if (ret < 0 || pInOutBufferSize <= 0) {
qInfo() << "同步设备端抓图失败";
return -1;
}
// Manage the buffer with a vector
// Manage the buffer data with std::vector
std::vector<uchar> buffer(pInOutBufferSize);
memcpy(buffer.data(), pOutBuffer.get(), pInOutBufferSize);
image = cv::imdecode(buffer, cv::IMREAD_UNCHANGED);
return pInOutBufferSize; // pOutBuffer is owned by the smart pointer; no manual release needed
}
//int MediaFaceImage::FaceImageCallBack(XSDK_HANDLE hMedia, int nChannel, cv::Mat &image)
//{
// // static const int BufferSize = 1024 * 1024 * 2;
// // static unsigned char pOutBuffer[BufferSize];
// const int BufferSize = 1024 * 1024 * 2;
// unsigned char* pOutBuffer = new unsigned char[BufferSize];
// int pInOutBufferSize = 0;
// int ret = XSDK_DevSnapSyn(hMedia, nChannel, "", pOutBuffer, &pInOutBufferSize);
// if (ret < 0 || pInOutBufferSize<=0 ) {
// qInfo() << "同步设备端抓图失败";
// if (pOutBuffer)
// {
// delete[]pOutBuffer;
// pOutBuffer = nullptr;;
// }
// return -1;
// }
try {
cv::Mat decodedImage = cv::imdecode(buffer, cv::IMREAD_UNCHANGED);
if (decodedImage.empty()) {
qInfo() << "图像解码失败";
return -1;
}
image = std::move(decodedImage);
} catch (const cv::Exception& e) {
qInfo() << "图像解码过程中捕获异常:" << e.what();
return -1;
}
// std::vector<uchar> buffer(pInOutBufferSize);
// memcpy(buffer.data(), pOutBuffer, pInOutBufferSize);
// image =std::move(cv::imdecode(buffer, cv::IMREAD_UNCHANGED));;
// if (pOutBuffer)
// {
// delete[]pOutBuffer;
// pOutBuffer = nullptr;;
// }
// return pInOutBufferSize;
//}
return pInOutBufferSize;
}
int MediaFaceImage::CameraImage(XSDK_HANDLE hMedia,int nChannel,std::vector<uchar> &buffer){
static const int BufferSize = 1024 * 1024 * 2; // 2MB buffer size
......
......@@ -9,6 +9,7 @@ ParkingSpaceInfo::ParkingSpaceInfo(){
}
ParkingSpaceInfo::~ParkingSpaceInfo(){
qInfo() << "ParkingSpaceInfo:关闭";
}
void ParkingSpaceInfo::addQueue(RecognizedInfo &info){
......
......@@ -8,6 +8,7 @@ TaskRunnable::TaskRunnable(std::function<void()> newTask, int hDevice, int chann
if(runFunction==SdkCallbackFunction){
this->callbackFunction = newTask;
}
this->setAutoDelete(true);
}
TaskRunnable::~TaskRunnable(){
......
#include "TimeoutException.h"
TimeoutException::TimeoutException()
: std::runtime_error("Function execution timed out") {
}
TimeoutException::~TimeoutException(){
}
#ifndef TIMEOUTEXCEPTION_H
#define TIMEOUTEXCEPTION_H
#include <stdexcept>
class TimeoutException : public std::runtime_error {
public:
TimeoutException();
~TimeoutException();
};
#endif // TIMEOUTEXCEPTION_H
......@@ -20,6 +20,8 @@ namespace vides_data{
constexpr const char *HEADER_TYPE_KAY="Content-Type";
constexpr const char *HEADER_TYPE_VALUE="application/json";
constexpr const char *PROFLIE_TEST= "test";
constexpr const char *PROFLIE_DEV= "dev";
struct response
{
int code;
......
......@@ -12,46 +12,46 @@ TEMPLATE = app
# depend on your compiler). Please consult the documentation of the
# deprecated API in order to know how to port your code away from it.
DEFINES += QT_DEPRECATED_WARNINGS
DEFINES += APP_VERSION=\\\"1.0.2\\\"
#QMAKE_LIBDIR += /usr/local/lib
#INCLUDEPATH+=/usr/local/include/opencv4
#INCLUDEPATH+=/usr/local/include/hyperface
#INCLUDEPATH+=/usr/local/include/hyper
#INCLUDEPATH+=/usr/local/include/XNetSDK
#INCLUDEPATH+=/usr/local/include/human
#INCLUDEPATH+=/usr/local/include/CImg
unix:contains(QMAKE_HOST.arch, x86_64) {
QMAKE_LIBDIR += /home/mark/Public/x86_opencv/lib
}
unix:contains(QMAKE_HOST.arch, arm) {
QMAKE_LIBDIR += /usr/local/lib
}
# 根据编译器类型选择库路径和头文件路径
unix: {
# x86 架构
contains(QMAKE_HOST.arch, x86_64) {
INCLUDEPATH+=/home/mark/Public/x86_opencv/include/opencv4
INCLUDEPATH+=/home/mark/Public/x86_opencv/include/hyperface
INCLUDEPATH+=/home/mark/Public/x86_opencv/include/hyper
INCLUDEPATH+=/home/mark/Public/x86_opencv/include/XNetSDK
INCLUDEPATH+=/home/mark/Public/x86_opencv/include/human
INCLUDEPATH+=/home/mark/Public/x86_opencv/include/CImg
}
# ARM 架构
contains(QMAKE_HOST.arch, arm) {
INCLUDEPATH+=/usr/local/include/opencv4
INCLUDEPATH+=/usr/local/include/hyperface
INCLUDEPATH+=/usr/local/include/hyper
INCLUDEPATH+=/usr/local/include/XNetSDK
INCLUDEPATH+=/usr/local/include/human
}
}
DEFINES += APP_VERSION=\\\"1.0.3\\\"
QMAKE_LIBDIR += /usr/local/lib
INCLUDEPATH+=/usr/local/include/opencv4
INCLUDEPATH+=/usr/local/include/hyperface
INCLUDEPATH+=/usr/local/include/hyper
INCLUDEPATH+=/usr/local/include/XNetSDK
INCLUDEPATH+=/usr/local/include/human
INCLUDEPATH+=/usr/local/include/CImg
#unix:contains(QMAKE_HOST.arch, x86_64) {
# QMAKE_LIBDIR += /home/mark/Public/x86_opencv/lib
#}
#unix:contains(QMAKE_HOST.arch, arm) {
# QMAKE_LIBDIR += /usr/local/lib
#}
## 根据编译器类型选择库路径和头文件路径
#unix: {
# # x86 架构
# contains(QMAKE_HOST.arch, x86_64) {
# INCLUDEPATH+=/home/mark/Public/x86_opencv/include/opencv4
# INCLUDEPATH+=/home/mark/Public/x86_opencv/include/hyperface
# INCLUDEPATH+=/home/mark/Public/x86_opencv/include/hyper
# INCLUDEPATH+=/home/mark/Public/x86_opencv/include/XNetSDK
# INCLUDEPATH+=/home/mark/Public/x86_opencv/include/human
# INCLUDEPATH+=/home/mark/Public/x86_opencv/include/CImg
# }
# # ARM 架构
# contains(QMAKE_HOST.arch, arm) {
# INCLUDEPATH+=/usr/local/include/opencv4
# INCLUDEPATH+=/usr/local/include/hyperface
# INCLUDEPATH+=/usr/local/include/hyper
# INCLUDEPATH+=/usr/local/include/XNetSDK
# INCLUDEPATH+=/usr/local/include/human
# }
#}
# You can also make your code fail to compile if it uses deprecated APIs.
# In order to do so, uncomment the following line.
......@@ -83,7 +83,6 @@ LIBS += -lopencv_core \
#-lz
SOURCES += \
Common.cpp \
FaceReconition.cpp \
LogHandler.cpp \
main.cpp \
mainwindow.cpp \
......@@ -96,11 +95,12 @@ SOURCES += \
CameraHandle.cpp \
ParkingSpaceInfo.cpp \
HumanDetection.cpp \
ScopeSemaphoreExit.cpp
ScopeSemaphoreExit.cpp \
FaceReconitionHandle.cpp \
HumanDetectionManage.cpp
HEADERS += \
Common.h \
FaceRecognition.h \
LogHandle.h \
mainwindow.h \
LicensePlateRecognition.h \
......@@ -113,7 +113,9 @@ HEADERS += \
CameraHandle.h \
ParkingSpaceInfo.h \
HumanDetection.h \
ScopeSemaphoreExit.h
ScopeSemaphoreExit.h \
FaceReconitionHandle.h \
HumanDetectionManage.h
#FORMS += \
# mainwindow.ui
......
......@@ -7,6 +7,8 @@ MainWindow::MainWindow()
{
sp_this=this;
LogHandler::Get().installMessageHandler();
QString inifile=QCoreApplication::applicationDirPath()+"/gameras.ini";
......@@ -16,6 +18,9 @@ MainWindow::MainWindow()
modelPaths=qSetting->value("licensePlateRecognition/model_paths").toString();
initCommon();
QThreadPool* threadPool = QThreadPool::globalInstance();
threadPool->setMaxThreadCount(12);
deleteLogFileTimer =new QTimer(this);
connect(deleteLogFileTimer, &QTimer::timeout, this, &MainWindow::deleteLogFile);
......@@ -29,20 +34,35 @@ MainWindow::MainWindow()
initFaceFaceRecognition();
FaceReconition &faceRecognition = FaceReconition::getInstance();
float confidence=qSetting->value("devices/confidence").toFloat();
int faceNumbers=qSetting->value("devices/faceNumbers").toInt();
// FaceReconition &faceRecognition = FaceReconition::getInstance();
// float confidence=qSetting->value("devices/confidence").toFloat();
// int faceNumbers=qSetting->value("devices/faceNumbers").toInt();
if(localImageMap.size()>0){
faceRecognition.initSourceImageMap(localImageMap,faceNumbers,confidence);
}
// if(localImageMap.size()>0){
// faceRecognition.initSourceImageMap(localImageMap,faceNumbers,confidence);
// }
float heightReference=qSetting->value("devices/height_reference").toFloat();
int uniformColor=qSetting->value("devices/uniformColor").toInt();
int humanDetectionLen=qSetting->value("devices/humanDetectionLen").toInt();
float carShapeConfidence=qSetting->value("devices/carShapeConfidence").toFloat();
Common & instace= Common::getInstance();
instace.setHumanDetectionLen(humanDetectionLen);
float carConfidence=qSetting->value("devices/carConfidence").toFloat();
LicensePlateRecognition &licensePlateRecogn =LicensePlateRecognition::getInstance();
licensePlateRecogn.initHlprContext(modelPaths,carConfidence);
//LicensePlateRecognition &licensePlateRecogn =LicensePlateRecognition::getInstance();
//licensePlateRecogn.initHlprContext(modelPaths,qSetting->value("licensePlateRecognition/car_cascade_path").toString(),carConfidence);
HumanDetectionManage &humanDetectionManage= HumanDetectionManage::getInstance(humanDetectionLen);
humanDetectionManage.initHumanDetectionManage(modelPaths,carShapeConfidence,heightReference,uniformColor);
QString httpurl;
QString profile=qSetting->value("cloudservice/profile","test").toString();
if(strcmp(profile.toUtf8().data(),vides_data::PROFLIE_TEST)==0){
if(strcmp(profile.toUtf8().data(),vides_data::PROFLIE_TEST)==0 ){
httpurl=qSetting->value("cloudservice/test_http").toString();
}else if(strcmp(profile.toUtf8().data(),vides_data::PROFLIE_DEV)==0 ) {
httpurl=qSetting->value("cloudservice/dev_http").toString();
}else{
httpurl=qSetting->value("cloudservice/pro_http").toString();
}
......@@ -65,6 +85,7 @@ MainWindow::MainWindow()
},Qt::QueuedConnection);
this->startCamera(httpurl);
batchUpdatesCameraImageMap();
// Set the timer interval
dePermissionSynTimer->setInterval(dePermissionTimer);
......@@ -272,22 +293,47 @@ void MainWindow::updateLocalFace(const QString &httpurl) {
}
}
FaceReconition &faceRecognition = FaceReconition::getInstance();
if (isChanged) {
if (cloudImageMap.empty()) {
// If the cloud map is now empty, remove all features
faceRecognition.featureRemove();
//faceRecognition.featureRemove();
batchUpdatesFeatureRemove();
} else {
float confidence=qSetting->value("devices/confidence").toFloat();
int faceNumbers=qSetting->value("devices/faceNumbers").toInt();
//float confidence=qSetting->value("devices/confidence").toFloat();
//int faceNumbers=qSetting->value("devices/faceNumbers").toInt();
qDebug()<<"startMap != endMap-->";
faceRecognition.initSourceImageMap(localImageMap,faceNumbers, confidence);
// faceRecognition.initSourceImageMap(localImageMap,faceNumbers, confidence);
batchUpdatesCameraImageMap();
}
}
for (vides_data::responseFaceReconition* data : datas)
{
instance.deleteObj(data);
}
datas.clear(); // Clear the list
instance.deleteObj(res);
}
void MainWindow::batchUpdatesCameraImageMap(){
float confidence=qSetting->value("devices/confidence").toFloat();
int faceNumbers=qSetting->value("devices/faceNumbers").toInt();
for(auto iter = faceDetectionParkingPushs.begin(); iter != faceDetectionParkingPushs.end(); ++iter) {
CameraHandle*value= iter->second;
if(localImageMap.size()>0){
value->notificationUpdateImageMap(localImageMap,faceNumbers,confidence);
}
}
}
void MainWindow::batchUpdatesFeatureRemove(){
for(auto iter = faceDetectionParkingPushs.begin(); iter != faceDetectionParkingPushs.end(); ++iter) {
CameraHandle*value= iter->second;
if(localImageMap.size()>0){
value->featureRemove();
}
}
}
void MainWindow::findLocalSerialNumber(QString &serialNumber){
if(vides_data::isVirtualMachine()){
serialNumber = QSysInfo::machineUniqueId();
......@@ -357,10 +403,17 @@ void MainWindow::startCamera(const QString &httpurl){
std::map<QString,vides_data::localDeviceStatus*> localDevices;
mediaFaceImage->SdkSearchDevicesSyn(localDevices);
if(localDevices.size()<=0){
httpService.setHttpUrl(httpurl);
vides_data::response *res=httpService.httpPostDeviceStatus(reStatus);
if(res->code!=0){
qInfo()<<"盒子状态上报失败 code:"<<res->code<<"msg:"<<res->msg;
}
instace.deleteObj(re);
instace.deleteObj(res);
return ;
}
int alg=devices.algorithm;
for (const auto& device : devices.list) {
if(localDevices.count(device.sSn)>0 ){
vides_data::localDeviceStatus* localDevice= localDevices.at(device.sSn);
......@@ -407,6 +460,7 @@ void MainWindow::startCamera(const QString &httpurl){
}
}
}
this->deleteCloudNotCamer(localDevices, devices.list);
for (auto& pair : localDevices) {
......@@ -417,6 +471,7 @@ void MainWindow::startCamera(const QString &httpurl){
// Clear the localDevices container
localDevices.clear();
}
httpService.setHttpUrl(httpurl);
vides_data::response *res=httpService.httpPostDeviceStatus(reStatus);
if(res->code!=0){
......@@ -425,6 +480,7 @@ void MainWindow::startCamera(const QString &httpurl){
instace.deleteObj(res);
updateLocalFace(httpurl);
instace.deleteObj(re);
......@@ -760,15 +816,11 @@ __uint8_t MainWindow::intToUint8t(int algorithm){
void MainWindow::initCameras(vides_data::cameraParameters &parameter,const std::list<vides_data::responseArea>&areas,int algorithm,std::list<vides_data::requestCameraInfo>&camera_info_list){
MediaFaceImage* mediaFaceImage= MediaFaceImage::getInstance();
float carConfidence=qSetting->value("devices/carConfidence").toFloat();
int image_save=qSetting->value("devices/image_save").toInt();
float heightReference=qSetting->value("devices/height_reference").toFloat();
float carShapeConfidence=qSetting->value("devices/carShapeConfidence").toFloat();
CameraHandle * cameraHandle =new CameraHandle(parameter.sDevId,parameter.httpUrl,parameter.sSn,parameter.channel,image_save);
CameraHandle * cameraHandle =new CameraHandle(parameter.sDevId,parameter.httpUrl,parameter.sSn,parameter.channel,modelPaths,carConfidence,carShapeConfidence,image_save);
int sdk_handle=cameraHandle->sdkDevLoginSyn(parameter.sDevId,parameter.nDevPort,parameter.sUserName,parameter.sPassword,10000);
int sdk_handle=cameraHandle->sdkDevLoginSyn(parameter.sDevId,parameter.nDevPort,parameter.sUserName,parameter.sPassword,3000);
qDebug()<<"句柄为2:"<<sdk_handle;
if(sdk_handle<=0){
qInfo() << "登录失败";
......@@ -783,10 +835,10 @@ void MainWindow::initCameras(vides_data::cameraParameters &parameter,const std::
uint64 face_frequency=qSetting->value("devices/face_frequency").toULongLong();
cameraHandle->initSdkRealTimeDevSnapSyn(sdk_handle,synTime,face_frequency);
cameraHandle->initAlgorithmParameter(heightReference);
QString pwd="admin2024";
QString sid="MERCURY_8C4F";
cameraHandle->sdkWifi(pwd,sid);
// QString pwd="admin2024";
// QString sid="MERCURY_8C4F";
// cameraHandle->sdkWifi(pwd,sid);
vides_data::requestCameraInfo camera_info;
camera_info.sSn=parameter.sSn;
camera_info.ip_addr=parameter.sDevId;
......
......@@ -2,14 +2,12 @@
#define MAINWINDOW_H
#include "Common.h"
#include "FaceRecognition.h"
#include "LicensePlateRecognition.h"
#include "hyper_lpr_sdk.h"
#include "CameraHandle.h"
#include "HttpService.h"
#include "VidesData.h"
#include "MediaFaceImage.h"
#include "HumanDetection.h"
#include "HumanDetectionManage.h"
#include <algorithm>
#include <QString>
#include <QTextCodec>
......@@ -47,6 +45,7 @@ public:
const std::list<vides_data::responseArea>&areas,int algorithm,std::list<vides_data::requestCameraInfo>&camera_info_list);
__uint8_t intToUint8t(int algorithm);
static MainWindow * sp_this;
CameraHandle* findHandle(QString sn);
......@@ -79,6 +78,11 @@ public:
void deleteCloudNotCamer (const std::map<QString,vides_data::localDeviceStatus*>& localDevices,
const std::list<vides_data::responseDeviceStatus>& devices);
void batchUpdatesCameraImageMap();
void batchUpdatesFeatureRemove();
~MainWindow();
signals:
void shutdownSignals(QString sDevId, int nDevPort);
......@@ -116,5 +120,6 @@ private:
QString modelPaths;
std::map<QString,CameraHandle*>faceDetectionParkingPushs;
};
#endif // MAINWINDOW_H