复现PointNet++(语义分割网络):Windows + PyTorch + S3DIS语义分割 + 代码

一、平台

Windows 10

GPU RTX 3090 + CUDA 11.1 + cudnn 8.9.6

Python 3.9

Torch 1.9.1 + cu111

所用的原始代码:https://github.com/yanx27/Pointnet_Pointnet2_pytorch

二、数据

Stanford3dDataset_v1.2_Aligned_Version

三、代码

分享给有需要的人,代码质量勿喷。

对源代码进行了简化和注释。

分割结果保存成txt,或者利用 laspy 生成点云。

别问为啥在C盘,问就是2T的三星980Pro

3.1 文件组织结构

3.2 数据预处理

3.2.1 run_collect_indoor3d_data.py 生成*.npy文件

改了路径

3.2.2 indoor3d_util.py

改了路径

3.2.3 S3DISDataLoader.py

改了路径

3.3 训练 train_SematicSegmentation.py

# 参考
# https://github.com/yanx27/Pointnet_Pointnet2_pytorch
# 先在Terminal运行:python -m visdom.server
# 再运行本文件

import argparse
import os
# import datetime
import logging
import importlib
import shutil
from tqdm import tqdm
import numpy as np
import time
import visdom
import torch
import warnings
warnings.filterwarnings('ignore')

from dataset.S3DISDataLoader import S3DISDataset
from PointNet2 import dataProcess


# PointNet
from PointNet2.pointnet_sem_seg import get_model as PNss
from PointNet2.pointnet_sem_seg import get_loss as PNloss

# PointNet++
from PointNet2.pointnet2_sem_seg import get_model as PN2SS
from PointNet2.pointnet2_sem_seg import get_loss as PN2loss


# True = use PointNet++; False = use PointNet.
PN2bool = True
# PN2bool = False


# Directory containing this file.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))

# Output directory for trained PointNet checkpoints.
dirModel1 = ROOT_DIR + '/trainModel/pointnet_model'
# exist_ok avoids the check-then-create race of `if not exists: makedirs`.
os.makedirs(dirModel1, exist_ok=True)
# Output directory for trained PointNet++ checkpoints.
dirModel2 = ROOT_DIR + '/trainModel/PointNet2_model'
os.makedirs(dirModel2, exist_ok=True)

# Log file path.
pathLog = os.path.join(ROOT_DIR, 'LOG_train.txt')

# Dataset path (preprocessed S3DIS .npy files).
pathDataset = os.path.join(ROOT_DIR, 'dataset/stanford_indoor3d/')

# S3DIS semantic classes, in label order.
classNumber = 13
classes = ['ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door', 'table', 'chair', 'sofa', 'bookcase',
           'board', 'clutter']
class2label = {cls: i for i, cls in enumerate(classes)}
seg_classes = class2label
# Inverse mapping: label index -> class name.
seg_label_to_cat = {i: cat for i, cat in enumerate(seg_classes.keys())}

# Logging helper: write to the log file and echo to stdout.
def log_string(msg):
    """Write *msg* to the module logger and echo it to stdout.

    The parameter was renamed from ``str``, which shadowed the builtin.
    """
    logger.info(msg)
    print(msg)

def inplace_relu(m):
    """Switch any ReLU-family module to in-place mode to save activation memory.

    Intended for use with ``nn.Module.apply``; modules whose class name does
    not contain 'ReLU' are left untouched.
    """
    if 'ReLU' in m.__class__.__name__:
        m.inplace = True

def parse_args():
    """Build and parse the training command-line arguments."""
    def _str2bool(v):
        # argparse's type=bool treats every non-empty string (even 'False')
        # as True; parse the text explicitly instead.
        if isinstance(v, bool):
            return v
        if v.lower() in ('true', '1', 'yes'):
            return True
        if v.lower() in ('false', '0', 'no'):
            return False
        raise argparse.ArgumentTypeError('boolean value expected')

    parser = argparse.ArgumentParser('Model')
    parser.add_argument('--pnModel', type=_str2bool, default=True, help='True = PointNet++;False = PointNet')
    parser.add_argument('--batch_size', type=int, default=32, help='Batch Size during training [default: 32]')
    parser.add_argument('--epoch', default=320, type=int, help='Epoch to run [default: 32]')
    parser.add_argument('--learning_rate', default=0.001, type=float, help='Initial learning rate [default: 0.001]')
    parser.add_argument('--GPU', type=str, default='0', help='GPU to use [default: GPU 0]')
    parser.add_argument('--optimizer', type=str, default='Adam', help='Adam or SGD [default: Adam]')
    parser.add_argument('--decay_rate', type=float, default=1e-4, help='weight decay [default: 1e-4]')
    parser.add_argument('--npoint', type=int, default=4096, help='Point Number [default: 4096]')
    parser.add_argument('--step_size', type=int, default=10, help='Decay step for lr decay [default: every 10 epochs]')
    parser.add_argument('--lr_decay', type=float, default=0.7, help='Decay rate for lr decay [default: 0.7]')
    parser.add_argument('--test_area', type=int, default=5, help='Which area to use for test, option: 1-6 [default: 5]')
    return parser.parse_args()


if __name__ == '__main__':
    # A visdom server must already be running:  python -m visdom.server
    visdomTL = visdom.Visdom()
    visdomTLwindow = visdomTL.line([0], [0], opts=dict(title='train_loss'))
    visdomVL = visdom.Visdom()
    visdomVLwindow = visdomVL.line([0], [0], opts=dict(title='validate_loss'))
    visdomTVL = visdom.Visdom(env='PointNet++')

    # region log file setup
    logger = logging.getLogger("train")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(pathLog)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    #endregion

    #region hyper-parameters
    args = parse_args()
    args.pnModel = PN2bool
    log_string('------------ hyper-parameter ------------')
    log_string(args)
    # restrict CUDA to the requested device
    os.environ["CUDA_VISIBLE_DEVICES"] = args.GPU
    pointNumber = args.npoint
    batchSize = args.batch_size
    #endregion

    # region dataset
    # training split
    trainData = S3DISDataset(split='train',
                             data_root=pathDataset, num_point=pointNumber,
                             test_area=args.test_area, block_size=1.0, sample_rate=1.0, transform=None)
    trainDataLoader = torch.utils.data.DataLoader(trainData, batch_size=batchSize, shuffle=True, num_workers=0,
                                                  pin_memory=True, drop_last=True,
                                                  worker_init_fn=lambda x: np.random.seed(x + int(time.time())))
    # validation split (the held-out test area)
    testData = S3DISDataset(split='test',
                            data_root=pathDataset, num_point=pointNumber,
                            test_area=args.test_area, block_size=1.0, sample_rate=1.0, transform=None)
    testDataLoader = torch.utils.data.DataLoader(testData, batch_size=batchSize, shuffle=False, num_workers=0,
                                                 pin_memory=True, drop_last=True)
    log_string("The number of training data is: %d" % len(trainData))
    log_string("The number of validation data is: %d" % len(testData))

    # per-class loss weights compensate for the class imbalance of S3DIS
    weights = torch.Tensor(trainData.labelweights).cuda()
    #endregion


    # region model selection: PointNet++ or PointNet
    if PN2bool:
        modelSS = PN2SS(classNumber).cuda()
        criterion = PN2loss().cuda()
    else:
        modelSS = PNss(classNumber).cuda()
        criterion = PNloss().cuda()
    # in-place ReLU reduces activation memory
    modelSS.apply(inplace_relu)


    # Xavier initialisation for a freshly created model
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            torch.nn.init.xavier_normal_(m.weight.data)
            torch.nn.init.constant_(m.bias.data, 0.0)
        elif classname.find('Linear') != -1:
            torch.nn.init.xavier_normal_(m.weight.data)
            torch.nn.init.constant_(m.bias.data, 0.0)

    # resume from the best checkpoint when one exists, otherwise train from scratch
    try:
        if PN2bool:
            path_premodel = os.path.join(dirModel2, 'best_model_S3DIS.pth')
        else:
            path_premodel = os.path.join(dirModel1, 'best_model_S3DIS.pth')
        checkpoint = torch.load(path_premodel)
        start_epoch = checkpoint['epoch']
        modelSS.load_state_dict(checkpoint['model_state_dict'])
        log_string('!!!!!!!!!! Use pretrain model')
    except Exception:
        # best-effort resume: any failure (missing/corrupt checkpoint) means a fresh start
        log_string('...... starting new training ......')
        start_epoch = 0
        modelSS = modelSS.apply(weights_init)
    #endregion


    #region optimizer and schedule constants
    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(
            modelSS.parameters(),
            lr=args.learning_rate,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=args.decay_rate
        )
    else:
        optimizer = torch.optim.SGD(modelSS.parameters(), lr=args.learning_rate, momentum=0.9)

    def bn_momentum_adjust(m, momentum):
        # step-decayed BatchNorm momentum, as in the reference implementation
        if isinstance(m, torch.nn.BatchNorm2d) or isinstance(m, torch.nn.BatchNorm1d):
            m.momentum = momentum

    LEARNING_RATE_CLIP = 1e-5
    MOMENTUM_ORIGINAL = 0.1
    MOMENTUM_DECCAY = 0.5
    MOMENTUM_DECCAY_STEP = args.step_size

    global_epoch = 0
    best_iou = 0
    #endregion


    for epoch in range(start_epoch, args.epoch):
        # region schedule updates
        log_string('****** Epoch %d (%d/%s) ******' % (global_epoch + 1, epoch + 1, args.epoch))

        # step-decayed learning rate, clipped from below
        lr = max(args.learning_rate * (args.lr_decay ** (epoch // args.step_size)), LEARNING_RATE_CLIP)
        log_string('Learning rate:%f' % lr)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

        momentum = MOMENTUM_ORIGINAL * (MOMENTUM_DECCAY ** (epoch // MOMENTUM_DECCAY_STEP))
        if momentum < 0.01:
            momentum = 0.01
        log_string('BN momentum updated to: %f' % momentum)

        modelSS = modelSS.apply(lambda x: bn_momentum_adjust(x, momentum))
        modelSS = modelSS.train()
        #endregion

        # region training pass
        num_batches = len(trainDataLoader)
        total_correct = 0
        total_seen = 0
        loss_sum = 0
        for i, (points, target) in tqdm(enumerate(trainDataLoader), total=len(trainDataLoader), smoothing=0.9):
            # zero the gradients
            optimizer.zero_grad()

            # augmentation: random rotation about the z axis
            points = points.data.numpy() # ndarray = bs,4096,9(xyz rgb nxnynz)
            points[:, :, :3] = dataProcess.rotate_point_cloud_z(points[:, :, :3])
            points = torch.Tensor(points) # tensor = bs,4096,9
            points, target = points.float().cuda(), target.long().cuda()
            points = points.transpose(2, 1) # tensor = bs,9,4096

            # forward pass
            seg_pred, trans_feat = modelSS(points) # tensor = bs,4096,13
            seg_pred = seg_pred.contiguous().view(-1, classNumber) # tensor = (bs*4096,13)

            # ground truth, flattened to per-point labels
            batch_label = target.view(-1, 1)[:, 0].cpu().data.numpy() # ndarray = bs*4096
            target = target.view(-1, 1)[:, 0] # tensor = bs*4096

            # loss
            loss = criterion(seg_pred, target, trans_feat, weights)
            loss.backward()

            # parameter update
            optimizer.step()

            pred_choice = seg_pred.cpu().data.max(1)[1].numpy() # ndarray = bs*4096
            correct = np.sum(pred_choice == batch_label) # number of correctly classified points

            total_correct += correct
            total_seen += (batchSize * pointNumber)
            # .item() detaches the scalar: accumulating the tensor itself would
            # keep every iteration's autograd graph alive (memory leak)
            loss_sum += loss.item()
        log_string('Training mean loss: %f' % (loss_sum / num_batches))
        log_string('Training accuracy: %f' % (total_correct / float(total_seen)))

        # draw
        trainLoss = loss_sum / num_batches
        visdomTL.line([trainLoss], [epoch+1], win=visdomTLwindow, update='append')
        #endregion

        # region save a checkpoint every epoch
        if epoch % 1 == 0:
            if PN2bool:
                modelpath = os.path.join(dirModel2, 'model' + str(epoch + 1) + '_S3DIS.pth')
            else:
                modelpath = os.path.join(dirModel1, 'model' + str(epoch + 1) + '_S3DIS.pth')

            state = {
                'epoch': epoch,
                'model_state_dict': modelSS.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }
            torch.save(state, modelpath)
            logger.info('Save model...'+modelpath)
        #endregion

        # region evaluation pass
        with torch.no_grad():
            num_batches = len(testDataLoader)
            total_correct = 0
            total_seen = 0
            loss_sum = 0
            labelweights = np.zeros(classNumber)
            total_seen_class = [0 for _ in range(classNumber)]
            total_correct_class = [0 for _ in range(classNumber)]
            total_iou_deno_class = [0 for _ in range(classNumber)]
            modelSS = modelSS.eval()

            log_string('****** Epoch Evaluation %d (%d/%s) ******' % (global_epoch + 1, epoch + 1, args.epoch))
            for i, (points, target) in tqdm(enumerate(testDataLoader), total=len(testDataLoader), smoothing=0.9):
                points = points.data.numpy() # ndarray = bs,4096,9
                points = torch.Tensor(points) # tensor = bs,4096,9
                points, target = points.float().cuda(), target.long().cuda()
                points = points.transpose(2, 1) # tensor = bs,9,4096

                seg_pred, trans_feat = modelSS(points) # tensor = bs,4096,13
                pred_val = seg_pred.contiguous().cpu().data.numpy() # ndarray = bs,4096,13
                seg_pred = seg_pred.contiguous().view(-1, classNumber) # tensor = bs*4096,13

                batch_label = target.cpu().data.numpy() # ndarray = bs,4096
                target = target.view(-1, 1)[:, 0] # tensor = bs*4096
                loss = criterion(seg_pred, target, trans_feat, weights)
                loss_sum += loss.item()
                pred_val = np.argmax(pred_val, 2) # ndarray = bs,4096
                correct = np.sum((pred_val == batch_label))
                total_correct += correct
                total_seen += (batchSize * pointNumber)
                tmp, _ = np.histogram(batch_label, range(classNumber + 1))
                labelweights += tmp

                for l in range(classNumber):
                    total_seen_class[l] += np.sum((batch_label == l))
                    total_correct_class[l] += np.sum((pred_val == l) & (batch_label == l))
                    total_iou_deno_class[l] += np.sum(((pred_val == l) | (batch_label == l)))

            labelweights = labelweights.astype(np.float32) / np.sum(labelweights.astype(np.float32))
            mIoU = np.mean(np.array(total_correct_class) / (np.array(total_iou_deno_class, dtype=np.float64) + 1e-6))
            log_string('eval mean loss: %f' % (loss_sum / float(num_batches)))
            log_string('eval point avg class IoU: %f' % (mIoU))
            log_string('eval point accuracy: %f' % (total_correct / float(total_seen)))
            log_string('eval point avg class acc: %f' % (
                np.mean(np.array(total_correct_class) / (np.array(total_seen_class, dtype=np.float64) + 1e-6))))

            iou_per_class_str = '------- IoU --------\n'
            for l in range(classNumber):
                iou_per_class_str += 'class %s weight: %.3f, IoU: %.3f \n' % (
                    seg_label_to_cat[l] + ' ' * (14 - len(seg_label_to_cat[l])),
                    labelweights[l],  # was labelweights[l - 1]: off-by-one paired class 0 with the last class's weight
                    total_correct_class[l] / (float(total_iou_deno_class[l]) + 1e-6))  # epsilon avoids /0 for absent classes

            log_string(iou_per_class_str)
            log_string('Eval mean loss: %f' % (loss_sum / num_batches))
            log_string('Eval accuracy: %f' % (total_correct / float(total_seen)))

            # draw
            valLoss = loss_sum / num_batches
            visdomVL.line([valLoss], [epoch+1], win=visdomVLwindow, update='append')

            # region keep the checkpoint with the best mIoU
            if mIoU >= best_iou:
                best_iou = mIoU
                if PN2bool:
                    bestmodelpath = os.path.join(dirModel2, 'best_model_S3DIS.pth')
                else:
                    bestmodelpath = os.path.join(dirModel1, 'best_model_S3DIS.pth')
                state = {
                    'epoch': epoch,
                    'class_avg_iou': mIoU,
                    'model_state_dict': modelSS.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                }
                torch.save(state, bestmodelpath)
                logger.info('Save best model......'+bestmodelpath)
            log_string('Best mIoU: %f' % best_iou)
            #endregion

        #endregion

        global_epoch += 1

        # draw the combined train/validate curves in one window
        visdomTVL.line(X=[epoch+1], Y=[trainLoss],name="train loss", win='line', update='append',
                       opts=dict(showlegend=True, markers=False,
                                 title='PointNet++ train validate loss',
                                 xlabel='epoch', ylabel='loss'))
        # was also named "train loss", which merged both curves into one legend entry
        visdomTVL.line(X=[epoch+1], Y=[valLoss], name="validate loss", win='line', update='append')

    log_string('-------------------------------------------------\n\n')

3.4 预测测试 test_SematicSegmentation.py

# 参考
# https://github.com/yanx27/Pointnet_Pointnet2_pytorch

import argparse
import sys
import os
import numpy as np
import logging
from pathlib import Path
import importlib
from tqdm import tqdm
import torch
import warnings
warnings.filterwarnings('ignore')

from dataset.S3DISDataLoader import ScannetDatasetWholeScene
from dataset.indoor3d_util import g_label2color

# PointNet
from PointNet2.pointnet_sem_seg import get_model as PNss
# PointNet++
from PointNet2.pointnet2_sem_seg import get_model as PN2SS



# Network selector: True = PointNet++, False = PointNet.
PN2bool = True
# PN2bool = False


# region helpers: voting; logging; saving results as LAS.
# Accumulate voted predictions
def add_vote(vote_label_pool, point_idx, pred_label, weight):
    """Accumulate class votes for every point of a scene.

    Args:
        vote_label_pool: (num_points, num_classes) running vote bins; updated in place.
        point_idx: (B, N) index of each predicted point within the whole scene.
        pred_label: (B, N) predicted class per point.
        weight: (B, N) sample weights; zero or infinite entries cast no vote.

    Returns:
        The same vote_label_pool array.
    """
    # Vectorised replacement of the original O(B*N) Python double loop.
    valid = (weight != 0) & ~np.isinf(weight)
    # np.add.at performs unbuffered in-place addition, so repeated
    # (point, label) pairs each contribute their own vote.
    np.add.at(vote_label_pool,
              (point_idx[valid].astype(int), pred_label[valid].astype(int)),
              1)
    return vote_label_pool


# Logging helper
def log_string(msg):
    """Write *msg* to the module logger and echo it to stdout.

    The parameter was renamed from ``str``, which shadowed the builtin.
    """
    logger.info(msg)
    print(msg)


# save to LAS
import laspy
def SaveResultLAS(newLasPath, point_np, rgb_np, label1, label2):
    """Write a LAS 1.2 point cloud carrying two per-point label channels.

    Args:
        newLasPath: output .las file path.
        point_np: (N, 3) array of XYZ coordinates.
        rgb_np: (N, 3) array of RGB values.
        label1: per-point label written to 'Classification' (here: ground truth).
        label2: per-point label written to 'UserData' (here: prediction).
    """
    # data
    newx = point_np[:, 0]
    newy = point_np[:, 1]
    newz = point_np[:, 2]
    newred = rgb_np[:, 0]
    newgreen = rgb_np[:, 1]
    newblue = rgb_np[:, 2]
    newclassification = label1
    newuserdata = label2
    # offsets anchor the header at the cloud minimum to preserve coordinate precision
    minx = min(newx)
    miny = min(newy)
    minz = min(newz)

    # create a new header
    newheader = laspy.LasHeader(point_format=3, version="1.2")
    newheader.scales = np.array([0.0001, 0.0001, 0.0001])
    newheader.offsets = np.array([minx, miny, minz])
    # NOTE(review): point format 3 already defines standard Classification/UserData
    # fields; extra dims with the same names may shadow them — confirm against the
    # installed laspy version.
    newheader.add_extra_dim(laspy.ExtraBytesParams(name="Classification", type=np.uint8))
    newheader.add_extra_dim(laspy.ExtraBytesParams(name="UserData", type=np.uint8))
    # create a Las
    newlas = laspy.LasData(newheader)
    newlas.x = newx
    newlas.y = newy
    newlas.z = newz
    newlas.red = newred
    newlas.green = newgreen
    newlas.blue = newblue
    newlas.Classification = newclassification
    newlas.UserData = newuserdata
    # write
    newlas.write(newLasPath)

# Command-line arguments
def parse_args():
    """Build and parse the evaluation command-line arguments."""
    def _str2bool(v):
        # argparse's type=bool treats every non-empty string (even 'False')
        # as True; parse the text explicitly instead.
        if isinstance(v, bool):
            return v
        if v.lower() in ('true', '1', 'yes'):
            return True
        if v.lower() in ('false', '0', 'no'):
            return False
        raise argparse.ArgumentTypeError('boolean value expected')

    parser = argparse.ArgumentParser('Model')
    parser.add_argument('--pnModel', type=_str2bool, default=True, help='True = PointNet++;False = PointNet')
    parser.add_argument('--batch_size', type=int, default=32, help='batch size in testing [default: 32]')
    parser.add_argument('--GPU', type=str, default='0', help='specify GPU device')
    parser.add_argument('--num_point', type=int, default=4096, help='point number [default: 4096]')
    parser.add_argument('--test_area', type=int, default=5, help='area for testing, option: 1-6 [default: 5]')
    parser.add_argument('--num_votes', type=int, default=1,
                        help='aggregate segmentation scores with voting [default: 1]')
    return parser.parse_args()

#endregion


# Directory containing this file.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))

# Checkpoint directory for the selected network.
pathTrainModel = os.path.join(
    ROOT_DIR, 'trainModel/PointNet2_model' if PN2bool else 'trainModel/pointnet_model')

# Directory where the prediction results are written.
visual_dir = Path(ROOT_DIR + ('/testResultPN2/' if PN2bool else '/testResultPN/'))
visual_dir.mkdir(exist_ok=True)

# Log file path.
pathLog = os.path.join(ROOT_DIR, 'LOG_test_eval.txt')

# Dataset path (preprocessed S3DIS .npy files).
pathDataset = os.path.join(ROOT_DIR, 'dataset/stanford_indoor3d/')

# S3DIS semantic classes, in label order.
classNumber = 13
classes = ['ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door', 'table', 'chair', 'sofa', 'bookcase',
           'board', 'clutter']
class2label = {cls: i for i, cls in enumerate(classes)}
seg_classes = class2label
# Inverse mapping: label index -> class name.
seg_label_to_cat = {i: cat for i, cat in enumerate(seg_classes.keys())}


if __name__ == '__main__':
    #region logging setup
    logger = logging.getLogger("test_eval")
    logger.setLevel(logging.INFO)  # levels: DEBUG, INFO, WARNING, ERROR, CRITICAL
    file_handler = logging.FileHandler(pathLog)
    file_handler.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    #endregion

    #region hyper-parameters
    args = parse_args()
    args.pnModel = PN2bool
    log_string('--- hyper-parameter ---')
    log_string(args)
    os.environ["CUDA_VISIBLE_DEVICES"] = args.GPU
    batchSize = args.batch_size
    pointNumber = args.num_point
    testArea = args.test_area
    voteNumber = args.num_votes
    #endregion


    #region ---------- load the semantic segmentation model ----------
    log_string("---------- Loading sematic segmentation model ----------")
    if PN2bool:
        ssModel = PN2SS(classNumber).cuda()
    else:
        ssModel = PNss(classNumber).cuda()
    path_model = os.path.join(pathTrainModel, 'best_model_S3DIS.pth')
    checkpoint = torch.load(path_model)
    ssModel.load_state_dict(checkpoint['model_state_dict'])
    ssModel = ssModel.eval()
    #endregion


    # inference/evaluation only: disabling autograd cuts memory use and speeds things up
    log_string('--- Evaluation whole scene')
    with torch.no_grad():
        # IoU accumulators over all evaluated scenes
        total_seen_class = [0 for _ in range(classNumber)]
        total_correct_class = [0 for _ in range(classNumber)]
        total_iou_deno_class = [0 for _ in range(classNumber)]

        # every scene file of the held-out test area
        testDataset = ScannetDatasetWholeScene(pathDataset, split='test', test_area=testArea, block_points=pointNumber)
        scene_id_name = testDataset.file_list
        scene_id_name = [x[:-4] for x in scene_id_name]  # names without extension
        testCount = len(scene_id_name)
        # NOTE(review): debug override — only the first scene is evaluated;
        # delete the next line to process the whole test area.
        testCount = 1
        # loop over the scenes to predict
        for batch_idx in range(testCount):
            log_string("Inference [%d/%d] %s ..." % (batch_idx + 1, testCount, scene_id_name[batch_idx]))
            # raw scene points
            whole_scene_data = testDataset.scene_points_list[batch_idx]
            # ground-truth labels
            whole_scene_label = testDataset.semantic_labels_list[batch_idx]
            whole_scene_labelR = np.reshape(whole_scene_label, (whole_scene_label.size, 1))
            # per-point vote bins for the predicted label
            vote_label_pool = np.zeros((whole_scene_label.shape[0], classNumber))

            # several voting rounds over the same scene
            for _ in tqdm(range(voteNumber), total=voteNumber):
                scene_data, scene_label, scene_smpw, scene_point_index = testDataset[batch_idx]
                num_blocks = scene_data.shape[0]
                s_batch_num = (num_blocks + batchSize - 1) // batchSize
                batch_data = np.zeros((batchSize, pointNumber, 9))

                batch_label = np.zeros((batchSize, pointNumber))
                batch_point_index = np.zeros((batchSize, pointNumber))
                batch_smpw = np.zeros((batchSize, pointNumber))

                for sbatch in range(s_batch_num):
                    start_idx = sbatch * batchSize
                    end_idx = min((sbatch + 1) * batchSize, num_blocks)
                    real_batch_size = end_idx - start_idx
                    # NOTE(review): rows >= real_batch_size keep data from the previous
                    # batch; they are sliced away before voting, so results are unaffected
                    batch_data[0:real_batch_size, ...] = scene_data[start_idx:end_idx, ...]
                    batch_label[0:real_batch_size, ...] = scene_label[start_idx:end_idx, ...]
                    batch_point_index[0:real_batch_size, ...] = scene_point_index[start_idx:end_idx, ...]
                    batch_smpw[0:real_batch_size, ...] = scene_smpw[start_idx:end_idx, ...]
                    batch_data[:, :, 3:6] /= 1.0

                    torch_data = torch.Tensor(batch_data)
                    torch_data = torch_data.float().cuda()
                    torch_data = torch_data.transpose(2, 1)
                    seg_pred, _ = ssModel(torch_data)
                    batch_pred_label = seg_pred.contiguous().cpu().data.max(2)[1].numpy()

                    # vote with the weighted per-point predictions
                    vote_label_pool = add_vote(vote_label_pool, batch_point_index[0:real_batch_size, ...],
                                               batch_pred_label[0:real_batch_size, ...],
                                               batch_smpw[0:real_batch_size, ...])

            # region save the predictions
            # final label = class with the most votes
            pred_label = np.argmax(vote_label_pool, 1)
            pred_labelR = np.reshape(pred_label, (pred_label.size, 1))

            # columns: xyz rgb (...), ground truth, prediction
            pcrgb_ll = np.hstack((whole_scene_data, whole_scene_labelR, pred_labelR))

            # ---------- save as txt ----------
            pathTXT = os.path.join(visual_dir, scene_id_name[batch_idx] + '.txt')
            np.savetxt(pathTXT, pcrgb_ll, fmt='%f', delimiter='\t')
            log_string('save:' + pathTXT)
            # ---------- save as las ----------
            pathLAS = os.path.join(visual_dir, scene_id_name[batch_idx] + '.las')
            SaveResultLAS(pathLAS, pcrgb_ll[:,0:3], pcrgb_ll[:,3:6], pcrgb_ll[:,6], pcrgb_ll[:,7])
            log_string('save:' + pathLAS)
            # endregion


            # per-scene IoU tallies
            total_seen_class_tmp = [0 for _ in range(classNumber)]
            total_correct_class_tmp = [0 for _ in range(classNumber)]
            total_iou_deno_class_tmp = [0 for _ in range(classNumber)]

            for l in range(classNumber):
                total_seen_class_tmp[l] += np.sum((whole_scene_label == l))
                total_correct_class_tmp[l] += np.sum((pred_label == l) & (whole_scene_label == l))
                total_iou_deno_class_tmp[l] += np.sum(((pred_label == l) | (whole_scene_label == l)))
                total_seen_class[l] += total_seen_class_tmp[l]
                total_correct_class[l] += total_correct_class_tmp[l]
                total_iou_deno_class[l] += total_iou_deno_class_tmp[l]

            iou_map = np.array(total_correct_class_tmp) / (np.array(total_iou_deno_class_tmp, dtype=np.float64) + 1e-6)
            print(iou_map)
            arr = np.array(total_seen_class_tmp)
            # mean IoU over the classes that actually occur in this scene
            tmp_iou = np.mean(iou_map[arr != 0])
            log_string('Mean IoU of %s: %.4f' % (scene_id_name[batch_idx], tmp_iou))


        IoU = np.array(total_correct_class) / (np.array(total_iou_deno_class, dtype=np.float64) + 1e-6)
        iou_per_class_str = '----- IoU -----\n'
        for l in range(classNumber):
            iou_per_class_str += 'class %s, IoU: %.3f \n' % (
                seg_label_to_cat[l] + ' ' * (14 - len(seg_label_to_cat[l])),
                total_correct_class[l] / (float(total_iou_deno_class[l]) + 1e-6))  # epsilon avoids /0 for absent classes
        log_string(iou_per_class_str)
        log_string('eval point avg class IoU: %f' % np.mean(IoU))
        log_string('eval whole scene point avg class acc: %f' % (
            np.mean(np.array(total_correct_class) / (np.array(total_seen_class, dtype=np.float64) + 1e-6))))
        log_string('eval whole scene point accuracy: %f' % (
                np.sum(total_correct_class) / float(np.sum(total_seen_class) + 1e-6)))

    log_string('--------------------------------------\n\n')

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.mfbz.cn/a/329560.html

如若内容造成侵权/违法违规/事实不符,请联系我们进行投诉反馈qq邮箱809451989@qq.com,一经查实,立即删除!

相关文章

2.6、云负载均衡产品详述

一、定义 弹性负载均衡(Elastic Load Balance，简称ELB)可将来自公网的访问流量分发到后端云主机，可选多种负载均衡策略，并支持自动检测云主机健康状况，消除单点故障，保障应用系统的高可用。 二、产品架构 1…

GEE:随机森林分类器投票方法的优化与修改

作者:CSDN @ _养乐多_ 在随机森林中,每棵决策树都对输入数据进行分类或回归,并产生一个输出。对于分类问题,这个输出通常是一个类别标签,而对于回归问题,输出通常是一个连续的数值。在分类问题中,每棵决策树会为每个样本投票,然后采用众数来确定最终的类别。例如,如果…

P3952 [NOIP2017 提高组] 时间复杂度————C++

目录 [NOIP2017 提高组] 时间复杂度题目背景题目描述输入格式输出格式样例 #1样例输入 #1样例输出 #1 提示 解题思路Code运行结果 [NOIP2017 提高组] 时间复杂度 题目背景 NOIP2017 提高组 D1T2 题目描述 小明正在学习一种新的编程语言 A&#xff0c;刚学会循环语句的他激动…

ilqr 算法说明

1 Introduction 希望能用比较简单的方式将ilqr算法进行整理和总结。 2 HJB方程 假定我们现在需要完成一个从A点到B点的任务&#xff0c;执行这段任务的时候&#xff0c;每一步都需要消耗能量&#xff0c;可以用下面这个图表示。 我们在执行这个A点到B点的任务的时候&#xf…

项目架构之Zabbix部署

1 项目架构 1.1 项目架构的组成 业务架构&#xff1a;客户端 → 防火墙 → 负载均衡&#xff08;四层、七层&#xff09; → web缓存/应用 → 业务逻辑&#xff08;动态应用&#xff09; → 数据缓存 → 数据持久层 运维架构&#xff1a;运维客户端 → 跳板机/堡垒机&#x…

2023年第十四届蓝桥杯软件赛省赛总评

报名明年4月蓝桥杯软件赛的同学们&#xff0c;如果你是大一零基础&#xff0c;目前懵懂中&#xff0c;不知该怎么办&#xff0c;可以看看本博客系列&#xff1a;备赛20周合集 20周的完整安排请点击&#xff1a;20周计划 每周发1个博客&#xff0c;共20周。 在QQ群上交流答疑&am…

VUE工程化--vue组件注册

组件注册的两种方式： 1. 局部注册：只能在注册的组件内使用 2. 全局注册：所有组件内都能使用 局部注册步骤： 1、导入 import MyHeader from "./components/myHeader.vue"; import MyMain from "./components/myMa…

TCP连接TIME_WAIT

TCP断开过程: TIME_WAIT的作用: TIME_WAIT状态存在的理由&#xff1a; 1&#xff09;可靠地实现TCP全双工连接的终止 在进行关闭连接四次挥手协议时&#xff0c;最后的ACK是由主动关闭端发出的&#xff0c;如果这个最终的ACK丢失&#xff0c;服务器将重发最终的FIN&#xf…

LLM漫谈(三)| 使用Chainlit和LangChain构建文档问答的LLM应用程序

一、Chainlit介绍 Chainlit是一个开源Python包&#xff0c;旨在彻底改变构建和共享语言模型&#xff08;LM&#xff09;应用程序的方式。Chainlit可以创建用户界面&#xff08;UI&#xff09;&#xff0c;类似于由OpenAI开发的ChatGPT用户界面&#xff0c;Chainlit可以开发类似…

虚拟机CentOS7.5编译安装Qt4.8.7

虚拟机CentOS7.5编译安装Qt4.8.7 一.下载Qt二.安装步骤 一.下载Qt 官网下载链接&#xff1a;Qt4.8.7 官网下载速度可能会非常慢&#xff0c;本人已上传至CSDN&#xff0c;点此下载&#xff0c;下载后需要先用7z软件解压成zip包。 二.安装步骤 环境安装 yum install libX11…

go语言(三)----函数

1、函数单变量返回 package mainimport "fmt"func fool(a string,b int) int {fmt.Println("a ",a)fmt.Println("b ",b)c : 100return c}func main() {c : fool("abc",555)fmt.Println("c ",c)}2、函数多变量返回 pack…

Nsis打包Unity Exe文件(通用)

Nsi 脚本 !include "MUI2.nsh"#使用现代UI Unicode true #使用Unicode !define EXENAME "exeName" #定义常量 exe名称 !define SHORTCUT "快捷方式名称" #定义桌面快捷方式的中文名称Name ${EXENAME} #安装程序的title OutFile "${EXENAME…

【C++】入门C++前想要了解的小知识

个人主页 &#xff1a; zxctsclrjjjcph 文章封面来自&#xff1a;艺术家–贤海林 如有转载请先通知 目录 1. 前言2. 什么是C3. C的发展史4. C的重要性4.1 语言的使用广泛度4.2 在工作领域中4.3 在校招领域中 5. 如何学习C5.1 看看别人怎么学习的5.2 自己怎么学 1. 前言 今天开…

FFmpeg之SwrRessample

文章目录 一、概述二、重采样流程三、重要结构体3.1、SwrContext3.2、ResamplerContext 四、重要函数4.1、swr_alloc4.2、swr_alloc_set_opts4.3、av_opt_set_*4.4、swr_init4.5、av_samples_alloc_array_and_samples4.6、av_samples_alloc4.7、swr_convert4.8、swr_get_delay4…

uniapp微信小程序投票系统实战 (SpringBoot2+vue3.2+element plus ) -投票帖子排行实现

锋哥原创的uniapp微信小程序投票系统实战&#xff1a; uniapp微信小程序投票系统实战课程 (SpringBoot2vue3.2element plus ) ( 火爆连载更新中... )_哔哩哔哩_bilibiliuniapp微信小程序投票系统实战课程 (SpringBoot2vue3.2element plus ) ( 火爆连载更新中... )共计21条视频…

【idea】idea插件编写教程,博主原创idea插件已上架idea插件市场 欢迎下载

前言&#xff1a;经常使用Objects.equals(a,b)方法的同学 应该或多或少都会因为粗心而传错参&#xff0c; 例如日常开发中 我们使用Objects.equals去比较 status(入参)&#xff0c;statusEnum(枚举), 很容易忘记statusEnum.getCode() 或 statusEnum.getVaule() &#xff0c;再比…

Java可视化物联网智慧工地综合云平台源码 私有化部署

智慧工地平台围绕建筑施工人、物、事的安全管理为核心&#xff0c;对应研发了劳务实名制、视频监控、扬尘监测、起重机械安全监测、安全帽监测等功能一体化管理的解决方案。 智慧工地是聚焦工程施工现场&#xff0c;紧紧围绕人、机、料、法、环等关键要素&#xff0c;综合运用…

docker安装运行CloudBeaver并设置默认语言为中文

1、CloudBeaver CloudBeaver 是一个开源的 Web 数据库管理工具&#xff0c;它提供了一个基于浏览器的用户界面&#xff0c;允许用户管理和操作各种类型的数据库。CloudBeaver 支持多种数据库系统&#xff0c;包括但不限于 PostgreSQL、MySQL、SQLite、Oracle、SQL Server 以及…

RabbitMQ入门精讲

1. 什么是消息队列 消息指的是两个应用间传递的数据。数据的类型有很多种形式&#xff0c;可能只包含文本字符串&#xff0c;也可能包含嵌入对象。 “消息队列(Message Queue)”是在消息的传输过程中保存消息的容器。在消息队列中&#xff0c;通常有生产者和消费者两个角色。…

【Java基础_01】Java运行机制及运行过程

【Java基础_01】Java运行机制及运行过程 文章目录 【Java基础_01】Java运行机制及运行过程1.Java 运行机制及运行过程1.1 Java 核心机制-Java 虚拟机 [JVM java virtual machine] 1.2 JDK&#xff0c;JRE1.3 JVM,JDK和JRE1.4 环境变量path1.4.1 为什么要配置path1.4.2 配置环…
最新文章