Deep Learning for Image Segmentation, from Beginner to Expert: Cell Segmentation with UNet++

Model

import torch
from torch import nn

__all__ = ['UNet', 'NestedUNet']


# Basic building block: two 3x3 Conv -> BatchNorm -> ReLU stages (VGG-style)
class VGGBlock(nn.Module):
    def __init__(self, in_channels, middle_channels, out_channels):
        super().__init__()
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_channels, middle_channels, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(middle_channels)
        self.conv2 = nn.Conv2d(middle_channels, out_channels, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        return out


class UNet(nn.Module):
    def __init__(self, num_classes, input_channels=3, **kwargs):
        super().__init__()

        nb_filter = [32, 64, 128, 256, 512]

        self.pool = nn.MaxPool2d(2, 2)
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)  # scale_factor: upsampling factor; bilinear interpolation

        self.conv0_0 = VGGBlock(input_channels, nb_filter[0], nb_filter[0])
        self.conv1_0 = VGGBlock(nb_filter[0], nb_filter[1], nb_filter[1])
        self.conv2_0 = VGGBlock(nb_filter[1], nb_filter[2], nb_filter[2])
        self.conv3_0 = VGGBlock(nb_filter[2], nb_filter[3], nb_filter[3])
        self.conv4_0 = VGGBlock(nb_filter[3], nb_filter[4], nb_filter[4])

        self.conv3_1 = VGGBlock(nb_filter[3]+nb_filter[4], nb_filter[3], nb_filter[3])
        self.conv2_2 = VGGBlock(nb_filter[2]+nb_filter[3], nb_filter[2], nb_filter[2])
        self.conv1_3 = VGGBlock(nb_filter[1]+nb_filter[2], nb_filter[1], nb_filter[1])
        self.conv0_4 = VGGBlock(nb_filter[0]+nb_filter[1], nb_filter[0], nb_filter[0])

        self.final = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)


    def forward(self, input):
        x0_0 = self.conv0_0(input)
        x1_0 = self.conv1_0(self.pool(x0_0))
        x2_0 = self.conv2_0(self.pool(x1_0))
        x3_0 = self.conv3_0(self.pool(x2_0))
        x4_0 = self.conv4_0(self.pool(x3_0))

        x3_1 = self.conv3_1(torch.cat([x3_0, self.up(x4_0)], 1))
        x2_2 = self.conv2_2(torch.cat([x2_0, self.up(x3_1)], 1))
        x1_3 = self.conv1_3(torch.cat([x1_0, self.up(x2_2)], 1))
        x0_4 = self.conv0_4(torch.cat([x0_0, self.up(x1_3)], 1))

        output = self.final(x0_4)
        return output


class NestedUNet(nn.Module):
    def __init__(self, num_classes, input_channels=3, deep_supervision=False, **kwargs):
        super().__init__()

        nb_filter = [32, 64, 128, 256, 512]

        self.deep_supervision = deep_supervision

        self.pool = nn.MaxPool2d(2, 2)
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)

        self.conv0_0 = VGGBlock(input_channels, nb_filter[0], nb_filter[0])
        self.conv1_0 = VGGBlock(nb_filter[0], nb_filter[1], nb_filter[1])
        self.conv2_0 = VGGBlock(nb_filter[1], nb_filter[2], nb_filter[2])
        self.conv3_0 = VGGBlock(nb_filter[2], nb_filter[3], nb_filter[3])
        self.conv4_0 = VGGBlock(nb_filter[3], nb_filter[4], nb_filter[4])

        self.conv0_1 = VGGBlock(nb_filter[0]+nb_filter[1], nb_filter[0], nb_filter[0])
        self.conv1_1 = VGGBlock(nb_filter[1]+nb_filter[2], nb_filter[1], nb_filter[1])
        self.conv2_1 = VGGBlock(nb_filter[2]+nb_filter[3], nb_filter[2], nb_filter[2])
        self.conv3_1 = VGGBlock(nb_filter[3]+nb_filter[4], nb_filter[3], nb_filter[3])

        self.conv0_2 = VGGBlock(nb_filter[0]*2+nb_filter[1], nb_filter[0], nb_filter[0])
        self.conv1_2 = VGGBlock(nb_filter[1]*2+nb_filter[2], nb_filter[1], nb_filter[1])
        self.conv2_2 = VGGBlock(nb_filter[2]*2+nb_filter[3], nb_filter[2], nb_filter[2])

        self.conv0_3 = VGGBlock(nb_filter[0]*3+nb_filter[1], nb_filter[0], nb_filter[0])
        self.conv1_3 = VGGBlock(nb_filter[1]*3+nb_filter[2], nb_filter[1], nb_filter[1])

        self.conv0_4 = VGGBlock(nb_filter[0]*4+nb_filter[1], nb_filter[0], nb_filter[0])

        if self.deep_supervision:
            self.final1 = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)
            self.final2 = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)
            self.final3 = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)
            self.final4 = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)
        else:
            self.final = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)


    def forward(self, input):
        # print('input:',input.shape)
        x0_0 = self.conv0_0(input)
        # print('x0_0:',x0_0.shape)
        x1_0 = self.conv1_0(self.pool(x0_0))
        # print('x1_0:',x1_0.shape)
        x0_1 = self.conv0_1(torch.cat([x0_0, self.up(x1_0)], 1))
        # print('x0_1:',x0_1.shape)

        x2_0 = self.conv2_0(self.pool(x1_0))
        # print('x2_0:',x2_0.shape)
        x1_1 = self.conv1_1(torch.cat([x1_0, self.up(x2_0)], 1))
        # print('x1_1:',x1_1.shape)
        x0_2 = self.conv0_2(torch.cat([x0_0, x0_1, self.up(x1_1)], 1))
        # print('x0_2:',x0_2.shape)

        x3_0 = self.conv3_0(self.pool(x2_0))
        # print('x3_0:',x3_0.shape)
        x2_1 = self.conv2_1(torch.cat([x2_0, self.up(x3_0)], 1))
        # print('x2_1:',x2_1.shape)
        x1_2 = self.conv1_2(torch.cat([x1_0, x1_1, self.up(x2_1)], 1))
        # print('x1_2:',x1_2.shape)
        x0_3 = self.conv0_3(torch.cat([x0_0, x0_1, x0_2, self.up(x1_2)], 1))
        # print('x0_3:',x0_3.shape)
        x4_0 = self.conv4_0(self.pool(x3_0))
        # print('x4_0:',x4_0.shape)
        x3_1 = self.conv3_1(torch.cat([x3_0, self.up(x4_0)], 1))
        # print('x3_1:',x3_1.shape)
        x2_2 = self.conv2_2(torch.cat([x2_0, x2_1, self.up(x3_1)], 1))
        # print('x2_2:',x2_2.shape)
        x1_3 = self.conv1_3(torch.cat([x1_0, x1_1, x1_2, self.up(x2_2)], 1))
        # print('x1_3:',x1_3.shape)
        x0_4 = self.conv0_4(torch.cat([x0_0, x0_1, x0_2, x0_3, self.up(x1_3)], 1))
        # print('x0_4:',x0_4.shape)

        if self.deep_supervision:
            output1 = self.final1(x0_1)
            output2 = self.final2(x0_2)
            output3 = self.final3(x0_3)
            output4 = self.final4(x0_4)
            return [output1, output2, output3, output4]

        else:
            output = self.final(x0_4)
            return output
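
As a quick sanity check (a minimal sketch): the input height and width must be divisible by 16 so the four 2x poolings and bilinear upsamplings line up, and with deep_supervision=True the UNet++ model returns four full-resolution outputs, one per nested decoder depth.

if __name__ == '__main__':
    x = torch.randn(2, 3, 96, 96)
    unet = UNet(num_classes=1)
    print(unet(x).shape)  # torch.Size([2, 1, 96, 96])

    unetpp = NestedUNet(num_classes=1, deep_supervision=True)
    for out in unetpp(x):  # four heads, from x0_1 through x0_4
        print(out.shape)   # torch.Size([2, 1, 96, 96]) each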

Loss Functions

BCEDiceLoss:
  • This loss combines binary cross-entropy (BCE) loss with Dice loss.
  • BCE measures the pixel-wise agreement between the model output and the binary ground-truth labels.
  • Dice loss measures the similarity between the model output and the ground-truth labels; here it is computed as 1 minus the mean Dice similarity over the batch, so the smaller the loss, the higher the similarity.
LovaszHingeLoss:
  • This loss uses the Lovasz-Hinge loss, a loss function designed for imbalanced datasets that is especially well suited to pixel-level classification tasks.
  • Lovasz-Hinge loss handles class imbalance and boundary cases better than cross-entropy loss, and is more stable on imbalanced data.
    (See the explanation of LovaszHingeLoss in the formula section below.)
A test case is included in the `__main__` block at the end of the loss code below.

Contents of lovasz_losses.py:

"""
Lovasz-Softmax and Jaccard hinge loss in PyTorch
Maxim Berman 2018 ESAT-PSI KU Leuven (MIT License)
"""

from __future__ import print_function, division

import torch
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np

try:
    from itertools import ifilterfalse
except ImportError:  # py3k
    from itertools import filterfalse as ifilterfalse


def lovasz_grad(gt_sorted):
    """
    Computes gradient of the Lovasz extension w.r.t sorted errors
    See Alg. 1 in paper
    """
    p = len(gt_sorted)
    gts = gt_sorted.sum()
    intersection = gts - gt_sorted.float().cumsum(0)
    union = gts + (1 - gt_sorted).float().cumsum(0)
    jaccard = 1. - intersection / union
    if p > 1:  # cover 1-pixel case
        jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
    return jaccard
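
# Intuition: lovasz_grad converts the sorted ground-truth vector into the weight
# each sorted error receives in the Lovasz extension of the Jaccard loss.
# Hand-computed example (two foreground pixels, errors already sorted):
#   lovasz_grad(torch.tensor([1, 0, 1, 0]))
#   -> tensor([0.5000, 0.1667, 0.3333, 0.0000])   # non-negative, sums to 1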


def iou_binary(preds, labels, EMPTY=1., ignore=None, per_image=True):
    """
    IoU for foreground class
    binary: 1 foreground, 0 background
    """
    if not per_image:
        preds, labels = (preds,), (labels,)
    ious = []
    for pred, label in zip(preds, labels):
        intersection = ((label == 1) & (pred == 1)).sum()
        union = ((label == 1) | ((pred == 1) & (label != ignore))).sum()
        if not union:
            iou = EMPTY
        else:
            iou = float(intersection) / float(union)
        ious.append(iou)
    iou = mean(ious)  # mean across images if per_image
    return 100 * iou


def iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False):
    """
    Array of IoU for each (non ignored) class
    """
    if not per_image:
        preds, labels = (preds,), (labels,)
    ious = []
    for pred, label in zip(preds, labels):
        iou = []
        for i in range(C):
            if i != ignore:  # The ignored label is sometimes among predicted classes (ENet - CityScapes)
                intersection = ((label == i) & (pred == i)).sum()
                union = ((label == i) | ((pred == i) & (label != ignore))).sum()
                if not union:
                    iou.append(EMPTY)
                else:
                    iou.append(float(intersection) / float(union))
        ious.append(iou)
    ious = [mean(iou) for iou in zip(*ious)]  # mean across images if per_image
    return 100 * np.array(ious)


# --------------------------- BINARY LOSSES ---------------------------


def lovasz_hinge(logits, labels, per_image=True, ignore=None):
    """
    Binary Lovasz hinge loss
      logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
      labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
      per_image: compute the loss per image instead of per batch
      ignore: void class id
    """
    if per_image:
        loss = mean(lovasz_hinge_flat(*flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore))
                    for log, lab in zip(logits, labels))
    else:
        loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))
    return loss


def lovasz_hinge_flat(logits, labels):
    """
    Binary Lovasz hinge loss
      logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
      labels: [P] Tensor, binary ground truth labels (0 or 1)
      ignore: label to ignore
    """
    if len(labels) == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.
    signs = 2. * labels.float() - 1.
    errors = (1. - logits * Variable(signs))
    errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
    perm = perm.data
    gt_sorted = labels[perm]
    grad = lovasz_grad(gt_sorted)
    loss = torch.dot(F.relu(errors_sorted), Variable(grad))
    return loss


def flatten_binary_scores(scores, labels, ignore=None):
    """
    Flattens predictions in the batch (binary case)
    Remove labels equal to 'ignore'
    """
    scores = scores.view(-1)
    labels = labels.view(-1)
    if ignore is None:
        return scores, labels
    valid = (labels != ignore)
    vscores = scores[valid]
    vlabels = labels[valid]
    return vscores, vlabels


class StableBCELoss(torch.nn.modules.Module):
    def __init__(self):
        super(StableBCELoss, self).__init__()

    def forward(self, input, target):
        neg_abs = - input.abs()
        loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log()
        return loss.mean()
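
# Note: max(x, 0) - x*t + log(1 + exp(-|x|)) is the standard numerically stable
# form of binary cross-entropy with logits (the same identity used by
# F.binary_cross_entropy_with_logits).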


def binary_xloss(logits, labels, ignore=None):
    """
    Binary Cross entropy loss
      logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
      labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
      ignore: void class id
    """
    logits, labels = flatten_binary_scores(logits, labels, ignore)
    loss = StableBCELoss()(logits, Variable(labels.float()))
    return loss


# --------------------------- MULTICLASS LOSSES ---------------------------


def lovasz_softmax(probas, labels, classes='present', per_image=False, ignore=None):
    """
    Multi-class Lovasz-Softmax loss
      probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1).
              Interpreted as binary (sigmoid) output with outputs of size [B, H, W].
      labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)
      classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
      per_image: compute the loss per image instead of per batch
      ignore: void class labels
    """
    if per_image:
        loss = mean(lovasz_softmax_flat(*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore), classes=classes)
                    for prob, lab in zip(probas, labels))
    else:
        loss = lovasz_softmax_flat(*flatten_probas(probas, labels, ignore), classes=classes)
    return loss


def lovasz_softmax_flat(probas, labels, classes='present'):
    """
    Multi-class Lovasz-Softmax loss
      probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
      labels: [P] Tensor, ground truth labels (between 0 and C - 1)
      classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
    """
    if probas.numel() == 0:
        # only void pixels, the gradients should be 0
        return probas * 0.
    C = probas.size(1)
    losses = []
    class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
    for c in class_to_sum:
        fg = (labels == c).float()  # foreground for class c
        if (classes == 'present' and fg.sum() == 0):
            continue
        if C == 1:
            if len(classes) > 1:
                raise ValueError('Sigmoid output possible only with 1 class')
            class_pred = probas[:, 0]
        else:
            class_pred = probas[:, c]
        errors = (Variable(fg) - class_pred).abs()
        errors_sorted, perm = torch.sort(errors, 0, descending=True)
        perm = perm.data
        fg_sorted = fg[perm]
        losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))
    return mean(losses)


def flatten_probas(probas, labels, ignore=None):
    """
    Flattens predictions in the batch
    """
    if probas.dim() == 3:
        # assumes output of a sigmoid layer
        B, H, W = probas.size()
        probas = probas.view(B, 1, H, W)
    B, C, H, W = probas.size()
    probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C)  # B * H * W, C = P, C
    labels = labels.view(-1)
    if ignore is None:
        return probas, labels
    valid = (labels != ignore)
    vprobas = probas[valid.nonzero().squeeze()]
    vlabels = labels[valid]
    return vprobas, vlabels


def xloss(logits, labels, ignore=None):
    """
    Cross entropy loss
    """
    return F.cross_entropy(logits, Variable(labels), ignore_index=255)


# --------------------------- HELPER FUNCTIONS ---------------------------
def isnan(x):
    return x != x


def mean(l, ignore_nan=False, empty=0):
    """
    nanmean compatible with generators.
    """
    l = iter(l)
    if ignore_nan:
        l = ifilterfalse(isnan, l)
    try:
        n = 1
        acc = next(l)
    except StopIteration:
        if empty == 'raise':
            raise ValueError('Empty mean')
        return empty
    for n, v in enumerate(l, 2):
        acc += v
    if n == 1:
        return acc
    return acc / n

The loss module, which builds on lovasz_losses.py:

import torch
import torch.nn as nn
import torch.nn.functional as F
from lovasz_losses import lovasz_hinge

# __all__ = ['BCEDiceLoss', 'LovaszHingeLoss']


class BCEDiceLoss(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, input, target):
        bce = F.binary_cross_entropy_with_logits(input, target)
        smooth = 1e-5
        input = torch.sigmoid(input)
        num = target.size(0)
        input = input.view(num, -1)
        target = target.view(num, -1)
        intersection = (input * target)
        dice = (2. * intersection.sum(1) + smooth) / (input.sum(1) + target.sum(1) + smooth)
        dice = 1 - dice.sum() / num
        return 0.5 * bce + dice


class LovaszHingeLoss(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, input, target):
        input = input.squeeze(1)
        target = target.squeeze(1)
        loss = lovasz_hinge(input, target, per_image=True)

        return loss


if __name__ == '__main__':
    import torch

    # Assume the model output and the ground-truth label are binary images of size (1, H, W)
    output = torch.tensor([[[0.3, 0.7], [0.8, 0.6]]])  # model output (logits)
    # output = output.round().long()

    target = torch.tensor([[[0, 1], [1, 0]]], dtype=torch.float)  # ground-truth label

    bce_dice_loss = BCEDiceLoss()
    bce_dice = bce_dice_loss(output, target)

    lovasz_hinge_loss = LovaszHingeLoss()
    lovasz_hinge = lovasz_hinge_loss(output, target)

    print("BCE Dice Loss:", bce_dice)
    print("Lovasz Hinge Loss:", lovasz_hinge)

Principles and Formulas

How BCEDiceLoss works:
  • BCE Dice Loss combines binary cross-entropy loss with Dice loss:

$$BCE\_Dice\_Loss = 0.5 \times BCE + (1 - Dice)$$

where $BCE$ is the binary cross-entropy loss and $Dice$ is the Dice similarity. Minimizing this loss simultaneously minimizes the cross-entropy and maximizes the Dice similarity, which tends to train segmentation models better than either term alone.
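
In the BCEDiceLoss code above, the Dice similarity is computed per sample from the sigmoid probabilities $p_i$ and binary targets $t_i$, with a smoothing term $\epsilon = 10^{-5}$ guarding against division by zero:

$$Dice = \frac{2\sum_i p_i t_i + \epsilon}{\sum_i p_i + \sum_i t_i + \epsilon}$$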

How LovaszHingeLoss works:
  • The Lovasz-Hinge loss targets pixel-level classification on imbalanced datasets; it is a convex surrogate for directly optimizing the Jaccard (IoU) index:

$$Lovasz\_Hinge\_Loss = \text{lovasz\_hinge}(input, target)$$

where $\text{lovasz\_hinge}$ is the function defined in lovasz_losses.py above.
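
Concretely, in lovasz_hinge_flat above: the labels are mapped to signs $s_i = 2y_i - 1$, the hinge errors $m_i = 1 - f_i s_i$ are sorted in decreasing order by a permutation $\pi$, and the loss is the dot product of the clipped sorted errors with the Lovasz gradient of the labels sorted the same way:

$$\text{lovasz\_hinge}(f, y) = \sum_i \max(0,\, m_{\pi(i)}) \cdot g_i, \qquad g = \text{lovasz\_grad}(y_\pi)$$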

Training

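A minimal training-loop sketch showing how the pieces above fit together (the dataset `train_dataset` yielding (image, mask) pairs, the batch size, epoch count, and learning rate are all illustrative assumptions, not the original settings):

import torch
from torch.utils.data import DataLoader

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = NestedUNet(num_classes=1, input_channels=3, deep_supervision=False).to(device)
criterion = BCEDiceLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
# assumed: train_dataset yields (image, mask) with image [3, H, W] float and
# mask [1, H, W] in {0, 1}; H and W divisible by 16 for the four pooling stages
train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)

for epoch in range(100):
    model.train()
    for image, mask in train_loader:
        image, mask = image.to(device), mask.to(device)
        output = model(image)
        loss = criterion(output, mask)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print(f'epoch {epoch}: loss {loss.item():.4f}, IoU {iou_score(output, mask):.4f}')

With deep_supervision=True the model returns four outputs; a common choice is to average the loss over all four, e.g. loss = sum(criterion(o, mask) for o in outputs) / 4.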

Evaluation Metrics

metrics.py

import numpy as np
import torch
import torch.nn.functional as F


def iou_score(output, target):
    smooth = 1e-5

    if torch.is_tensor(output):
        output = torch.sigmoid(output).data.cpu().numpy()
    if torch.is_tensor(target):
        target = target.data.cpu().numpy()
    output_ = output > 0.5
    target_ = target > 0.5
    intersection = (output_ & target_).sum()
    union = (output_ | target_).sum()

    return (intersection + smooth) / (union + smooth)


def dice_coef(output, target):
    smooth = 1e-5

    output = torch.sigmoid(output).view(-1).data.cpu().numpy()
    target = target.view(-1).data.cpu().numpy()
    intersection = (output * target).sum()

    return (2. * intersection + smooth) / \
        (output.sum() + target.sum() + smooth)


if __name__ == '__main__':
    import numpy as np
    import torch

    # Assume the model output and the ground-truth label are binary images of size (1, H, W)
    output = torch.tensor([[[0.3, 0.7], [0.8, 0.6]]])  # model output (logits)
    target = torch.tensor([[[0, 1], [1, 0]]])  # ground-truth label

    iou = iou_score(output, target)
    dice = dice_coef(output, target)

    print("IoU Score:", iou)
    print("Dice Coefficient:", dice)



How the IoU (Intersection over Union) score works

IoU is a widely used segmentation metric that measures the degree of overlap between the model output and the ground-truth labels:

$$IoU = \frac{TP}{TP + FP + FN}$$

where $TP$ is the number of true positives (pixels correctly predicted as positive), $FP$ the number of false positives (pixels incorrectly predicted as positive), and $FN$ the number of false negatives (positive pixels the model missed).

How the Dice coefficient works

The Dice coefficient is another widely used segmentation metric; it measures the similarity between the model output and the ground-truth labels:

$$Dice = \frac{2 \times TP}{2 \times TP + FP + FN}$$

where $TP$, $FP$, and $FN$ are defined as in the IoU formula.

Both scores place the true positives in the numerator and the total of true positives, false positives, and false negatives in the denominator, so both measure how closely the prediction matches the ground truth; the two are related by $Dice = \frac{2 \times IoU}{1 + IoU}$, so Dice is always at least as large as IoU. The smoothing factor in the code guards against a zero denominator and improves numerical stability. Note that iou_score binarizes predictions at 0.5 before counting, while dice_coef computes a soft Dice directly on the sigmoid probabilities.
