pointnet2_cls_msg.py
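
""" PointNet++ classification network with multi-scale grouping (MSG).
    Input is a BxNx3 batch of point clouds; output is Bx40 class logits
    (40 classes, e.g. ModelNet40). TensorFlow 1.x graph-mode model and loss. """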
import tensorflow as tf

from util.tf_util import fully_connected, dropout
from util.pointnet_util import pointnet_sa_module, pointnet_sa_module_msg


def placeholder_inputs(batch_size, num_point):
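    """ Create TF1 placeholders: point clouds of shape BxNx3 and integer class labels of shape B. """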
pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
labels_pl = tf.placeholder(tf.int32, shape=(batch_size))
return pointclouds_pl, labels_pl


def get_model(point_cloud, is_training, bn_decay=None):
""" Classification PointNet, input is BxNx3, output Bx40 """
batch_size = point_cloud.get_shape()[0].value
l0_xyz = point_cloud
l0_points = None
# Set abstraction layers
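    # layer1: sample 512 centroids; group at radii 0.1/0.2/0.4 with 16/32/128
    # neighbours per scale; per-scale MLPs yield 64+128+128 = 320-d features per centroid.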
l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points, 512, [0.1, 0.2, 0.4], [16, 32, 128],
[[32, 32, 64], [64, 64, 128], [64, 96, 128]], is_training, bn_decay,
scope='layer1', use_nchw=True)
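    # layer2: sample 128 centroids; group at radii 0.2/0.4/0.8 with 32/64/128
    # neighbours per scale; per-scale MLPs yield 128+256+256 = 640-d features per centroid.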
l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points, 128, [0.2, 0.4, 0.8], [32, 64, 128],
[[64, 64, 128], [128, 128, 256], [128, 128, 256]], is_training, bn_decay,
scope='layer2')
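    # layer3: group_all=True pools all remaining points into a single
    # 1024-d global feature vector per cloud.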
l3_xyz, l3_points, _ = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None,
mlp=[256, 512, 1024], mlp2=None, group_all=True, is_training=is_training,
bn_decay=bn_decay, scope='layer3')
# Fully connected layers
net = tf.reshape(l3_points, [batch_size, -1])
net = fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
net = dropout(net, keep_prob=0.4, is_training=is_training, scope='dp1')
net = fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
net = dropout(net, keep_prob=0.4, is_training=is_training, scope='dp2')
net = fully_connected(net, 40, activation_fn=None, scope='fc3')
return net


def get_loss(pred, label):
""" pred: B*NUM_CLASSES,
label: B, """
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
classify_loss = tf.reduce_mean(loss)
tf.summary.scalar('classify loss', classify_loss)
    tf.add_to_collection('losses', classify_loss)
    return classify_loss
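

if __name__ == '__main__':
    # Quick shape check -- a minimal sketch, assuming TensorFlow 1.x graph mode
    # and the util modules above on the Python path: build the model for a
    # batch of 8 clouds with 1024 points each and print the logits tensor.
    with tf.Graph().as_default():
        inputs = tf.zeros((8, 1024, 3))
        logits = get_model(inputs, tf.constant(True))
        print(logits)  # expected shape: (8, 40)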