
Matrix Completion with IGMC: Study Notes

Contents

The Inductive Graph-based Matrix Completion (IGMC) model

IGMC inference example


The Inductive Graph-based Matrix Completion (IGMC) model

Original code:

IGMC/models.py at master · muhanzhang/IGMC · GitHub

GNN inference example

torch_geometric version: torch_geometric-2.5.3

The original code raised an error because edge_type was not found in the toy data; it was fixed by removing that argument from the dropout_adj call (a sketch using the newer dropout_edge API follows the example below):

import torch
import torch.nn.functional as F
from torch.nn import Linear
from torch_geometric.nn import GCNConv, global_add_pool
from torch_geometric.utils import dropout_adj
from torch_geometric.data import Data, DataLoader


class GNN(torch.nn.Module):
    # a base GNN class, GCN message passing + sum_pooling
    def __init__(self, dataset, gconv=GCNConv, latent_dim=[32, 32, 32, 1],
                 regression=False, adj_dropout=0.2, force_undirected=False):
        super(GNN, self).__init__()
        self.regression = regression
        self.adj_dropout = adj_dropout
        self.force_undirected = force_undirected
        self.convs = torch.nn.ModuleList()
        self.convs.append(gconv(dataset.num_features, latent_dim[0]))
        for i in range(0, len(latent_dim) - 1):
            self.convs.append(gconv(latent_dim[i], latent_dim[i + 1]))
        self.lin1 = Linear(sum(latent_dim), 128)
        if self.regression:
            self.lin2 = Linear(128, 1)
        else:
            self.lin2 = Linear(128, dataset.num_classes)

    def reset_parameters(self):
        for conv in self.convs:
            conv.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        if self.adj_dropout > 0:
            edge_index, _ = dropout_adj(
                edge_index, p=self.adj_dropout,
                force_undirected=self.force_undirected, num_nodes=len(x),
                training=self.training
            )
        concat_states = []
        for conv in self.convs:
            x = torch.tanh(conv(x, edge_index))
            concat_states.append(x)
        concat_states = torch.cat(concat_states, 1)
        x = global_add_pool(concat_states, batch)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        if self.regression:
            return x[:, 0]
        else:
            return F.log_softmax(x, dim=-1)

    def __repr__(self):
        return self.__class__.__name__


# A simple dataset class that mimics the dataset attributes the model expects
class SimpleDataset:
    num_features = 2
    num_classes = 2


# Build a small toy graph
edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 3, 2]], dtype=torch.long)
x = torch.tensor([[1, 0], [0, 1], [1, 0], [0, 1]], dtype=torch.float)
batch = torch.tensor([0, 0, 1, 1], dtype=torch.long)

# Wrap the graph in a Data object
data = Data(x=x, edge_index=edge_index, batch=batch)

# Build the DataLoader
loader = DataLoader([data], batch_size=2, shuffle=False)
dataset = SimpleDataset()

# Instantiate the model
model = GNN(dataset)

# Inference
model.eval()
for data in loader:
    out = model(data)
    print(out)
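A note on the fix: dropout_adj has been deprecated in recent torch_geometric releases in favor of dropout_edge, which returns the kept edge_index together with an edge mask, so edge types can be subsampled instead of dropped from the call. The following is a minimal sketch of my own (not from the IGMC repo), assuming a torch_geometric version where dropout_edge is available:

import torch
from torch_geometric.utils import dropout_edge

# Toy edges and edge types, matching the example above.
edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 3, 2]], dtype=torch.long)
edge_type = torch.tensor([0, 1, 2, 3], dtype=torch.long)

# dropout_edge returns the retained edge_index plus a mask over the original edges;
# the same mask can be applied to edge_type so relation labels stay aligned.
edge_index, edge_mask = dropout_edge(edge_index, p=0.2,
                                     force_undirected=False, training=True)
edge_type = edge_type[edge_mask]
print(edge_index)
print(edge_type)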

IGMC inference example:


import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Linear, Conv1d
from torch_geometric.nn import GCNConv, RGCNConv, global_sort_pool, global_add_pool
from torch_geometric.utils import dropout_adj
from util_functions import *
import pdb
import time
from torch_geometric.data import Data, DataLoader


class GNN(torch.nn.Module):
    # a base GNN class, GCN message passing + sum_pooling
    def __init__(self, dataset, gconv=GCNConv, latent_dim=[32, 32, 32, 1],
                 regression=False, adj_dropout=0.2, force_undirected=False):
        super(GNN, self).__init__()
        self.regression = regression
        self.adj_dropout = adj_dropout
        self.force_undirected = force_undirected
        self.convs = torch.nn.ModuleList()
        self.convs.append(gconv(dataset.num_features, latent_dim[0]))
        for i in range(0, len(latent_dim) - 1):
            self.convs.append(gconv(latent_dim[i], latent_dim[i + 1]))
        self.lin1 = Linear(sum(latent_dim), 128)
        if self.regression:
            self.lin2 = Linear(128, 1)
        else:
            self.lin2 = Linear(128, dataset.num_classes)

    def reset_parameters(self):
        for conv in self.convs:
            conv.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        if self.adj_dropout > 0:
            # edge_index, edge_type = dropout_adj(
            #     edge_index, edge_type, p=self.adj_dropout,
            #     force_undirected=self.force_undirected, num_nodes=len(x),
            #     training=self.training
            # )
            edge_index, edge_type = dropout_adj(
                edge_index, p=self.adj_dropout,
                force_undirected=self.force_undirected, num_nodes=len(x),
                training=self.training
            )
        concat_states = []
        for conv in self.convs:
            x = torch.tanh(conv(x, edge_index))
            concat_states.append(x)
        concat_states = torch.cat(concat_states, 1)
        x = global_add_pool(concat_states, batch)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        if self.regression:
            return x[:, 0]
        else:
            return F.log_softmax(x, dim=-1)

    def __repr__(self):
        return self.__class__.__name__


class IGMC(GNN):
    # The GNN model of Inductive Graph-based Matrix Completion.
    # Use RGCN convolution + center-nodes readout.
    def __init__(self, dataset, gconv=RGCNConv, latent_dim=[32, 32, 32, 32],
                 num_relations=5, num_bases=2, regression=False, adj_dropout=0.2,
                 force_undirected=False, side_features=False, n_side_features=0,
                 multiply_by=1):
        super(IGMC, self).__init__(
            dataset, GCNConv, latent_dim, regression, adj_dropout, force_undirected
        )
        self.multiply_by = multiply_by
        self.convs = torch.nn.ModuleList()
        self.convs.append(gconv(dataset.num_features, latent_dim[0], num_relations, num_bases))
        for i in range(0, len(latent_dim) - 1):
            self.convs.append(gconv(latent_dim[i], latent_dim[i + 1], num_relations, num_bases))
        self.lin1 = Linear(2 * sum(latent_dim), 128)
        self.side_features = side_features
        if side_features:
            self.lin1 = Linear(2 * sum(latent_dim) + n_side_features, 128)

    def forward(self, data):
        start = time.time()
        x, edge_index, edge_type, batch = data.x, data.edge_index, data.edge_type, data.batch
        if self.adj_dropout > 0:
            edge_index, edge_type = dropout_adj(
                edge_index, edge_type, p=self.adj_dropout,
                force_undirected=self.force_undirected, num_nodes=len(x),
                training=self.training
            )
        concat_states = []
        for conv in self.convs:
            x = torch.tanh(conv(x, edge_index, edge_type))
            concat_states.append(x)
        concat_states = torch.cat(concat_states, 1)
        users = data.x[:, 0] == 1
        items = data.x[:, 1] == 1
        x = torch.cat([concat_states[users], concat_states[items]], 1)
        if self.side_features:
            x = torch.cat([x, data.u_feature, data.v_feature], 1)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        if self.regression:
            return x[:, 0] * self.multiply_by
        else:
            return F.log_softmax(x, dim=-1)


# A simple dataset class that mimics the dataset attributes the model expects
class SimpleDataset:
    num_features = 2
    num_classes = 2


# Build a small toy graph with edge types
edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 3, 2]], dtype=torch.long)
edge_type = torch.tensor([0, 1, 2, 3], dtype=torch.long)
x = torch.tensor([[1, 0], [0, 1], [1, 0], [0, 1]], dtype=torch.float)
batch = torch.tensor([0, 0, 1, 1], dtype=torch.long)

# Wrap the graph in a Data object
data = Data(x=x, edge_index=edge_index, edge_type=edge_type, batch=batch)

# Build the DataLoader
loader = DataLoader([data], batch_size=2, shuffle=False)
dataset = SimpleDataset()

# Instantiate the model
model = IGMC(dataset)

# Inference
model.eval()
for data in loader:
    out = model(data)
    print(out)
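The run above keeps the default regression=False, so the output is log-probabilities over dataset.num_classes rating classes. For rating prediction as in the IGMC paper, the model is usually constructed with regression=True, in which case forward returns one scalar per enclosing subgraph, scaled by multiply_by. A minimal sketch of that usage, reusing the toy dataset and loader above (the numbers come from untrained weights and only illustrate the output shape):

# Regression-mode inference sketch: one scalar rating per subgraph in the batch.
reg_model = IGMC(dataset, regression=True, multiply_by=1)
reg_model.eval()
with torch.no_grad():
    for batch_data in loader:
        pred = reg_model(batch_data)  # shape: [num_subgraphs]
        print(pred)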
