Science Score: 44.0%

This score indicates how likely this project is to be science-related based on various indicators:

  • CITATION.cff file
    Found CITATION.cff file
  • codemeta.json file
    Found codemeta.json file
  • .zenodo.json file
    Found .zenodo.json file
  • DOI references
  • Academic publication links
  • Academic email domains
  • Institutional organization owner
  • JOSS paper metadata
  • Scientific vocabulary similarity
    Low similarity (4.0%) to scientific vocabulary
Last synced: 6 months ago · JSON representation ·

Repository

Basic Info
  • Host: GitHub
  • Owner: huangJC0429
  • Language: Python
  • Default Branch: main
  • Size: 53.3 MB
Statistics
  • Stars: 1
  • Watchers: 1
  • Forks: 0
  • Open Issues: 0
  • Releases: 0
Created over 1 year ago · Last pushed 10 months ago
Metadata Files
Readme Citation

README.md

Enhancing the Influence of Labels on Unlabeled Nodes in Graph Convolutional Networks

This repository contains an implementation of Enhancing the Influence of Labels on Unlabeled Nodes in Graph Convolutional Networks (ICML2025).

Dependencies

  • CUDA 10.2.89
  • python 3.6.8
  • pytorch 1.9.0
  • pyg 2.0.3

Usage

  • For the semi-supervised setting, run the following script: `cd Citation && bash semi.sh`

Pretrain

  • If you want to generate the files from the pretrained model, run the following commands:

python pre_train_MLP.py --dataset cora --hidden_dim 128
python pre_train_MLP.py --dataset citeseer --hidden_dim 256
python pre_train_MLP.py --dataset pubmed --hidden_dim 256

For the Coauthor dataset:

python pre_train_MLP_coauthor.py --dataset {dataset name}

Owner

  • Name: Jincheng Huang
  • Login: huangJC0429
  • Kind: user
  • Location: China
  • Company: SWPU

Hi, I am a graduate student at Southwest Petroleum University.

Citation (Citation/FLAN.py)

import torch.nn as nn
import torch.nn.functional as F
import torch
from gcn.layers import GraphConvolution, MLPLayer
from torch_geometric.utils import one_hot, spmm

class FLAN():
    """Closed-form label/feature alignment propagation (FLAN).

    Alternates a ridge-regression-style propagation step (:meth:`embed`) with
    damping, row normalization, and re-clamping of the known labels to refine
    soft labels ``Y_hat`` for unlabeled nodes. All computation runs under
    ``torch.no_grad()``; this class is deliberately not an ``nn.Module``.
    """

    def __init__(self, args, distance, d=0.1, LPA_step=1, epoch=10):
        """
        Args:
            args: config namespace; must provide ``c`` (size of the identity
                used in :meth:`embed` — equal to the column dimension of the
                ``X`` later passed to :meth:`fit`), ``device``, ``edge_rate2``
                (fraction of entries kept by :meth:`get_sparse_A`), ``mask``
                (indices of labeled nodes), and ``beta`` (ridge coefficient;
                the code uses ``coe2 = 1 / beta``).
            distance: precomputed pairwise-distance data (stored; unused here).
            d: damping factor mixing the propagated estimate with the input
                labels.
            LPA_step: number of inner propagation steps per epoch.
            epoch: number of outer refinement epochs.
        """
        super(FLAN, self).__init__()
        self.LPA_step = LPA_step
        self.epoch = epoch
        self.distance = distance
        self.Y_hat = None    # current soft-label estimate; set by fit()
        self.X = None        # cached node features; set by fit()
        self.I = torch.eye(args.c).to(args.device)
        self.A_align = None  # lazily computed aligned graph; see get_aligned_graph()
        self.edge_rate = args.edge_rate2
        self.mask = args.mask
        self.d = d
        self.args = args

    def fit(self, X, Y):
        """Iteratively refine soft labels ``self.Y_hat`` for all nodes.

        Args:
            X: (n, f) node-feature matrix; ``f`` must equal ``args.c``.
            Y: (n, c) initial (partially observed) label matrix.
        """
        with torch.no_grad():
            self.Y_hat = Y
            self.X = X
            for i in range(self.epoch):
                for j in range(self.LPA_step):
                    Y_hat2 = self.embed(X, Y, self.Y_hat)

                    # Damped update followed by L2 row normalization.
                    self.Y_hat = (1 - self.d) * Y_hat2 + self.d * Y
                    self.Y_hat = F.normalize(self.Y_hat)

                    # Clamp the labeled rows back to their ground truth.
                    self.Y_hat[self.mask] = Y[self.mask]
                    if i != self.epoch - 1:
                        # On all but the final epoch, zero out small entries
                        # and keep values inside [0, 1].
                        self.Y_hat[torch.abs(self.Y_hat) < 0.2] = 0  # 0.4
                        self.Y_hat = torch.clip(self.Y_hat, 0.0, 1)
                Y = self.Y_hat

    def embed(self, X, Y, Y_hat):
        """One closed-form propagation step.

        Computes ``Y @ X^T @ B`` where
        ``B = coe2 * Y - coe2^2 * X (I + coe2 * X^T X)^{-1} X^T Y_hat``
        and ``coe2 = 1 / beta``.
        """
        coe2 = 1.0 / self.args.beta
        res = torch.mm(torch.transpose(X, 0, 1), X)       # H^T H
        res2 = torch.mm(torch.transpose(X, 0, 1), Y_hat)  # H^T Y
        inv = torch.inverse(self.I + coe2 * res)
        res3 = torch.mm(inv, res2)                        # trailing part of the second term of B
        B = coe2 * Y - coe2 * coe2 * torch.mm(X, res3)    # B
        tmp = torch.mm(torch.transpose(X, 0, 1), B)       # H^T B
        return torch.mm(Y, tmp)

    def get_X(self):
        """Return a damped, L1-row-normalized reconstruction of the features."""
        X_hat = self.embed(self.Y_hat, self.X, self.X)
        res = (1 - self.d) * X_hat + self.d * self.X
        return self.row_l1_normalize(res)

    # new
    def get_aligned_graph(self):
        """Lazily compute (and cache) the aligned graph ``A_align``.

        ``A_align = coe2 * Y_hat X^T - coe2^2 * Y_hat (X^T X)(I + coe2 X^T X)^{-1} X^T``.
        """
        # Fix: use `is None` rather than `== None` — identity is the correct
        # (and tensor-safe) way to test for an unset cache.
        if self.A_align is None:
            with torch.no_grad():
                coe2 = 1.0 / self.args.beta
                res = torch.mm(torch.transpose(self.X, 0, 1), self.X)   # H^T H
                inv = torch.inverse(self.I + coe2 * res)                # inverse matrix inside Q
                res2 = torch.mm(torch.transpose(self.X, 0, 1), self.X)  # X^T H
                res3 = torch.mm(res2, inv)
                res4 = torch.mm(self.Y_hat, res3)
                res5 = coe2 * coe2 * torch.mm(res4, torch.transpose(self.X, 0, 1))
                self.A_align = coe2 * torch.mm(self.Y_hat, torch.transpose(self.X, 0, 1)) - res5

        return self.A_align

    def get_Y_hat(self):
        """Return the current soft-label estimate."""
        return self.Y_hat

    def get_sparse_A(self):
        """Sparsify the aligned graph: zero all but the entries whose
        magnitude exceeds the ``edge_rate`` quantile of absolute values.

        Requires :meth:`get_aligned_graph` to have been called first.
        """
        n_entries = self.A_align.shape[0] * self.A_align.shape[1]
        # Fix: the original computed k from shape[0] * shape[0], which only
        # matched the flattened length because A_align happens to be square.
        # Also clamp k to >= 1 so kthvalue never receives k == 0.
        k = max(1, int(n_entries * self.edge_rate))
        kthvalue = torch.kthvalue(torch.abs(self.A_align).view(1, n_entries), k)[0]
        mask = (torch.abs(self.A_align) > kthvalue).detach().float()
        return self.A_align * mask

    def pairwise_distance(self, x, y=None):
        """Squared Euclidean distance matrix via the expansion
        ``||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2``.

        Args:
            x: (N, f) tensor.
            y: optional second point set; defaults to ``x``.

        Returns:
            (N, N) tensor of squared distances (batch dim squeezed out).
        """
        x = x.unsqueeze(0).permute(0, 2, 1)          # [1, f, N]
        if y is None:
            y = x
        y = y.permute(0, 2, 1)                       # [B, N, f]
        A = -2 * torch.bmm(y, x)                     # [B, N, N]
        A += torch.sum(y ** 2, dim=2, keepdim=True)  # [B, N, 1]
        A += torch.sum(x ** 2, dim=1, keepdim=True)  # [B, 1, N]
        return A.squeeze()

    def row_l1_normalize(self, X):
        """Divide each row by its (epsilon-stabilized) sum."""
        norm = 1e-6 + X.sum(dim=1, keepdim=True)
        return X / norm


# Share weight: both adjacency views pass through the same convolutions.
class FLAN_GCN(nn.Module):
    """Two-layer GCN applied to two adjacency views with shared weights.

    The final logits are a convex combination of the per-view outputs,
    weighted by ``gamma``; per-view projections are returned alongside.
    """

    def __init__(self, args, nfeat, nhid, nclass, dropout, tau=0.0):
        super(FLAN_GCN, self).__init__()

        # One pair of layers serves both graph views (weight sharing).
        self.gc1 = GraphConvolution(nfeat, nhid)
        self.gc2 = GraphConvolution(nhid, nclass)
        self.dropout = dropout
        self.gamma = args.gamma
        self.fc = nn.Linear(nclass, nclass)

    def projection(self, z: torch.Tensor) -> torch.Tensor:
        """Linear map followed by ELU, used on each view's logits."""
        mapped = self.fc(z)
        return F.elu(mapped)

    def forward(self, x, adj, adj2):
        h = F.dropout(x, self.dropout, training=self.training)

        # First shared convolution on each view.
        hidden_a = F.relu(self.gc1(h, adj))
        hidden_b = F.relu(self.gc1(h, adj2))

        hidden_a = F.dropout(hidden_a, self.dropout, training=self.training)
        hidden_b = F.dropout(hidden_b, self.dropout, training=self.training)

        # Second shared convolution produces per-view logits.
        logits_a = self.gc2(hidden_a, adj)
        logits_b = self.gc2(hidden_b, adj2)

        combined = (1 - self.gamma) * logits_a + self.gamma * logits_b
        return combined, self.projection(logits_a), self.projection(logits_b)

class FLAN_GCN_onelayer(nn.Module):
    """Single-layer variant of FLAN_GCN: one shared convolution per view,
    mixed by ``gamma``, with per-view projections returned as well.
    """

    def __init__(self, args, nfeat, nhid, nclass, dropout, tau=0.0):
        super(FLAN_GCN_onelayer, self).__init__()

        # One convolution shared by both adjacency views.
        self.gc1 = GraphConvolution(nfeat, nclass)
        self.dropout = dropout
        self.gamma = args.gamma
        self.fc = nn.Linear(nclass, nclass)

    def projection(self, z: torch.Tensor) -> torch.Tensor:
        """Linear map followed by ELU, used on each view's output."""
        mapped = self.fc(z)
        return F.elu(mapped)

    def forward(self, x, adj, adj2):
        h = F.dropout(x, self.dropout, training=self.training)
        out_a = F.relu(self.gc1(h, adj))
        out_b = F.relu(self.gc1(h, adj2))

        fused = (1 - self.gamma) * out_a + self.gamma * out_b
        return fused, self.projection(out_a), self.projection(out_b)

class FLAN_GCN2(nn.Module):
    """Two-tower GCN: each adjacency view gets its OWN pair of convolution
    layers (no weight sharing); outputs are mixed by ``gamma``.
    """

    def __init__(self, args, nfeat, nhid, nclass, dropout, tau=0.0):
        super(FLAN_GCN2, self).__init__()

        # Tower for the first view.
        self.gc1 = GraphConvolution(nfeat, nhid)
        self.gc2 = GraphConvolution(nhid, nclass)

        # Independent tower for the second view.
        self.gc1_1 = GraphConvolution(nfeat, nhid)
        self.gc2_1 = GraphConvolution(nhid, nclass)
        self.dropout = dropout
        self.gamma = args.gamma

    def _tower(self, h, graph, conv_in, conv_out):
        """Run one GCN tower (conv -> relu -> dropout -> conv) on `graph`."""
        z = F.relu(conv_in(h, graph))
        z = F.dropout(z, self.dropout, training=self.training)
        return conv_out(z, graph)

    def forward(self, x, adj, adj2):
        h = F.dropout(x, self.dropout, training=self.training)
        out_a = self._tower(h, adj, self.gc1, self.gc2)
        out_b = self._tower(h, adj2, self.gc1_1, self.gc2_1)
        return (1 - self.gamma) * out_a + self.gamma * out_b

class FLAN_GCN3(nn.Module):
    """Three-layer GCN over two adjacency views with shared weights.

    Same structure as ``FLAN_GCN`` but with an extra hidden convolution.
    Final logits are a convex combination weighted by ``gamma``; per-view
    projections are returned for contrastive use.
    """

    def __init__(self, args, nfeat, nhid, nclass, dropout, tau=0.0):
        # BUG FIX: original called super(FLAN_GCN, self).__init__() — a
        # copy-paste error; FLAN_GCN is not in FLAN_GCN3's MRO, so
        # instantiation raised TypeError before this fix.
        super().__init__()

        self.gc1 = GraphConvolution(nfeat, nhid)
        self.gc2 = GraphConvolution(nhid, nhid)
        self.gc3 = GraphConvolution(nhid, nclass)
        self.dropout = dropout
        self.gamma = args.gamma
        self.fc = nn.Linear(nclass, nclass)

    def projection(self, z: torch.Tensor) -> torch.Tensor:
        """Linear map followed by ELU, used on each view's logits."""
        z = F.elu(self.fc(z))
        return z

    def forward(self, x, adj, adj2):
        x = F.dropout(x, self.dropout, training=self.training)
        # Shared convolutions applied to each adjacency view in turn.
        x1 = F.relu(self.gc1(x, adj))
        x2 = F.relu(self.gc1(x, adj2))

        x1 = F.dropout(x1, self.dropout, training=self.training)
        x2 = F.dropout(x2, self.dropout, training=self.training)

        x1 = self.gc2(x1, adj)
        x2 = self.gc2(x2, adj2)

        x1 = F.dropout(x1, self.dropout, training=self.training)
        x2 = F.dropout(x2, self.dropout, training=self.training)

        x1 = self.gc3(x1, adj)
        x2 = self.gc3(x2, adj2)

        x = (1 - self.gamma) * x1 + self.gamma * x2
        return x, self.projection(x1), self.projection(x2)

GitHub Events

Total
  • Watch event: 1
  • Public event: 1
  • Push event: 5
Last Year
  • Watch event: 1
  • Public event: 1
  • Push event: 5