package 社区发现1;
import java.util.*;
import java.io.*;
public class NetWork {
double weight[][]; // weight[i][j]: weight of the edge connecting nodes i and j (0 if absent)
LinkedList neighborlist[]; // neighborlist[i]: raw LinkedList of Integer ids of node i's neighbors
double nodeweight[]; // weight of each node
int nodecount; // total number of nodes in the graph
int cluster[]; // cluster[i]: id of the cluster node i belongs to
int clustercount; // total number of clusters
double clusterweight[]; // weight of each cluster (sum of its member nodes' weights)
boolean clusteringStatsAvailable ; // whether the cached per-cluster statistics below are up to date
int nodecountsPercluster[]; // number of nodes in each cluster
int nodePercluster[][]; // nodePercluster[i][j]: id of the j-th node in cluster i
/**
 * Builds the network from an input file; DataSet performs the parsing and
 * exposes the adjacency structures this class copies references to.
 *
 * @param filename path of the input file describing the graph
 * @throws IOException if the file cannot be read
 */
NetWork(String filename) throws IOException
{
    DataSet dataSet = new DataSet(filename);
    this.weight = dataSet.weight;
    this.neighborlist = dataSet.neighborlist;
    this.nodecount = dataSet.nodecount;
    this.nodeweight = dataSet.nodeweight;
    // Start from the trivial partition: one singleton cluster per node.
    initSingletonClusters();
}
// No-arg constructor: leaves all fields unset; used by getReducedNetwork(),
// which fills the fields in manually.
NetWork()
{
}
/**
 * Returns the sum of edge weights over every adjacency entry. Each undirected
 * edge appears in both endpoints' neighbor lists, so the result is twice the
 * total edge weight of the graph — callers rely on the doubled value.
 */
public double getTotalEdgeWeight()
{
    double sum = 0;
    for (int node = 0; node < nodecount; node++)
    {
        LinkedList neighbors = neighborlist[node];
        for (int idx = 0; idx < neighbors.size(); idx++)
        {
            int neighbor = (Integer) neighbors.get(idx);
            sum += weight[node][neighbor];
        }
    }
    return sum;
}
/**
 * Initializes the finest possible partition: every node is placed in its own
 * singleton cluster (cluster id == node id). Any cached per-cluster
 * statistics are invalidated.
 */
public void initSingletonClusters()
{
    clustercount = nodecount;
    cluster = new int[nodecount];
    for (int node = 0; node < nodecount; node++)
    {
        cluster[node] = node;
    }
    deleteClusteringStats();
}
/**
 * Rebuilds the cached per-cluster statistics: clusterweight (sum of member
 * node weights), nodecountsPercluster (member counts) and nodePercluster
 * (member ids per cluster). Sets clusteringStatsAvailable when done.
 */
private void calcClusteringStats()
{
    clusterweight = new double[clustercount];
    nodecountsPercluster = new int[clustercount];
    nodePercluster = new int[clustercount][];
    // Pass 1: accumulate each cluster's total weight and member count.
    for (int node = 0; node < nodecount; node++)
    {
        int c = cluster[node];
        clusterweight[c] += nodeweight[node];
        nodecountsPercluster[c]++;
    }
    // Allocate the member arrays, then reset the counters so they can act
    // as insertion cursors in the second pass.
    for (int c = 0; c < clustercount; c++)
    {
        nodePercluster[c] = new int[nodecountsPercluster[c]];
        nodecountsPercluster[c] = 0;
    }
    // Pass 2: record which node ids belong to each cluster.
    for (int node = 0; node < nodecount; node++)
    {
        int c = cluster[node];
        nodePercluster[c][nodecountsPercluster[c]] = node;
        nodecountsPercluster[c]++;
    }
    clusteringStatsAvailable = true;
}
// Drops the cached per-cluster statistics; calcClusteringStats() rebuilds
// them lazily the next time they are needed.
private void deleteClusteringStats()
{
clusterweight = null;
nodecountsPercluster = null;
nodePercluster = null;
clusteringStatsAvailable = false;
}
/**
 * Returns the cluster assignment array (cluster[i] = cluster id of node i).
 * Note: the internal array is returned directly, not a defensive copy.
 */
public int[] getClusters()
{
return cluster;
}
/**
 * Computes the modularity of the current partition. With m the total edge
 * weight of the graph, callers pass resolution = 1/(2m); the method returns
 * (sum of intra-cluster edge weights - resolution * sum of squared cluster
 * weights) / (2 * total edge weight). Returns NaN if no clustering exists.
 *
 * @param resolution resolution parameter, conventionally 1/(2m)
 * @return the modularity value, or Double.NaN when cluster is null
 */
public double calcQualityFunction(double resolution)
{
    if (cluster == null)
        return Double.NaN;
    if (!clusteringStatsAvailable)
        calcClusteringStats();
    double quality = 0;      // accumulates intra-cluster edge weight (doubled)
    double totalWeight = 0;  // accumulates all edge weight (doubled, each edge seen twice)
    for (int node = 0; node < nodecount; node++)
    {
        int own = cluster[node];
        LinkedList neighbors = neighborlist[node];
        for (int idx = 0; idx < neighbors.size(); idx++)
        {
            int neighbor = (Integer) neighbors.get(idx);
            if (cluster[neighbor] == own)
                quality += weight[node][neighbor];
            totalWeight += weight[node][neighbor];
        }
    }
    // Subtract the expected-weight (null-model) term for every cluster.
    for (int c = 0; c < clustercount; c++)
        quality -= clusterweight[c] * clusterweight[c] * resolution;
    quality /= totalWeight;
    return quality;
}
/** Returns the current number of clusters in the partition. */
public int getNClusters()
{
return clustercount;
}
/**
 * Merges this network's clusters according to a clustering of the reduced
 * network: each cluster id in this network indexes into newCluster, which
 * maps it to its cluster in the reduced network. Every node is relabeled
 * accordingly, clustercount is updated to the highest label + 1, and the
 * cached statistics are invalidated. No-op when no clustering exists.
 *
 * @param newCluster cluster assignment of the reduced network's nodes
 */
public void mergeClusters(int[] newCluster)
{
    if (cluster == null)
        return;
    int maxLabel = 0;
    for (int node = 0; node < nodecount; node++)
    {
        // A reduced-network node corresponds to one pre-reduction cluster.
        int label = newCluster[cluster[node]];
        if (label > maxLabel)
            maxLabel = label;
        cluster[node] = label;
    }
    clustercount = maxLabel + 1;
    deleteClusteringStats();
}
/**
 * Builds the reduced (aggregated) network in which every cluster of this
 * network becomes a single node — the aggregation phase of the Louvain
 * algorithm. Returns null when no clustering exists.
 * NOTE(review): this method is TRUNCATED in this copy of the file — the
 * source below line 172 was replaced by non-code residue, so the tail of
 * the loop body, the return statement, and the closing braces are missing
 * and must be restored from the original source.
 */
public NetWork getReducedNetwork()
{
double[] reducedNetworkEdgeWeight2;
int i,j, k, l, m,reducedNetworkNEdges2;
int[] reducedNetworkNeighbor2;
NetWork reducedNetwork;
if (cluster== null)
return null;
if(!clusteringStatsAvailable)
calcClusteringStats();
reducedNetwork = new NetWork();
reducedNetwork.nodecount = clustercount; // after reduction, each former cluster is one node
reducedNetwork.neighborlist = new LinkedList[clustercount];
for(i=0;i<clustercount;i++)
reducedNetwork.neighborlist[i] = new LinkedList();
reducedNetwork.nodeweight = new double[clustercount];
reducedNetwork.weight = new double[clustercount][clustercount];
reducedNetworkNeighbor2 = new int[clustercount -1]; // a reduced node has at most clustercount-1 neighbors
reducedNetworkEdgeWeight2 = new double[clustercount]; // scratch: accumulated weight towards each other cluster
for (i= 0; i < clustercount; i++)
{
reducedNetworkNEdges2 = 0;
for (j = 0; j < nodePercluster[i].length; j++)
{
k = nodePercluster[i][j]; // k: id of the j-th node in cluster i
for( l=0;l<neighborlist[k].size();l++)
{
int nodeid =(Integer)neighborlist[k].get(l);
m = cluster[nodeid];
if( m != i) // clusters i and m are connected because edge(k, nodeid) exists
{
if(reducedNetworkEdgeWeight2[m]== 0)
{
// First time cluster m is seen: a former cluster neighbor becomes a node neighbor.
reducedNetworkNeighbor2[reducedNetworkNEdges2]= m;
reducedNetworkNEdges2++;
// reducedNetworkNEdges2 counts how many neighbors reduced node i (former cluster i) has
}
reducedNetworkEdgeWeight2[m]+= weight[k][nodeid];
// NOTE(review): the text that stood here was not source code — it was residue
// from the web page the file was downloaded from (a file-hosting listing:
// "Louvain java implementation", upload date, file sizes, unrelated resource
// links). It overwrote the remainder of getReducedNetwork() and the closing
// braces of the method and the NetWork class, which must be recovered from
// the original upload for this file to compile.