import networkx as nx
import numpy as np
import time
##################################################
np.random.seed(0)
N = 500  # number of nodes
p = 0.6  # edge probability
G = nx.fast_gnp_random_graph(N, p, seed=0)
print("== Graph generated")
# Compute closeness centrality and information centrality, timing each.
close_time = time.time()
closeness_cent = nx.closeness_centrality(G)
close_time = time.time() - close_time
info_time = time.time()
information_cent = nx.information_centrality(G, weight=None)
info_time = time.time() - info_time
# On this graph, information centrality takes far less time to compute than closeness centrality.
print(f"close_time: {close_time:8.5f}")
print(f"info_time : {info_time:8.5f}")
print(f"Information centrality is {close_time/info_time: 6.2f} times faster than closeness centrality")
print(f"==")
for i, n in enumerate(closeness_cent.keys()):
    print(f"NODE: {n:2d} - Close cent: {closeness_cent[n]:6.4f} - info cent: {information_cent[n]: 6.4f}")
    if i > 5:
        break
print(f"==")
# np.correlate returns the inner product, so if A and B are not L2-normalized the "correlation" can exceed 1.
A = np.array([*closeness_cent.values()])
A /= np.linalg.norm(A)
B = np.array([*information_cent.values()])
B /= np.linalg.norm(B)
print(f"== correlation: {np.correlate(A, B)[0]: 5.3%}")
== Graph generated
close_time: 31.96718
info_time : 2.35090
Information centrality is 13.60 times faster than closeness centrality
==
NODE: 0 - Close cent: 0.7118 - info cent: 0.2990
NODE: 1 - Close cent: 0.7008 - info cent: 0.2933
NODE: 2 - Close cent: 0.7317 - info cent: 0.3083
NODE: 3 - Close cent: 0.7048 - info cent: 0.2954
NODE: 4 - Close cent: 0.7221 - info cent: 0.3040
NODE: 5 - Close cent: 0.7263 - info cent: 0.3059
NODE: 6 - Close cent: 0.7088 - info cent: 0.2975
==
== correlation: 100.000%
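The 100% above is the cosine similarity of the two L2-normalized centrality vectors, since np.correlate on equal-length arrays returns their inner product. A minimal sketch of the same comparison using the Pearson correlation coefficient via np.corrcoef, which centers and scales internally so no manual normalization is needed (close_vals, info_vals, pearson_r are illustrative names; the centrality dicts come from the script above):

# Pearson correlation via np.corrcoef: it mean-centers and scales internally,
# so the raw centrality values can be passed without manual normalization.
close_vals = np.array([*closeness_cent.values()])
info_vals = np.array([*information_cent.values()])
pearson_r = np.corrcoef(close_vals, info_vals)[0, 1]
print(f"== Pearson correlation: {pearson_r:6.4f}")

For reference, the NetworkX documentation describes information centrality as another name for current-flow closeness centrality, i.e. a closeness variant based on effective resistance, which is why the two measures track each other so closely here.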