diff --git a/__init__.py b/__init__.py
deleted file mode 100644
index bfa83a0..0000000
--- a/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from __future__ import print_function
-from __future__ import division
diff --git a/constructor.py b/constructor.py
deleted file mode 100644
index b51f8a8..0000000
--- a/constructor.py
+++ /dev/null
@@ -1,195 +0,0 @@
-import tensorflow as tf
-import numpy as np
-
-from load_data import load_data_1
-from model import GCN, Generator_z2g, Discriminator, D_graph
-from optimizer import OptimizerAE, OptimizerCycle
-import scipy.sparse as sp
-from input_data import load_data
-import inspect
-from preprocessing import preprocess_graph, sparse_to_tuple, mask_test_edges, construct_feed_dict
-
-flags = tf.app.flags
-FLAGS = flags.FLAGS
-
-
-def get_placeholder(adj, num_features):
-    # When feeding a tf.sparse_placeholder, either:
-    # 1. pass an (indices, values, shape) triple directly, or
-    # 2. pass a tf.SparseTensorValue
-    placeholders = {
-        'features': tf.sparse_placeholder(tf.float32),
-        'features_dense': tf.placeholder(tf.float32, shape=[adj.shape[0], num_features],
-                                         name='features_dense'),
-        'adj': tf.sparse_placeholder(tf.float32),
-        'adj_orig': tf.sparse_placeholder(tf.float32),
-        'dropout': tf.placeholder_with_default(0., shape=()),
-        'real_distribution': tf.placeholder(dtype=tf.float32, shape=[adj.shape[0], FLAGS.hidden2],
-                                            name='real_distribution')
-    }
-
-    return placeholders
-
-
-def get_model(model_str, placeholders, num_features, num_nodes, features_nonzero):
-    # Build the computation graph
-    discriminator = Discriminator()
-    D_Graph = D_graph(num_features)
-    d_real = discriminator.construct(placeholders['real_distribution'])
-    GD_real = D_Graph.construct(placeholders['features_dense'])
-    model = None
-    model_z2g = None  # the z2g generator is only built for DBGAN
-    if model_str == 'arga_ae':
-        model = GCN(placeholders, num_features, features_nonzero)
-
-    elif model_str == 'DBGAN':
-        model = GCN(placeholders, num_features, features_nonzero)
-        model_z2g = Generator_z2g(placeholders, num_features, features_nonzero)
-
-    return d_real, discriminator, model, model_z2g, D_Graph, GD_real
-
-
-def format_data(data_name):
-    # Load data
-    # adj, features, y_test, tx, ty, test_masks, true_labels = load_data(data_name)
-    adj, features, y_test, tx, ty, test_masks, true_labels = load_data_1("luo")  # e ic gpcr nr luo
-
-    # Store original adjacency matrix (without diagonal entries) for later
-    adj_orig = adj
-    # Remove the diagonal entries
-    adj_orig = adj_orig - sp.dia_matrix((adj_orig.diagonal()[np.newaxis, :], [0]), shape=adj_orig.shape)
-    adj_orig.eliminate_zeros()
-
-    adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false = mask_test_edges(adj)
-    adj = adj_train
-    adj_dense = adj.toarray()
-
-    if FLAGS.features == 0:
-        features = sp.identity(features.shape[0])  # featureless
-
-    # Some preprocessing
-    adj_norm = preprocess_graph(adj)
-
-    num_nodes = adj.shape[0]
-    features_dense = features.tocoo().toarray()
-
-    features = sparse_to_tuple(features.tocoo())
-    # num_features is the dimensionality of the features
-    num_features = features[2][1]
-    # features_nonzero is the number of non-zero feature entries
-    features_nonzero = features[1].shape[0]
-
-    pos_weight = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()
-    norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.sum()) * 2)
-
-    adj_label = adj_train + sp.eye(adj_train.shape[0])
-    adj_label = sparse_to_tuple(adj_label)
-    items = [
-        adj, num_features, num_nodes, features_nonzero,
-        pos_weight, norm, adj_norm, adj_label,
-        features, true_labels, train_edges, val_edges,
-        val_edges_false, test_edges, test_edges_false, adj_orig, features_dense,
-        adj_dense
-    ]
-
-    feas = {}
-
-    print('num_features is:', num_features)
-    print('num_nodes is:', num_nodes)
-    print('features_nonzero is:', features_nonzero)
-    print('pos_weight is:', pos_weight)
-    print('norm is:', norm)
-
-    for item in items:
-        # item_name = [k for k, v in locals().items() if v == item][0]
-        feas[retrieve_name(item)] = item
-
-    feas['num_features'] = num_features
-    feas['num_nodes'] = num_nodes
-    return feas
-
-
-def get_optimizer(model_str, model, model_z2g, D_Graph, discriminator, placeholders, pos_weight, norm, d_real, num_nodes, GD_real):
-    if model_str == 'arga_ae':
-        output = model.construct()
-        embeddings = output[0]
-        reconstructions = output[1]
-        d_fake = discriminator.construct(embeddings, reuse=True)
-        opt = OptimizerAE(preds=reconstructions,
-                          labels=tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'],
-                                                                      validate_indices=False), [-1]),
-                          pos_weight=pos_weight,
-                          norm=norm,
-                          d_real=d_real,
-                          d_fake=d_fake)
-    elif model_str == 'DBGAN':
-
-        z2g = model_z2g.construct()
-        hidden = z2g[1]
-        z2g = z2g[0]
-        preds_z2g = model.construct(hidden=hidden, reuse=True)[0]
-        g2z = model.construct()
-
-        embeddings = g2z[0]
-        reconstructions = g2z[1]
-        d_fake = discriminator.construct(embeddings, reuse=True)
-        GD_fake = D_Graph.construct(z2g, reuse=True)
-
-        epsilon = tf.random_uniform(shape=[1], minval=0.0, maxval=1.0)
-        interpolated_input = epsilon * placeholders['real_distribution'] + (1 - epsilon) * embeddings
-        gradient = tf.gradients(discriminator.construct(interpolated_input, reuse=True), [interpolated_input])[0]
-
-        epsilon = tf.random_uniform(shape=[1], minval=0.0, maxval=1.0)
-        interpolated_input = epsilon * placeholders['features_dense'] + (1 - epsilon) * z2g
-        gradient_z = tf.gradients(D_Graph.construct(interpolated_input, reuse=True), [interpolated_input])[0]
-
-        opt = OptimizerCycle(preds=reconstructions,
-                             labels=tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'],
-                                                                         validate_indices=False), [-1]),
-                             pos_weight=pos_weight,
-                             norm=norm,
-                             d_real=d_real,
-                             d_fake=d_fake,
-                             GD_real=GD_real,
-                             GD_fake=GD_fake,
-                             preds_z2g=preds_z2g,
-                             labels_z2g=placeholders['real_distribution'],
-                             preds_cycle=model_z2g.construct(embeddings, reuse=True)[0],
-                             labels_cycle=placeholders['features_dense'],
-                             gradient=gradient,
-                             gradient_z=gradient_z)
-    return opt
-
-
-def update(model, opt, sess, adj_norm, adj_label, features, placeholders, adj, distribution, adj_dense):
-    # Construct feed dictionary
-    feed_dict = construct_feed_dict(adj_norm, adj_label, features, placeholders)
-    feed_dict.update({placeholders['features_dense']: adj_dense})
-    feed_dict.update({placeholders['dropout']: 0})
-    # sample the prior from the KDE-estimated distribution
-    z_real_dist = distribution.sample(adj.shape[0])
-    feed_dict.update({placeholders['real_distribution']: z_real_dist})
-
-    for j in range(5):
-        _, reconstruct_loss = sess.run([opt.opt_op, opt.cost], feed_dict=feed_dict)
-    g_loss, _ = sess.run([opt.generator_loss, opt.generator_optimizer], feed_dict=feed_dict)
-    d_loss, _ = sess.run([opt.dc_loss, opt.discriminator_optimizer], feed_dict=feed_dict)
-
-    GD_loss, _ = sess.run([opt.GD_loss, opt.discriminator_optimizer_z2g], feed_dict=feed_dict)
-    GG_loss, _ = sess.run([opt.generator_loss_z2g, opt.generator_optimizer_z2g], feed_dict=feed_dict)
-    # GD_loss = sess.run(opt.GD_loss, feed_dict=feed_dict)
-    # GG_loss = sess.run(opt.generator_loss_z2g, feed_dict=feed_dict)
-    # g_loss =
sess.run(opt.generator_loss, feed_dict=feed_dict) - # d_loss = sess.run(opt.dc_loss, feed_dict=feed_dict) - emb = sess.run(model.z_mean, feed_dict=feed_dict) - avg_cost = [reconstruct_loss, d_loss, g_loss, GD_loss, GG_loss] - - return emb, avg_cost - - -def retrieve_name(var): - callers_local_vars = inspect.currentframe().f_back.f_locals.items() - print([var_name for var_name, var_val in callers_local_vars if var_val is var]) - return [var_name for var_name, var_val in callers_local_vars if var_val is var][0] diff --git a/data/ind.citeseer.allx b/data/ind.citeseer.allx deleted file mode 100644 index 5920910..0000000 Binary files a/data/ind.citeseer.allx and /dev/null differ diff --git a/data/ind.citeseer.ally b/data/ind.citeseer.ally deleted file mode 100644 index 7503f81..0000000 Binary files a/data/ind.citeseer.ally and /dev/null differ diff --git a/data/ind.citeseer.graph b/data/ind.citeseer.graph deleted file mode 100644 index a01dca6..0000000 Binary files a/data/ind.citeseer.graph and /dev/null differ diff --git a/data/ind.citeseer.test.index b/data/ind.citeseer.test.index deleted file mode 100644 index 62d9e3d..0000000 --- a/data/ind.citeseer.test.index +++ /dev/null @@ -1,1000 +0,0 @@ -2488 -2644 -3261 -2804 -3176 -2432 -3310 -2410 -2812 -2520 -2994 -3282 -2680 -2848 -2670 -3005 -2977 -2592 -2967 -2461 -3184 -2852 -2768 -2905 -2851 -3129 -3164 -2438 -2793 -2763 -2528 -2954 -2347 -2640 -3265 -2874 -2446 -2856 -3149 -2374 -3097 -3301 -2664 -2418 -2655 -2464 -2596 -3262 -3278 -2320 -2612 -2614 -2550 -2626 -2772 -3007 -2733 -2516 -2476 -2798 -2561 -2839 -2685 -2391 -2705 -3098 -2754 -3251 -2767 -2630 -2727 -2513 -2701 -3264 -2792 -2821 -3260 -2462 -3307 -2639 -2900 -3060 -2672 -3116 -2731 -3316 -2386 -2425 -2518 -3151 -2586 -2797 -2479 -3117 -2580 -3182 -2459 -2508 -3052 -3230 -3215 -2803 -2969 -2562 -2398 -3325 -2343 -3030 -2414 -2776 -2383 -3173 -2850 -2499 -3312 -2648 -2784 -2898 -3056 -2484 -3179 -3132 -2577 -2563 -2867 -3317 -2355 -3207 -3178 -2968 -3319 -2358 -2764 -3001 -2683 -3271 -2321 -2567 -2502 -3246 -2715 -3066 -2390 -2381 -3162 -2741 -2498 -2790 -3038 -3321 -2481 -3050 -3161 -3122 -2801 -2957 -3177 -2965 -2621 -3208 -2921 -2802 -2357 -2677 -2519 -2860 -2696 -2368 -3241 -2858 -2419 -2762 -2875 -3222 -3064 -2827 -3044 -2471 -3062 -2982 -2736 -2322 -2709 -2766 -2424 -2602 -2970 -2675 -3299 -2554 -2964 -2597 -2753 -2979 -2523 -2912 -2896 -2317 -3167 -2813 -2482 -2557 -3043 -3244 -2985 -2460 -2363 -3272 -3045 -3192 -2453 -2656 -2834 -2443 -3202 -2926 -2711 -2633 -2384 -2752 -3285 -2817 -2483 -2919 -2924 -2661 -2698 -2361 -2662 -2819 -3143 -2316 -3196 -2739 -2345 -2578 -2822 -3229 -2908 -2917 -2692 -3200 -2324 -2522 -3322 -2697 -3163 -3093 -3233 -2774 -2371 -2835 -2652 -2539 -2843 -3231 -2976 -2429 -2367 -3144 -2564 -3283 -3217 -3035 -2962 -2433 -2415 -2387 -3021 -2595 -2517 -2468 -3061 -2673 -2348 -3027 -2467 -3318 -2959 -3273 -2392 -2779 -2678 -3004 -2634 -2974 -3198 -2342 -2376 -3249 -2868 -2952 -2710 -2838 -2335 -2524 -2650 -3186 -2743 -2545 -2841 -2515 -2505 -3181 -2945 -2738 -2933 -3303 -2611 -3090 -2328 -3010 -3016 -2504 -2936 -3266 -3253 -2840 -3034 -2581 -2344 -2452 -2654 -3199 -3137 -2514 -2394 -2544 -2641 -2613 -2618 -2558 -2593 -2532 -2512 -2975 -3267 -2566 -2951 -3300 -2869 -2629 -2747 -3055 -2831 -3105 -3168 -3100 -2431 -2828 -2684 -3269 -2910 -2865 -2693 -2884 -3228 -2783 -3247 -2770 -3157 -2421 -2382 -2331 -3203 -3240 -2351 -3114 -2986 -2688 -2439 -2996 -3079 -3103 -3296 -2349 -2372 -3096 -2422 -2551 -3069 -2737 -3084 -3304 -3022 -2542 -3204 -2949 -2318 -2450 
-3140 -2734 -2881 -2576 -3054 -3089 -3125 -2761 -3136 -3111 -2427 -2466 -3101 -3104 -3259 -2534 -2961 -3191 -3000 -3036 -2356 -2800 -3155 -3224 -2646 -2735 -3020 -2866 -2426 -2448 -3226 -3219 -2749 -3183 -2906 -2360 -2440 -2946 -2313 -2859 -2340 -3008 -2719 -3058 -2653 -3023 -2888 -3243 -2913 -3242 -3067 -2409 -3227 -2380 -2353 -2686 -2971 -2847 -2947 -2857 -3263 -3218 -2861 -3323 -2635 -2966 -2604 -2456 -2832 -2694 -3245 -3119 -2942 -3153 -2894 -2555 -3128 -2703 -2323 -2631 -2732 -2699 -2314 -2590 -3127 -2891 -2873 -2814 -2326 -3026 -3288 -3095 -2706 -2457 -2377 -2620 -2526 -2674 -3190 -2923 -3032 -2334 -3254 -2991 -3277 -2973 -2599 -2658 -2636 -2826 -3148 -2958 -3258 -2990 -3180 -2538 -2748 -2625 -2565 -3011 -3057 -2354 -3158 -2622 -3308 -2983 -2560 -3169 -3059 -2480 -3194 -3291 -3216 -2643 -3172 -2352 -2724 -2485 -2411 -2948 -2445 -2362 -2668 -3275 -3107 -2496 -2529 -2700 -2541 -3028 -2879 -2660 -3324 -2755 -2436 -3048 -2623 -2920 -3040 -2568 -3221 -3003 -3295 -2473 -3232 -3213 -2823 -2897 -2573 -2645 -3018 -3326 -2795 -2915 -3109 -3086 -2463 -3118 -2671 -2909 -2393 -2325 -3029 -2972 -3110 -2870 -3284 -2816 -2647 -2667 -2955 -2333 -2960 -2864 -2893 -2458 -2441 -2359 -2327 -3256 -3099 -3073 -3138 -2511 -2666 -2548 -2364 -2451 -2911 -3237 -3206 -3080 -3279 -2934 -2981 -2878 -3130 -2830 -3091 -2659 -2449 -3152 -2413 -2722 -2796 -3220 -2751 -2935 -3238 -2491 -2730 -2842 -3223 -2492 -3074 -3094 -2833 -2521 -2883 -3315 -2845 -2907 -3083 -2572 -3092 -2903 -2918 -3039 -3286 -2587 -3068 -2338 -3166 -3134 -2455 -2497 -2992 -2775 -2681 -2430 -2932 -2931 -2434 -3154 -3046 -2598 -2366 -3015 -3147 -2944 -2582 -3274 -2987 -2642 -2547 -2420 -2930 -2750 -2417 -2808 -3141 -2997 -2995 -2584 -2312 -3033 -3070 -3065 -2509 -3314 -2396 -2543 -2423 -3170 -2389 -3289 -2728 -2540 -2437 -2486 -2895 -3017 -2853 -2406 -2346 -2877 -2472 -3210 -2637 -2927 -2789 -2330 -3088 -3102 -2616 -3081 -2902 -3205 -3320 -3165 -2984 -3185 -2707 -3255 -2583 -2773 -2742 -3024 -2402 -2718 -2882 -2575 -3281 -2786 -2855 -3014 -2401 -2535 -2687 -2495 -3113 -2609 -2559 -2665 -2530 -3293 -2399 -2605 -2690 -3133 -2799 -2533 -2695 -2713 -2886 -2691 -2549 -3077 -3002 -3049 -3051 -3087 -2444 -3085 -3135 -2702 -3211 -3108 -2501 -2769 -3290 -2465 -3025 -3019 -2385 -2940 -2657 -2610 -2525 -2941 -3078 -2341 -2916 -2956 -2375 -2880 -3009 -2780 -2370 -2925 -2332 -3146 -2315 -2809 -3145 -3106 -2782 -2760 -2493 -2765 -2556 -2890 -2400 -2339 -3201 -2818 -3248 -3280 -2570 -2569 -2937 -3174 -2836 -2708 -2820 -3195 -2617 -3197 -2319 -2744 -2615 -2825 -2603 -2914 -2531 -3193 -2624 -2365 -2810 -3239 -3159 -2537 -2844 -2758 -2938 -3037 -2503 -3297 -2885 -2608 -2494 -2712 -2408 -2901 -2704 -2536 -2373 -2478 -2723 -3076 -2627 -2369 -2669 -3006 -2628 -2788 -3276 -2435 -3139 -3235 -2527 -2571 -2815 -2442 -2892 -2978 -2746 -3150 -2574 -2725 -3188 -2601 -2378 -3075 -2632 -2794 -3270 -3071 -2506 -3126 -3236 -3257 -2824 -2989 -2950 -2428 -2405 -3156 -2447 -2787 -2805 -2720 -2403 -2811 -2329 -2474 -2785 -2350 -2507 -2416 -3112 -2475 -2876 -2585 -2487 -3072 -3082 -2943 -2757 -2388 -2600 -3294 -2756 -3142 -3041 -2594 -2998 -3047 -2379 -2980 -2454 -2862 -3175 -2588 -3031 -3012 -2889 -2500 -2791 -2854 -2619 -2395 -2807 -2740 -2412 -3131 -3013 -2939 -2651 -2490 -2988 -2863 -3225 -2745 -2714 -3160 -3124 -2849 -2676 -2872 -3287 -3189 -2716 -3115 -2928 -2871 -2591 -2717 -2546 -2777 -3298 -2397 -3187 -2726 -2336 -3268 -2477 -2904 -2846 -3121 -2899 -2510 -2806 -2963 -3313 -2679 -3302 -2663 -3053 -2469 -2999 -3311 -2470 -2638 -3120 -3171 -2689 -2922 -2607 -2721 -2993 -2887 
-2837 -2929 -2829 -3234 -2649 -2337 -2759 -2778 -2771 -2404 -2589 -3123 -3209 -2729 -3252 -2606 -2579 -2552 diff --git a/data/ind.citeseer.tx b/data/ind.citeseer.tx deleted file mode 100644 index b2aff18..0000000 Binary files a/data/ind.citeseer.tx and /dev/null differ diff --git a/data/ind.citeseer.ty b/data/ind.citeseer.ty deleted file mode 100644 index 3795f79..0000000 Binary files a/data/ind.citeseer.ty and /dev/null differ diff --git a/data/ind.citeseer.x b/data/ind.citeseer.x deleted file mode 100644 index f094104..0000000 Binary files a/data/ind.citeseer.x and /dev/null differ diff --git a/data/ind.citeseer.y b/data/ind.citeseer.y deleted file mode 100644 index e857ac4..0000000 Binary files a/data/ind.citeseer.y and /dev/null differ diff --git a/data/ind.cora.allx b/data/ind.cora.allx deleted file mode 100644 index 44d53b1..0000000 Binary files a/data/ind.cora.allx and /dev/null differ diff --git a/data/ind.cora.ally b/data/ind.cora.ally deleted file mode 100644 index 04fbd0b..0000000 Binary files a/data/ind.cora.ally and /dev/null differ diff --git a/data/ind.cora.graph b/data/ind.cora.graph deleted file mode 100644 index 4d3bf85..0000000 Binary files a/data/ind.cora.graph and /dev/null differ diff --git a/data/ind.cora.test.index b/data/ind.cora.test.index deleted file mode 100644 index ded8092..0000000 --- a/data/ind.cora.test.index +++ /dev/null @@ -1,1000 +0,0 @@ -2692 -2532 -2050 -1715 -2362 -2609 -2622 -1975 -2081 -1767 -2263 -1725 -2588 -2259 -2357 -1998 -2574 -2179 -2291 -2382 -1812 -1751 -2422 -1937 -2631 -2510 -2378 -2589 -2345 -1943 -1850 -2298 -1825 -2035 -2507 -2313 -1906 -1797 -2023 -2159 -2495 -1886 -2122 -2369 -2461 -1925 -2565 -1858 -2234 -2000 -1846 -2318 -1723 -2559 -2258 -1763 -1991 -1922 -2003 -2662 -2250 -2064 -2529 -1888 -2499 -2454 -2320 -2287 -2203 -2018 -2002 -2632 -2554 -2314 -2537 -1760 -2088 -2086 -2218 -2605 -1953 -2403 -1920 -2015 -2335 -2535 -1837 -2009 -1905 -2636 -1942 -2193 -2576 -2373 -1873 -2463 -2509 -1954 -2656 -2455 -2494 -2295 -2114 -2561 -2176 -2275 -2635 -2442 -2704 -2127 -2085 -2214 -2487 -1739 -2543 -1783 -2485 -2262 -2472 -2326 -1738 -2170 -2100 -2384 -2152 -2647 -2693 -2376 -1775 -1726 -2476 -2195 -1773 -1793 -2194 -2581 -1854 -2524 -1945 -1781 -1987 -2599 -1744 -2225 -2300 -1928 -2042 -2202 -1958 -1816 -1916 -2679 -2190 -1733 -2034 -2643 -2177 -1883 -1917 -1996 -2491 -2268 -2231 -2471 -1919 -1909 -2012 -2522 -1865 -2466 -2469 -2087 -2584 -2563 -1924 -2143 -1736 -1966 -2533 -2490 -2630 -1973 -2568 -1978 -2664 -2633 -2312 -2178 -1754 -2307 -2480 -1960 -1742 -1962 -2160 -2070 -2553 -2433 -1768 -2659 -2379 -2271 -1776 -2153 -1877 -2027 -2028 -2155 -2196 -2483 -2026 -2158 -2407 -1821 -2131 -2676 -2277 -2489 -2424 -1963 -1808 -1859 -2597 -2548 -2368 -1817 -2405 -2413 -2603 -2350 -2118 -2329 -1969 -2577 -2475 -2467 -2425 -1769 -2092 -2044 -2586 -2608 -1983 -2109 -2649 -1964 -2144 -1902 -2411 -2508 -2360 -1721 -2005 -2014 -2308 -2646 -1949 -1830 -2212 -2596 -1832 -1735 -1866 -2695 -1941 -2546 -2498 -2686 -2665 -1784 -2613 -1970 -2021 -2211 -2516 -2185 -2479 -2699 -2150 -1990 -2063 -2075 -1979 -2094 -1787 -2571 -2690 -1926 -2341 -2566 -1957 -1709 -1955 -2570 -2387 -1811 -2025 -2447 -2696 -2052 -2366 -1857 -2273 -2245 -2672 -2133 -2421 -1929 -2125 -2319 -2641 -2167 -2418 -1765 -1761 -1828 -2188 -1972 -1997 -2419 -2289 -2296 -2587 -2051 -2440 -2053 -2191 -1923 -2164 -1861 -2339 -2333 -2523 -2670 -2121 -1921 -1724 -2253 -2374 -1940 -2545 -2301 -2244 -2156 -1849 -2551 -2011 -2279 -2572 -1757 -2400 -2569 -2072 -2526 -2173 -2069 -2036 -1819 -1734 
-1880 -2137 -2408 -2226 -2604 -1771 -2698 -2187 -2060 -1756 -2201 -2066 -2439 -1844 -1772 -2383 -2398 -1708 -1992 -1959 -1794 -2426 -2702 -2444 -1944 -1829 -2660 -2497 -2607 -2343 -1730 -2624 -1790 -1935 -1967 -2401 -2255 -2355 -2348 -1931 -2183 -2161 -2701 -1948 -2501 -2192 -2404 -2209 -2331 -1810 -2363 -2334 -1887 -2393 -2557 -1719 -1732 -1986 -2037 -2056 -1867 -2126 -1932 -2117 -1807 -1801 -1743 -2041 -1843 -2388 -2221 -1833 -2677 -1778 -2661 -2306 -2394 -2106 -2430 -2371 -2606 -2353 -2269 -2317 -2645 -2372 -2550 -2043 -1968 -2165 -2310 -1985 -2446 -1982 -2377 -2207 -1818 -1913 -1766 -1722 -1894 -2020 -1881 -2621 -2409 -2261 -2458 -2096 -1712 -2594 -2293 -2048 -2359 -1839 -2392 -2254 -1911 -2101 -2367 -1889 -1753 -2555 -2246 -2264 -2010 -2336 -2651 -2017 -2140 -1842 -2019 -1890 -2525 -2134 -2492 -2652 -2040 -2145 -2575 -2166 -1999 -2434 -1711 -2276 -2450 -2389 -2669 -2595 -1814 -2039 -2502 -1896 -2168 -2344 -2637 -2031 -1977 -2380 -1936 -2047 -2460 -2102 -1745 -2650 -2046 -2514 -1980 -2352 -2113 -1713 -2058 -2558 -1718 -1864 -1876 -2338 -1879 -1891 -2186 -2451 -2181 -2638 -2644 -2103 -2591 -2266 -2468 -1869 -2582 -2674 -2361 -2462 -1748 -2215 -2615 -2236 -2248 -2493 -2342 -2449 -2274 -1824 -1852 -1870 -2441 -2356 -1835 -2694 -2602 -2685 -1893 -2544 -2536 -1994 -1853 -1838 -1786 -1930 -2539 -1892 -2265 -2618 -2486 -2583 -2061 -1796 -1806 -2084 -1933 -2095 -2136 -2078 -1884 -2438 -2286 -2138 -1750 -2184 -1799 -2278 -2410 -2642 -2435 -1956 -2399 -1774 -2129 -1898 -1823 -1938 -2299 -1862 -2420 -2673 -1984 -2204 -1717 -2074 -2213 -2436 -2297 -2592 -2667 -2703 -2511 -1779 -1782 -2625 -2365 -2315 -2381 -1788 -1714 -2302 -1927 -2325 -2506 -2169 -2328 -2629 -2128 -2655 -2282 -2073 -2395 -2247 -2521 -2260 -1868 -1988 -2324 -2705 -2541 -1731 -2681 -2707 -2465 -1785 -2149 -2045 -2505 -2611 -2217 -2180 -1904 -2453 -2484 -1871 -2309 -2349 -2482 -2004 -1965 -2406 -2162 -1805 -2654 -2007 -1947 -1981 -2112 -2141 -1720 -1758 -2080 -2330 -2030 -2432 -2089 -2547 -1820 -1815 -2675 -1840 -2658 -2370 -2251 -1908 -2029 -2068 -2513 -2549 -2267 -2580 -2327 -2351 -2111 -2022 -2321 -2614 -2252 -2104 -1822 -2552 -2243 -1798 -2396 -2663 -2564 -2148 -2562 -2684 -2001 -2151 -2706 -2240 -2474 -2303 -2634 -2680 -2055 -2090 -2503 -2347 -2402 -2238 -1950 -2054 -2016 -1872 -2233 -1710 -2032 -2540 -2628 -1795 -2616 -1903 -2531 -2567 -1946 -1897 -2222 -2227 -2627 -1856 -2464 -2241 -2481 -2130 -2311 -2083 -2223 -2284 -2235 -2097 -1752 -2515 -2527 -2385 -2189 -2283 -2182 -2079 -2375 -2174 -2437 -1993 -2517 -2443 -2224 -2648 -2171 -2290 -2542 -2038 -1855 -1831 -1759 -1848 -2445 -1827 -2429 -2205 -2598 -2657 -1728 -2065 -1918 -2427 -2573 -2620 -2292 -1777 -2008 -1875 -2288 -2256 -2033 -2470 -2585 -2610 -2082 -2230 -1915 -1847 -2337 -2512 -2386 -2006 -2653 -2346 -1951 -2110 -2639 -2520 -1939 -2683 -2139 -2220 -1910 -2237 -1900 -1836 -2197 -1716 -1860 -2077 -2519 -2538 -2323 -1914 -1971 -1845 -2132 -1802 -1907 -2640 -2496 -2281 -2198 -2416 -2285 -1755 -2431 -2071 -2249 -2123 -1727 -2459 -2304 -2199 -1791 -1809 -1780 -2210 -2417 -1874 -1878 -2116 -1961 -1863 -2579 -2477 -2228 -2332 -2578 -2457 -2024 -1934 -2316 -1841 -1764 -1737 -2322 -2239 -2294 -1729 -2488 -1974 -2473 -2098 -2612 -1834 -2340 -2423 -2175 -2280 -2617 -2208 -2560 -1741 -2600 -2059 -1747 -2242 -2700 -2232 -2057 -2147 -2682 -1792 -1826 -2120 -1895 -2364 -2163 -1851 -2391 -2414 -2452 -1803 -1989 -2623 -2200 -2528 -2415 -1804 -2146 -2619 -2687 -1762 -2172 -2270 -2678 -2593 -2448 -1882 -2257 -2500 -1899 -2478 -2412 -2107 -1746 -2428 -2115 -1800 -1901 -2397 -2530 -1912 
-2108 -2206 -2091 -1740 -2219 -1976 -2099 -2142 -2671 -2668 -2216 -2272 -2229 -2666 -2456 -2534 -2697 -2688 -2062 -2691 -2689 -2154 -2590 -2626 -2390 -1813 -2067 -1952 -2518 -2358 -1789 -2076 -2049 -2119 -2013 -2124 -2556 -2105 -2093 -1885 -2305 -2354 -2135 -2601 -1770 -1995 -2504 -1749 -2157 diff --git a/data/ind.cora.tx b/data/ind.cora.tx deleted file mode 100644 index 6e856d7..0000000 Binary files a/data/ind.cora.tx and /dev/null differ diff --git a/data/ind.cora.ty b/data/ind.cora.ty deleted file mode 100644 index da1734a..0000000 Binary files a/data/ind.cora.ty and /dev/null differ diff --git a/data/ind.cora.x b/data/ind.cora.x deleted file mode 100644 index c4a91d0..0000000 Binary files a/data/ind.cora.x and /dev/null differ diff --git a/data/ind.cora.y b/data/ind.cora.y deleted file mode 100644 index 58e30ef..0000000 Binary files a/data/ind.cora.y and /dev/null differ diff --git a/data/ind.pubmed.allx b/data/ind.pubmed.allx deleted file mode 100644 index 3b86bcc..0000000 Binary files a/data/ind.pubmed.allx and /dev/null differ diff --git a/data/ind.pubmed.ally b/data/ind.pubmed.ally deleted file mode 100644 index 4c8bcdc..0000000 Binary files a/data/ind.pubmed.ally and /dev/null differ diff --git a/data/ind.pubmed.graph b/data/ind.pubmed.graph deleted file mode 100644 index 76eb87e..0000000 Binary files a/data/ind.pubmed.graph and /dev/null differ diff --git a/data/ind.pubmed.test.index b/data/ind.pubmed.test.index deleted file mode 100644 index 41f6bef..0000000 --- a/data/ind.pubmed.test.index +++ /dev/null @@ -1,1000 +0,0 @@ -18747 -19392 -19181 -18843 -19221 -18962 -19560 -19097 -18966 -19014 -18756 -19313 -19000 -19569 -19359 -18854 -18970 -19073 -19661 -19180 -19377 -18750 -19401 -18788 -19224 -19447 -19017 -19241 -18890 -18908 -18965 -19001 -18849 -19641 -18852 -19222 -19172 -18762 -19156 -19162 -18856 -18763 -19318 -18826 -19712 -19192 -19695 -19030 -19523 -19249 -19079 -19232 -19455 -18743 -18800 -19071 -18885 -19593 -19394 -19390 -18832 -19445 -18838 -19632 -19548 -19546 -18825 -19498 -19266 -19117 -19595 -19252 -18730 -18913 -18809 -19452 -19520 -19274 -19555 -19388 -18919 -19099 -19637 -19403 -18720 -19526 -18905 -19451 -19408 -18923 -18794 -19322 -19431 -18912 -18841 -19239 -19125 -19258 -19565 -18898 -19482 -19029 -18778 -19096 -19684 -19552 -18765 -19361 -19171 -19367 -19623 -19402 -19327 -19118 -18888 -18726 -19510 -18831 -19490 -19576 -19050 -18729 -18896 -19246 -19012 -18862 -18873 -19193 -19693 -19474 -18953 -19115 -19182 -19269 -19116 -18837 -18872 -19007 -19212 -18798 -19102 -18772 -19660 -19511 -18914 -18886 -19672 -19360 -19213 -18810 -19420 -19512 -18719 -19432 -19350 -19127 -18782 -19587 -18924 -19488 -18781 -19340 -19190 -19383 -19094 -18835 -19487 -19230 -18791 -18882 -18937 -18928 -18755 -18802 -19516 -18795 -18786 -19273 -19349 -19398 -19626 -19130 -19351 -19489 -19446 -18959 -19025 -18792 -18878 -19304 -19629 -19061 -18785 -19194 -19179 -19210 -19417 -19583 -19415 -19443 -18739 -19662 -18904 -18910 -18901 -18960 -18722 -18827 -19290 -18842 -19389 -19344 -18961 -19098 -19147 -19334 -19358 -18829 -18984 -18931 -18742 -19320 -19111 -19196 -18887 -18991 -19469 -18990 -18876 -19261 -19270 -19522 -19088 -19284 -19646 -19493 -19225 -19615 -19449 -19043 -19674 -19391 -18918 -19155 -19110 -18815 -19131 -18834 -19715 -19603 -19688 -19133 -19053 -19166 -19066 -18893 -18757 -19582 -19282 -19257 -18869 -19467 -18954 -19371 -19151 -19462 -19598 -19653 -19187 -19624 -19564 -19534 -19581 -19478 -18985 -18746 -19342 -18777 -19696 -18824 -19138 -18728 -19643 -19199 -18731 
-19168 -18948 -19216 -19697 -19347 -18808 -18725 -19134 -18847 -18828 -18996 -19106 -19485 -18917 -18911 -18776 -19203 -19158 -18895 -19165 -19382 -18780 -18836 -19373 -19659 -18947 -19375 -19299 -18761 -19366 -18754 -19248 -19416 -19658 -19638 -19034 -19281 -18844 -18922 -19491 -19272 -19341 -19068 -19332 -19559 -19293 -18804 -18933 -18935 -19405 -18936 -18945 -18943 -18818 -18797 -19570 -19464 -19428 -19093 -19433 -18986 -19161 -19255 -19157 -19046 -19292 -19434 -19298 -18724 -19410 -19694 -19214 -19640 -19189 -18963 -19218 -19585 -19041 -19550 -19123 -19620 -19376 -19561 -18944 -19706 -19056 -19283 -18741 -19319 -19144 -19542 -18821 -19404 -19080 -19303 -18793 -19306 -19678 -19435 -19519 -19566 -19278 -18946 -19536 -19020 -19057 -19198 -19333 -19649 -19699 -19399 -19654 -19136 -19465 -19321 -19577 -18907 -19665 -19386 -19596 -19247 -19473 -19568 -19355 -18925 -19586 -18982 -19616 -19495 -19612 -19023 -19438 -18817 -19692 -19295 -19414 -19676 -19472 -19107 -19062 -19035 -18883 -19409 -19052 -19606 -19091 -19651 -19475 -19413 -18796 -19369 -19639 -19701 -19461 -19645 -19251 -19063 -19679 -19545 -19081 -19363 -18995 -19549 -18790 -18855 -18833 -18899 -19395 -18717 -19647 -18768 -19103 -19245 -18819 -18779 -19656 -19076 -18745 -18971 -19197 -19711 -19074 -19128 -19466 -19139 -19309 -19324 -18814 -19092 -19627 -19060 -18806 -18929 -18737 -18942 -18906 -18858 -19456 -19253 -19716 -19104 -19667 -19574 -18903 -19237 -18864 -19556 -19364 -18952 -19008 -19323 -19700 -19170 -19267 -19345 -19238 -18909 -18892 -19109 -19704 -18902 -19275 -19680 -18723 -19242 -19112 -19169 -18956 -19343 -19650 -19541 -19698 -19521 -19087 -18976 -19038 -18775 -18968 -19671 -19412 -19407 -19573 -19027 -18813 -19357 -19460 -19673 -19481 -19036 -19614 -18787 -19195 -18732 -18884 -19613 -19657 -19575 -19226 -19589 -19234 -19617 -19707 -19484 -18740 -19424 -18784 -19419 -19159 -18865 -19105 -19315 -19480 -19664 -19378 -18803 -19605 -18870 -19042 -19426 -18848 -19223 -19509 -19532 -18752 -19691 -18718 -19209 -19362 -19090 -19492 -19567 -19687 -19018 -18830 -19530 -19554 -19119 -19442 -19558 -19527 -19427 -19291 -19543 -19422 -19142 -18897 -18950 -19425 -19002 -19588 -18978 -19551 -18930 -18736 -19101 -19215 -19150 -19263 -18949 -18974 -18759 -19335 -19200 -19129 -19328 -19437 -18988 -19429 -19368 -19406 -19049 -18811 -19296 -19256 -19385 -19602 -18770 -19337 -19580 -19476 -19045 -19132 -19089 -19120 -19265 -19483 -18767 -19227 -18934 -19069 -18820 -19006 -19459 -18927 -19037 -19280 -19441 -18823 -19015 -19114 -19618 -18957 -19176 -18853 -19648 -19201 -19444 -19279 -18751 -19302 -19505 -18733 -19601 -19533 -18863 -19708 -19387 -19346 -19152 -19206 -18851 -19338 -19681 -19380 -19055 -18766 -19085 -19591 -19547 -18958 -19146 -18840 -19051 -19021 -19207 -19235 -19086 -18979 -19300 -18939 -19100 -19619 -19287 -18980 -19277 -19326 -19108 -18920 -19625 -19374 -19078 -18734 -19634 -19339 -18877 -19423 -19652 -19683 -19044 -18983 -19330 -19529 -19714 -19468 -19075 -19540 -18839 -19022 -19286 -19537 -19175 -19463 -19167 -19705 -19562 -19244 -19486 -19611 -18801 -19178 -19590 -18846 -19450 -19205 -19381 -18941 -19670 -19185 -19504 -19633 -18997 -19113 -19397 -19636 -19709 -19289 -19264 -19353 -19584 -19126 -18938 -19669 -18964 -19276 -18774 -19173 -19231 -18973 -18769 -19064 -19040 -19668 -18738 -19082 -19655 -19236 -19352 -19609 -19628 -18951 -19384 -19122 -18875 -18992 -18753 -19379 -19254 -19301 -19506 -19135 -19010 -19682 -19400 -19579 -19316 -19553 -19208 -19635 -19644 -18891 -19024 -18989 -19250 -18850 -19317 -18915 -19607 
-18799 -18881 -19479 -19031 -19365 -19164 -18744 -18760 -19502 -19058 -19517 -18735 -19448 -19243 -19453 -19285 -18857 -19439 -19016 -18975 -19503 -18998 -18981 -19186 -18994 -19240 -19631 -19070 -19174 -18900 -19065 -19220 -19229 -18880 -19308 -19372 -19496 -18771 -19325 -19538 -19033 -18874 -19077 -19211 -18764 -19458 -19571 -19121 -19019 -19059 -19497 -18969 -19666 -19297 -19219 -19622 -19184 -18977 -19702 -19539 -19329 -19095 -19675 -18972 -19514 -19703 -19188 -18866 -18812 -19314 -18822 -18845 -19494 -19411 -18916 -19686 -18967 -19294 -19143 -19204 -18805 -19689 -19233 -18758 -18748 -19011 -19685 -19336 -19608 -19454 -19124 -18868 -18807 -19544 -19621 -19228 -19154 -19141 -19145 -19153 -18860 -19163 -19393 -19268 -19160 -19305 -19259 -19471 -19524 -18783 -19396 -18894 -19430 -19690 -19348 -19597 -19592 -19677 -18889 -19331 -18773 -19137 -19009 -18932 -19599 -18816 -19054 -19067 -19477 -19191 -18921 -18940 -19578 -19183 -19004 -19072 -19710 -19005 -19610 -18955 -19457 -19148 -18859 -18993 -19642 -19047 -19418 -19535 -19600 -19312 -19039 -19028 -18879 -19003 -19026 -19013 -19149 -19177 -19217 -18987 -19354 -19525 -19202 -19084 -19032 -18749 -18867 -19048 -18999 -19260 -19630 -18727 -19356 -19083 -18926 -18789 -19370 -18861 -19311 -19557 -19531 -19436 -19140 -19310 -19501 -18721 -19604 -19713 -19262 -19563 -19507 -19440 -19572 -19513 -19515 -19518 -19421 -19470 -19499 -19663 -19508 -18871 -19528 -19500 -19307 -19288 -19594 -19271 diff --git a/data/ind.pubmed.tx b/data/ind.pubmed.tx deleted file mode 100644 index eee4f3c..0000000 Binary files a/data/ind.pubmed.tx and /dev/null differ diff --git a/data/ind.pubmed.ty b/data/ind.pubmed.ty deleted file mode 100644 index 225a0bb..0000000 Binary files a/data/ind.pubmed.ty and /dev/null differ diff --git a/data/ind.pubmed.x b/data/ind.pubmed.x deleted file mode 100644 index 16c0eca..0000000 Binary files a/data/ind.pubmed.x and /dev/null differ diff --git a/data/ind.pubmed.y b/data/ind.pubmed.y deleted file mode 100644 index e866705..0000000 Binary files a/data/ind.pubmed.y and /dev/null differ diff --git a/initializations.py b/initializations.py deleted file mode 100644 index eea6b72..0000000 --- a/initializations.py +++ /dev/null @@ -1,12 +0,0 @@ -import tensorflow as tf -import numpy as np - - -def weight_variable_glorot(input_dim, output_dim, name=""): - """Create a weight variable with Glorot & Bengio (AISTATS 2010) - initialization. 
- """ - init_range = np.sqrt(6.0 / (input_dim + output_dim)) - initial = tf.random_uniform([input_dim, output_dim], minval=-init_range, - maxval=init_range, dtype=tf.float32) - return tf.Variable(initial, name=name) diff --git a/input_data.py b/input_data.py deleted file mode 100644 index e9a6fcf..0000000 --- a/input_data.py +++ /dev/null @@ -1,124 +0,0 @@ -import numpy as np -import pickle as pkl -import networkx as nx -import scipy.sparse as sp -import sys - - -def parse_index_file(filename): - index = [] - for line in open(filename): - index.append(int(line.strip())) - return index - - -def sample_mask(idx, l): - """Create mask.""" - mask = np.zeros(l) - mask[idx] = 1 - return np.array(mask, dtype=np.bool) - - -def load_data(dataset): - # load the data: x, tx, allx, graph - # x => 训练实例的特征向量,如scipy.sparse.csr.csr_matrix类的实例 - # tx => 测试实例的特征向量,如scipy.sparse.csr.csr_matrix类的实例 - # allx => 有标签的+无无标签训练实例的特征向量,是ind.dataset_str.x的超集 - # y => 训练实例的标签,独热编码,numpy.ndarray类的实例 - # ty => 测试实例的标签,独热编码,numpy.ndarray类的实例 - # ally => 有标签的+无无标签训练实例的标签,独热编码,numpy.ndarray类的实例 - # graph => 图数据,collections.defaultdict类的实例,格式为 {index:[index_of_neighbor_nodes]} - # index => 测试实例的id - names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph'] - objects = [] - for i in range(len(names)): - with open("data/ind.{}.{}".format(dataset, names[i]), 'rb') as f: - if sys.version_info > (3, 0): - objects.append(pkl.load(f, encoding='latin1')) - else: - objects.append(pkl.load(f)) - x, y, tx, ty, allx, ally, graph = tuple(objects) - test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset)) - test_idx_range = np.sort(test_idx_reorder) - - if dataset == 'citeseer': - # Fix citeseer dataset (there are some isolated nodes in the graph) - # Find isolated nodes, add them as zero-vecs into the right position、 - test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder) + 1) - tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1])) - tx_extended[test_idx_range - min(test_idx_range), :] = tx - tx = tx_extended - ty_extended = np.zeros((len(test_idx_range_full), y.shape[1])) - ty_extended[test_idx_range - min(test_idx_range), :] = ty - ty = ty_extended - - features = sp.vstack((allx, tx)).tolil() - features[test_idx_reorder, :] = features[test_idx_range, :] - adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph)) - - labels = np.vstack((ally, ty)) - labels[test_idx_reorder, :] = labels[test_idx_range, :] - - idx_test = test_idx_range.tolist() - idx_train = range(len(y)) - idx_val = range(len(y), len(y) + 500) - - train_mask = sample_mask(idx_train, labels.shape[0]) - val_mask = sample_mask(idx_val, labels.shape[0]) - test_mask = sample_mask(idx_test, labels.shape[0]) - - y_train = np.zeros(labels.shape) - y_val = np.zeros(labels.shape) - y_test = np.zeros(labels.shape) - y_train[train_mask, :] = labels[train_mask, :] - y_val[val_mask, :] = labels[val_mask, :] - y_test[test_mask, :] = labels[test_mask, :] - - return adj, features, y_test, tx, ty, test_mask, np.argmax(labels, 1) - - -def load_alldata(dataset_str): - """Load data.""" - names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph'] - objects = [] - for i in range(len(names)): - objects.append(pkl.load(open("data/ind.{}.{}".format(dataset_str, names[i])))) - - x, y, tx, ty, allx, ally, graph = tuple(objects) - test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str)) - test_idx_range = np.sort(test_idx_reorder) - - if dataset_str == 'citeseer': - # Fix citeseer dataset (there are some isolated nodes in the 
graph) - # Find isolated nodes, add them as zero-vecs into the right position - test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder) + 1) - tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1])) - tx_extended[test_idx_range - min(test_idx_range), :] = tx - tx = tx_extended - ty_extended = np.zeros((len(test_idx_range_full), y.shape[1])) - ty_extended[test_idx_range - min(test_idx_range), :] = ty - ty = ty_extended - - features = sp.vstack((allx, tx)).tolil() - features[test_idx_reorder, :] = features[test_idx_range, :] - adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph)) - - labels = np.vstack((ally, ty)) - labels[test_idx_reorder, :] = labels[test_idx_range, :] - - idx_test = test_idx_range.tolist() - idx_train = range(len(y)) - idx_val = range(len(y), len(y) + 500) - - train_mask = sample_mask(idx_train, labels.shape[0]) - val_mask = sample_mask(idx_val, labels.shape[0]) - test_mask = sample_mask(idx_test, labels.shape[0]) - - y_train = np.zeros(labels.shape) - y_val = np.zeros(labels.shape) - y_test = np.zeros(labels.shape) - y_train[train_mask, :] = labels[train_mask, :] - y_val[val_mask, :] = labels[val_mask, :] - y_test[test_mask, :] = labels[test_mask, :] - - return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask, np.argmax(labels, 1) diff --git a/layers.py b/layers.py deleted file mode 100644 index 2f157af..0000000 --- a/layers.py +++ /dev/null @@ -1,163 +0,0 @@ -from initializations import * -import tensorflow as tf - -flags = tf.app.flags -FLAGS = flags.FLAGS - -# global unique layer ID dictionary for layer name assignment -_LAYER_UIDS = {} - - -def get_layer_uid(layer_name=''): - """Helper function, assigns unique layer IDs - 分配唯一的层ID - """ - if layer_name not in _LAYER_UIDS: - _LAYER_UIDS[layer_name] = 1 - return 1 - else: - _LAYER_UIDS[layer_name] += 1 - return _LAYER_UIDS[layer_name] - - -def dropout_sparse(x, keep_prob, num_nonzero_elems): - """ - Dropout for sparse tensors. Currently fails for very large sparse tensors (>1M elements) - num_nonzero_elems: 稀疏矩阵中的非零元素个数 - keep_prob: - x: input - """ - noise_shape = [num_nonzero_elems] - random_tensor = keep_prob - random_tensor += tf.random_uniform(noise_shape) - dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool) - pre_out = tf.sparse_retain(x, dropout_mask) - return pre_out * (1. / keep_prob) - - -class Layer(object): - """Base layer class. Defines basic API for all layer objects. - - # Properties - name: String, defines the variable scope of the layer. - - # Methods - _call(inputs): Defines computation graph of layer - (i.e. 
-        _call(inputs): Defines computation graph of layer
-            (i.e. takes input, returns output)
-        __call__(inputs): Wrapper for _call()
-    """
-
-    def __init__(self, **kwargs):
-        allowed_kwargs = {'name', 'logging'}
-        for kwarg in kwargs.keys():
-            assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
-        name = kwargs.get('name')
-        if not name:
-            layer = self.__class__.__name__.lower()
-            name = layer + '_' + str(get_layer_uid(layer))
-        self.name = name
-        self.vars = {}
-        logging = kwargs.get('logging', False)
-        self.logging = logging
-        self.issparse = False
-
-    def _call(self, inputs):
-        return inputs
-
-    def __call__(self, inputs):
-        with tf.name_scope(self.name):
-            outputs = self._call(inputs)
-        return outputs
-
-
-class GraphConvolution(Layer):
-    """Basic graph convolution layer for undirected graph without edge labels."""
-
-    def __init__(self, input_dim, output_dim, adj, dropout=0., act=tf.nn.relu, **kwargs):
-        super(GraphConvolution, self).__init__(**kwargs)
-        with tf.variable_scope(self.name + '_vars'):
-            self.vars['weights'] = weight_variable_glorot(input_dim, output_dim, name="weights")
-        self.dropout = dropout
-        self.adj = adj
-        self.act = act
-
-    def _call(self, inputs):
-        x = inputs
-        x = tf.nn.dropout(x, 1 - self.dropout)
-        x = tf.matmul(x, self.vars['weights'])
-        x = tf.sparse_tensor_dense_matmul(self.adj, x)
-        outputs = self.act(x)
-        return outputs
-
-
-class GraphConvolutionSparse(Layer):
-    """
-    Graph convolution layer for sparse inputs.
-    Takes an extra features_nonzero argument compared to GraphConvolution.
-    """
-
-    def __init__(self, input_dim, output_dim, adj, features_nonzero, dropout=0., act=tf.nn.relu, **kwargs):
-        super(GraphConvolutionSparse, self).__init__(**kwargs)
-        with tf.variable_scope(self.name + '_vars'):
-            self.vars['weights'] = weight_variable_glorot(input_dim, output_dim, name="weights")
-        self.dropout = dropout
-        self.adj = adj
-        self.act = act
-        self.issparse = True
-        self.features_nonzero = features_nonzero
-
-    def _call(self, inputs):
-        x = inputs
-        x = dropout_sparse(x, 1 - self.dropout, self.features_nonzero)
-        x = tf.sparse_tensor_dense_matmul(x, self.vars['weights'])
-        x = tf.sparse_tensor_dense_matmul(self.adj, x)
-        outputs = self.act(x)
-        return outputs
-
-
-class InnerProductDecoder(Layer):
-    """Decoder model layer for link prediction."""
-
-    def __init__(self, input_dim, dropout=0., act=tf.nn.sigmoid, **kwargs):
-        super(InnerProductDecoder, self).__init__(**kwargs)
-        self.dropout = dropout
-        self.act = act
-
-    def _call(self, inputs):
-        """
-        The decoder is simply the input multiplied by its own transpose.
-        """
-        inputs = tf.nn.dropout(inputs, 1 - self.dropout)
-        x = tf.transpose(inputs)
-        x = tf.matmul(inputs, x)
-        x = tf.reshape(x, [-1])
-        outputs = self.act(x)
-        return outputs
-
-
-class GraphConvolution_z2g(Layer):
-    """Basic graph convolution layer for undirected graph without edge labels."""
-
-    def __init__(self, input_dim, output_dim, adj, dropout=0., act=tf.nn.relu, **kwargs):
-        super(GraphConvolution_z2g, self).__init__(**kwargs)
-        with tf.variable_scope(self.name + '_vars'):
-            self.vars['weights'] = weight_variable_glorot(input_dim, output_dim, name="weights")
-        self.dropout = dropout
-        self.adj = adj
-        self.act = act
-
-    def _call(self, inputs):
-        x = inputs
-        x = tf.nn.dropout(x, 1 - self.dropout)
-        x = tf.matmul(x, self.vars['weights'])
-        x = tf.sparse_tensor_dense_matmul(self.adj, x)
-        outputs = self.act(x)
-        return outputs
diff --git a/link_prediction.py b/link_prediction.py
deleted file mode 100644
index a857e94..0000000
--- a/link_prediction.py
+++ /dev/null
@@ -1,113 +0,0 @@
-from __future__ import division
-from __future__ import print_function
-import os
-
-# Restrict TensorFlow to GPU 0 (set to "" to hide all GPUs and train on CPU)
-os.environ['CUDA_VISIBLE_DEVICES'] = "0"
-
-import tensorflow as tf
-import settings
-from constructor import get_placeholder, get_model, format_data, get_optimizer, update
-from metrics import linkpred_metrics
-from sklearn.neighbors import KernelDensity
-from dppy.finite_dpps import FiniteDPP
-from sklearn.decomposition import PCA
-import numpy as np
-import scipy.io as scio
-
-# Settings
-flags = tf.app.flags
-FLAGS = flags.FLAGS
-
-
-class Link_pred_Runner():
-    def __init__(self, settings):
-        self.data_name = settings['data_name']
-        self.iteration = settings['iterations']
-        self.model = settings['model']
-
-    def erun(self):
-        model_str = self.model
-        # formatted data
-        feas = format_data(self.data_name)
-
-        # Define placeholders; get_placeholder uses adj (for adj.shape) and num_features
-        placeholders = get_placeholder(feas['adj'], feas['num_features'])
-
-        # Build the Gaussian-mixture prior from a DPP sample plus kernel density estimation
-        DPP = FiniteDPP('correlation', **{'K': feas['adj'].toarray()})
-        # DPP.sample_exact_k_dpp(size=4)
-        pca = PCA(n_components=FLAGS.hidden2)
-
-        # index = DPP.list_of_samples[0]
-
-        if self.data_name == 'cora':
-            DPP.sample_exact_k_dpp(size=21)  # e 21 ic 6 gpcr 3
-            index = DPP.list_of_samples[0]
-        elif self.data_name == 'citeseer':
-
-            index = np.array([1782, 741, 3258, 3189, 3112, 2524, 2895, 1780, 1100, 2735, 1318,
-                              2944, 1825, 18, 987, 2564, 463, 6, 3173, 701, 1901, 2349,
-                              2786, 2412, 646, 2626, 2648, 1793, 432, 538, 1729, 1217, 1397,
-                              1932, 2850, 458, 2129, 702, 2934, 2030, 2882, 1393, 308, 1271,
-                              1106, 2688, 629, 1145, 3251, 1903, 1004, 1149, 1385, 285, 858,
-                              2977, 844, 335, 532, 404, 3174, 528])
-
-        elif self.data_name == 'pubmed':
-            index = np.array([842, 3338, 5712, 17511, 10801, 2714, 6970, 13296, 5466,
-                              2230])
-        feature_sample = feas['features_dense']
-        feature_sample = pca.fit_transform(feature_sample)
-
-        featuresCompress = np.array([feature_sample[i] for i in index])
-        # featuresCompress = np.array(feature_sample)
-        kde = KernelDensity(bandwidth=0.7).fit(featuresCompress)
-
-        # construct model
-        d_real, discriminator, ae_model, model_z2g, D_Graph, GD_real = get_model(model_str, placeholders, feas['num_features'], feas['num_nodes'], feas['features_nonzero'])
-
-        # Optimizer
-        opt = get_optimizer(model_str, ae_model, model_z2g, D_Graph, discriminator, placeholders, feas['pos_weight'], feas['norm'], d_real, feas['num_nodes'], GD_real)
-
-        # Initialize session
-
-        # config = tf.ConfigProto()
-        # config.gpu_options.allow_growth = True
-        # sess = tf.Session(config = config)
-        sess = tf.Session()
-        sess.run(tf.global_variables_initializer())
-
-        val_roc_score = []
-        record = []
-        record_emb = []
-        # Train model
-        for epoch in range(self.iteration):
-
-            emb, avg_cost = update(ae_model, opt, sess, feas['adj_norm'], feas['adj_label'], feas['features'], placeholders, feas['adj'], kde, feas['features_dense'])
-
-            lm_train = linkpred_metrics(feas['val_edges'], feas['val_edges_false'])
-            roc_curr, ap_curr, _, aupr_score = lm_train.get_roc_score(emb, feas)
-            val_roc_score.append(roc_curr)
-            print("Epoch:", '%04d' % (epoch + 1),
-                  "train_loss= {:.5f}, d_loss= {:.5f}, g_loss= {:.5f}, GD_loss= {:.5f}, GG_loss= {:.5f}".format(avg_cost[0], avg_cost[1], avg_cost[2], avg_cost[3], avg_cost[4]),
"val_roc=", - "{:.5f}".format(val_roc_score[-1]), "val_ap=", "{:.5f}".format(ap_curr), "val_aupr=", "{:.5f}".format(aupr_score)) - - if (epoch + 1) % 10 == 0: - lm_test = linkpred_metrics(feas['test_edges'], feas['test_edges_false']) - roc_score, ap_score, _, aupr_score = lm_test.get_roc_score(emb, feas) - print('Test ROC score: ' + str(roc_score), 'Test AUPR score: ' + str(aupr_score), 'Test AP score: ' + str(ap_score)) - # print('Test AUPR score: ' + str(aupr_score)) - # print('Test AP score: ' + str(ap_score)) - record.append([roc_score, aupr_score, ap_score]) - record_emb.append(emb) - rec = np.array(record) - index = rec[:, 0].tolist().index(max(rec[:, 0].tolist())) - index_pr = rec[:, 1].tolist().index(max(rec[:, 1].tolist())) - emb = record_emb[index] - ana = record[index] - ana_pr = record[index_pr] - # scio.savemat('result/{}_link_64_64_new.mat'.format(self.data_name), {'embedded': emb,'labels': feas['true_labels']}) - print('The peak [auc] test_roc=%f, aupr=%f, ap = %f' % (ana[0], ana[1], ana[2])) - print('The peak [aupr] test_roc=%f, aupr=%f, ap = %f' % (ana_pr[0], ana_pr[1], ana_pr[2])) diff --git a/load_data.py b/load_data.py deleted file mode 100644 index 90eff50..0000000 --- a/load_data.py +++ /dev/null @@ -1,27 +0,0 @@ -import pickle - -import numpy as np -import scipy.sparse as sp - - -def load_data_1(dataset): - adj = np.loadtxt('./data/partitioned_data/{0}/orig/{0}_adj_orig.txt'.format(dataset), dtype=int) - adj = sp.csr_matrix(adj) - - features = pickle.load(open("data/partitioned_data/{0}/feature/{0}_feature.pkl".format(dataset),'rb')) - - y_test = 0 - - tx = 0 - - ty = 0 - - test_mask = 0 - - labels = 0 - - return adj, features, y_test, tx, ty, test_mask, labels - - -if __name__ == "__main__": - load_data_1('e') diff --git a/metrics.py b/metrics.py deleted file mode 100644 index e90f6b8..0000000 --- a/metrics.py +++ /dev/null @@ -1,112 +0,0 @@ -from sklearn.metrics import f1_score -from sklearn.metrics import roc_auc_score,precision_recall_curve, auc -from sklearn.metrics import average_precision_score -from sklearn import metrics -from munkres import Munkres, print_matrix -import numpy as np - - -class linkpred_metrics(): - def __init__(self, edges_pos, edges_neg): - self.edges_pos = edges_pos - self.edges_neg = edges_neg - - def get_roc_score(self, emb, feas): - # if emb is None: - # feed_dict.update({placeholders['dropout']: 0}) - # emb = sess.run(model.z_mean, feed_dict=feed_dict) - - def sigmoid(x): - return 1 / (1 + np.exp(-x)) - - # Predict on test set of edges - adj_rec = np.dot(emb, emb.T) - preds = [] - pos = [] - for e in self.edges_pos: - preds.append(sigmoid(adj_rec[e[0], e[1]])) - pos.append(feas['adj_orig'][e[0], e[1]]) - - preds_neg = [] - neg = [] - for e in self.edges_neg: - preds_neg.append(sigmoid(adj_rec[e[0], e[1]])) - neg.append(feas['adj_orig'][e[0], e[1]]) - - preds_all = np.hstack([preds, preds_neg]) - labels_all = np.hstack([np.ones(len(preds)), np.zeros(len(preds))]) - roc_score = roc_auc_score(labels_all, preds_all) - ap_score = average_precision_score(labels_all, preds_all) - - precision, recall, _thresholds = metrics.precision_recall_curve(labels_all, preds_all) - aupr_score = auc(recall, precision) - - return roc_score, ap_score, emb, aupr_score - - -class clustering_metrics(): - def __init__(self, true_label, predict_label): - self.true_label = true_label - self.pred_label = predict_label - - def clusteringAcc(self): - # best mapping between true_label and predict label - l1 = list(set(self.true_label)) - numclass1 = len(l1) - - l2 = 
list(set(self.pred_label))
-        numclass2 = len(l2)
-        if numclass1 != numclass2:
-            print('Error: the numbers of true and predicted classes are not equal!')
-            return 0
-
-        cost = np.zeros((numclass1, numclass2), dtype=int)
-        for i, c1 in enumerate(l1):
-            mps = [i1 for i1, e1 in enumerate(self.true_label) if e1 == c1]
-            for j, c2 in enumerate(l2):
-                mps_d = [i1 for i1 in mps if self.pred_label[i1] == c2]
-
-                cost[i][j] = len(mps_d)
-
-        # match two clustering results by Munkres algorithm
-        m = Munkres()
-        cost = (-cost).tolist()
-
-        indexes = m.compute(cost)
-
-        # get the match results
-        new_predict = np.zeros(len(self.pred_label))
-        for i, c in enumerate(l1):
-            # corresponding label in l2:
-            c2 = l2[indexes[i][1]]
-
-            # ai is the index with label == c2 in the pred_label list
-            ai = [ind for ind, elm in enumerate(self.pred_label) if elm == c2]
-            new_predict[ai] = c
-
-        acc = metrics.accuracy_score(self.true_label, new_predict)
-        f1_macro = metrics.f1_score(self.true_label, new_predict, average='macro')
-        precision_macro = metrics.precision_score(self.true_label, new_predict, average='macro')
-        recall_macro = metrics.recall_score(self.true_label, new_predict, average='macro')
-        f1_micro = metrics.f1_score(self.true_label, new_predict, average='micro')
-        precision_micro = metrics.precision_score(self.true_label, new_predict, average='micro')
-        recall_micro = metrics.recall_score(self.true_label, new_predict, average='micro')
-        return acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro
-
-    def evaluationClusterModelFromLabel(self):
-        nmi = metrics.normalized_mutual_info_score(self.true_label, self.pred_label)
-        adjscore = metrics.adjusted_rand_score(self.true_label, self.pred_label)
-        acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro = self.clusteringAcc()
-
-        print('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f, precision_micro=%f, recall_micro=%f, NMI=%f, ADJ_RAND_SCORE=%f' % (
-            acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro, nmi, adjscore))
-
-        fh = open('recoder.txt', 'a')
-
-        fh.write('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f, precision_micro=%f, recall_micro=%f, NMI=%f, ADJ_RAND_SCORE=%f' % (
-            acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro, nmi, adjscore))
-        fh.write('\r\n')
-        fh.flush()
-        fh.close()
-
-        return acc, nmi, adjscore
diff --git a/optimizer.py b/optimizer.py
deleted file mode 100644
index 10280b1..0000000
--- a/optimizer.py
+++ /dev/null
@@ -1,131 +0,0 @@
-import tensorflow as tf
-
-flags = tf.app.flags
-FLAGS = flags.FLAGS
-
-
-class OptimizerAE(object):
-    def __init__(self, preds, labels, pos_weight, norm, d_real, d_fake):
-        preds_sub = preds
-        labels_sub = labels
-
-        self.real = d_real
-
-        # Discriminator loss
-        # self.dc_loss_real = tf.reduce_mean(
-        #     tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(self.real), logits=self.real, name='dclreal'))
-        self.dc_loss_real = - tf.reduce_mean(self.real)
-
-        # self.dc_loss_fake = tf.reduce_mean(
-        #     tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_fake), logits=d_fake, name='dcfake'))
-        self.dc_loss_fake = tf.reduce_mean(d_fake)
-        # A WGAN-GP gradient penalty would need the interpolation gradient, which is not
-        # passed to OptimizerAE, so only the plain WGAN critic loss is used here:
-        # GP_loss = tf.reduce_mean(tf.square(tf.sqrt(tf.reduce_mean(tf.square(gradient), axis=[0, 1])) - 1))
-        self.dc_loss = self.dc_loss_fake + self.dc_loss_real
-
-        # Generator loss
-        # generator_loss = tf.reduce_mean(
-        #     tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_fake), logits=d_fake, name='gl'))
tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_fake), logits=d_fake, name='gl')) - generator_loss = -self.dc_loss_fake - - # pos_weight,允许人们通过向上或向下加权相对于负误差的正误差的成本来权衡召回率和精确度 - self.cost = norm * tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(logits=preds_sub, targets=labels_sub, pos_weight=pos_weight)) - self.generator_loss = generator_loss + self.cost - - all_variables = tf.trainable_variables() - dc_var = [var for var in all_variables if 'dc_' in var.name] - en_var = [var for var in all_variables if 'e_' in var.name] - - with tf.variable_scope(tf.get_variable_scope()): - self.discriminator_optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.discriminator_learning_rate, - beta1=0.9, name='adam1').minimize(self.dc_loss, - var_list=dc_var) # minimize(dc_loss_real, var_list=dc_var) - - self.generator_optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.discriminator_learning_rate, - beta1=0.9, name='adam2').minimize(self.generator_loss, var_list=en_var) - - # 值得注意的是,这个地方,除了对抗优化之外, - # 还单纯用cost损失又优化了一遍, - # 待会儿看训练的时候注意看是在哪部分进行的这部分优化操作 - self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate) # Adam Optimizer - self.opt_op = self.optimizer.minimize(self.cost) - self.grads_vars = self.optimizer.compute_gradients(self.cost) - - -class OptimizerCycle(object): - def __init__(self, preds, labels, pos_weight, norm, d_real, d_fake, GD_real, GD_fake, preds_z2g, labels_z2g, preds_cycle, labels_cycle, gradient, gradient_z): - preds_sub = preds - labels_sub = labels - - self.real = d_real - - # Discrimminator Loss - self.dc_loss_real = tf.reduce_mean( - tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(self.real), logits=self.real, name='dclreal')) - # self.dc_loss_real = - tf.reduce_mean(self.real) - self.dc_loss_fake = tf.reduce_mean( - tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_fake), logits=d_fake, name='dcfake')) - # self.dc_loss_fake = tf.reduce_mean(d_fake) - # GP_loss = tf.reduce_mean(tf.square(tf.sqrt(tf.reduce_mean(tf.square(gradient), axis = [0, 1])) - 1)) - # GP_loss_z = tf.reduce_mean(tf.square(tf.sqrt(tf.reduce_mean(tf.square(gradient_z), axis = [0, 1])) - 1)) - # self.dc_loss = self.dc_loss_fake + self.dc_loss_real + 10.0 * GP_loss - - self.GD_loss_real = tf.reduce_mean( - tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(GD_real), logits=GD_real, name='GD_real')) - # self.GD_loss_real = - tf.reduce_mean(GD_real) - self.GD_loss_fake = tf.reduce_mean( - tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(GD_fake), logits=GD_fake, name='GD_fake')) - # self.GD_loss_fake = tf.reduce_mean(GD_fake) - - self.dc_loss = self.dc_loss_fake + self.dc_loss_real - self.GD_loss = self.GD_loss_fake + self.GD_loss_real - - # Generator loss - generator_loss = tf.reduce_mean( - tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_fake), logits=d_fake, name='gl')) - # generator_loss = -self.dc_loss_fake - generator_loss_z2g = tf.reduce_mean( - tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(GD_fake), logits=GD_fake, name='G_z2g')) - # generator_loss_z2g = -self.GD_loss_fake - # pos_weight,允许人们通过向上或向下加权相对于负误差的正误差的成本来权衡召回率和精确度 - self.cost = norm * tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(logits=preds_sub, targets=labels_sub, pos_weight=pos_weight)) - - cost_cycle = norm * tf.reduce_mean(tf.square(preds_cycle - labels_cycle)) - - cost_z2g = norm * tf.reduce_mean(tf.square(preds_z2g - labels_z2g)) - # with tf.device("/gpu:1"): - # self.cost = 0.00001*self.cost + cost_cycle #for 
citseer cluster - self.cost = self.cost + cost_cycle - self.generator_loss = generator_loss + self.cost - self.generator_loss_z2g = generator_loss_z2g - - all_variables = tf.trainable_variables() - dc_var = [var for var in all_variables if 'dc_' in var.name] - en_var = [var for var in all_variables if 'e_' in var.name] - GG_var = [var for var in all_variables if 'GG' in var.name] - GD_var = [var for var in all_variables if 'GD' in var.name] - - with tf.variable_scope(tf.get_variable_scope()): - self.discriminator_optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.discriminator_learning_rate, - beta1=0.9, name='adam1').minimize(self.dc_loss, - var_list=dc_var) # minimize(dc_loss_real, var_list=dc_var) - - self.generator_optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.discriminator_learning_rate, - beta1=0.9, name='adam2').minimize(self.generator_loss, var_list=en_var) - - self.discriminator_optimizer_z2g = tf.train.AdamOptimizer(learning_rate=FLAGS.discriminator_learning_rate, - beta1=0.9, name='adam1').minimize(self.GD_loss, var_list=GD_var) - - self.generator_optimizer_z2g = tf.train.AdamOptimizer(learning_rate=FLAGS.discriminator_learning_rate, - beta1=0.9, name='adam2').minimize(self.generator_loss_z2g, var_list=GG_var) - - # 值得注意的是,这个地方,除了对抗优化之外, - # 还单纯用cost损失又优化了一遍, - # 待会儿看训练的时候注意看是在哪部分进行的这部分优化操作 - self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate) # Adam Optimizer - self.opt_op = self.optimizer.minimize(self.cost) - # self.grads_vars = self.optimizer.compute_gradients(self.cost) - - # self.optimizer_z2g = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate) # Adam Optimizer - # self.opt_op_z2g = self.optimizer.minimize(cost_z2g) - # self.grads_vars_z2g = self.optimizer.compute_gradients(cost_z2g) diff --git a/preprocessing.py b/preprocessing.py deleted file mode 100644 index 20bc07b..0000000 --- a/preprocessing.py +++ /dev/null @@ -1,135 +0,0 @@ -import numpy as np -import scipy.sparse as sp - - -def sparse_to_tuple(sparse_mx): - # 判断是否是coo_matrix,不是的话就转成coo_matrix - if not sp.isspmatrix_coo(sparse_mx): - sparse_mx = sparse_mx.tocoo() - coords = np.vstack((sparse_mx.row, sparse_mx.col)).transpose() - values = sparse_mx.data - shape = sparse_mx.shape - return coords, values, shape - - -def preprocess_graph(adj): - # A.sum(axis=1):计算矩阵的每一行元素之和,得到节点的度矩阵D - # np.power(x, n):数组元素求n次方,得到D^(-1/2) - # sp.diags()函数根据给定的对象创建对角矩阵,对角线上的元素为给定对象中的元素 - adj = sp.coo_matrix(adj) - adj_ = adj + sp.eye(adj.shape[0]) # A* = A+I,即对邻接矩阵加入自连接 - - rowsum = np.array(adj_.sum(1)) # 对行求和,即得到节点的度 - degree_mat_inv_sqrt = sp.diags(np.power(rowsum, -0.5).flatten()) # 得到D的-1/2次方矩阵d - adj_normalized = adj_.dot(degree_mat_inv_sqrt).transpose().dot(degree_mat_inv_sqrt).tocoo() # 这一步的实质是做归一化,即A* × d转置 × d - return sparse_to_tuple(adj_normalized) - -def construct_feed_dict(adj_normalized, adj, features, placeholders): - # construct feed dictionary - # .update()用法就是将()内的字段增加到dict当中 - feed_dict = dict() # 创建一个空字典 - feed_dict.update({placeholders['features']: features}) - feed_dict.update({placeholders['adj']: adj_normalized}) - feed_dict.update({placeholders['adj_orig']: adj}) - return feed_dict - - -def mask_test_edges(adj): - # Function to build test set with 10% positive links - # NOTE: Splits are randomized and results might slightly deviate from reported numbers in the paper. - # TODO: Clean up. 
diff --git a/preprocessing.py b/preprocessing.py
deleted file mode 100644
index 20bc07b..0000000
--- a/preprocessing.py
+++ /dev/null
@@ -1,135 +0,0 @@
-import numpy as np
-import scipy.sparse as sp
-
-
-def sparse_to_tuple(sparse_mx):
-    # If it is not a coo_matrix, convert it to one
-    if not sp.isspmatrix_coo(sparse_mx):
-        sparse_mx = sparse_mx.tocoo()
-    coords = np.vstack((sparse_mx.row, sparse_mx.col)).transpose()
-    values = sparse_mx.data
-    shape = sparse_mx.shape
-    return coords, values, shape
-
-
-def preprocess_graph(adj):
-    # A.sum(axis=1) sums each row of the matrix, giving the node degree matrix D
-    # np.power(x, n) raises array elements to the n-th power, giving D^(-1/2)
-    # sp.diags() builds a diagonal matrix whose diagonal holds the given entries
-    adj = sp.coo_matrix(adj)
-    adj_ = adj + sp.eye(adj.shape[0])  # A* = A + I, i.e. add self-loops to the adjacency matrix
-
-    rowsum = np.array(adj_.sum(1))  # row sums, i.e. the node degrees
-    degree_mat_inv_sqrt = sp.diags(np.power(rowsum, -0.5).flatten())  # the matrix D^(-1/2)
-    adj_normalized = adj_.dot(degree_mat_inv_sqrt).transpose().dot(degree_mat_inv_sqrt).tocoo()  # symmetric normalization: D^(-1/2) A* D^(-1/2)
-    return sparse_to_tuple(adj_normalized)
-
-
-def construct_feed_dict(adj_normalized, adj, features, placeholders):
-    # construct feed dictionary
-    # dict.update() merges the given entries into the dict
-    feed_dict = dict()  # start from an empty dict
-    feed_dict.update({placeholders['features']: features})
-    feed_dict.update({placeholders['adj']: adj_normalized})
-    feed_dict.update({placeholders['adj_orig']: adj})
-    return feed_dict
-
-
-def mask_test_edges(adj):
-    # Function to build test set with 10% positive links
-    # NOTE: Splits are randomized and results might slightly deviate from reported numbers in the paper.
-    # TODO: Clean up.
-
-    # sp.dia_matrix(data, offsets) places each row of data along the diagonal given by offsets
-    # see https://blog.csdn.net/ChenglinBen/article/details/84424379 for details
-    # .diagonal() extracts the diagonal elements
-    # Remove diagonal elements
-    adj = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)
-    # prune the explicit zero entries
-    adj.eliminate_zeros()
-    # Check that diag is zero:
-    # np.diag(matrix) extracts the diagonal of matrix; todense() is like toarray(),
-    # except that one returns a matrix and the other an ndarray
-    # assert that the diagonal has been fully cleared
-    assert np.diag(adj.todense()).sum() == 0
-
-    # sp.triu(matrix) takes the upper triangle of matrix; correspondingly, tril() takes the lower triangle
-    adj_triu = sp.triu(adj)
-    adj_tuple = sparse_to_tuple(adj_triu)
-    # edges behave like combinations: taken from the upper triangle, half the duplication is gone,
-    # so (4, 6) and (6, 4) never coexist and only (4, 6) is kept
-    # edges_all behaves like permutations: both directions are included
-    edges = adj_tuple[0]
-    edges_all = sparse_to_tuple(adj)[0]
-    # 10% of the edges are used for test
-    # 5% of the edges (edges.shape[0] / 20) are used for val
-    num_test = int(np.floor(edges.shape[0] / 10.))
-    num_val = int(np.floor(edges.shape[0] / 20.))
-
-    # randomly pick the test and val subsets
-    all_edge_idx = list(range(edges.shape[0]))
-    np.random.shuffle(all_edge_idx)
-    val_edge_idx = all_edge_idx[:num_val]
-    test_edge_idx = all_edge_idx[num_val:(num_val + num_test)]
-    test_edges = edges[test_edge_idx]
-    val_edges = edges[val_edge_idx]
-    train_edges = np.delete(edges, np.hstack([test_edge_idx, val_edge_idx]), axis=0)
-
-    # see the gae repo on GitHub for the reference version of this function; it has been
-    # updated there, as the original return expression (commented out below) was wrong,
-    # or at least incompatible with Python 3; np.any(rows_close) is the fix used here
-    def ismember(a, b, tol=5):
-        # checks whether element a is a member of collection b
-        rows_close = np.all(np.round(a - b[:, None], tol) == 0, axis=-1)
-        return np.any(rows_close)
-        # return (np.all(np.any(rows_close, axis=-1), axis=-1) and
-        #         np.all(np.any(rows_close, axis=0), axis=0))
-
-    # test_edges_false collects edges that do not exist in the graph at all
-    test_edges_false = []
-    while len(test_edges_false) < len(test_edges):
-        idx_i = np.random.randint(0, adj.shape[0])
-        idx_j = np.random.randint(0, adj.shape[0])
-        if idx_i == idx_j:
-            continue
-        if ismember([idx_i, idx_j], edges_all):
-            continue
-        if test_edges_false:
-            if ismember([idx_j, idx_i], np.array(test_edges_false)):
-                continue
-            if ismember([idx_i, idx_j], np.array(test_edges_false)):
-                continue
-        test_edges_false.append([idx_i, idx_j])
-
-    # val_edges_false collects edges that appear in neither train nor val
-    val_edges_false = []
-    while len(val_edges_false) < len(val_edges):
-        idx_i = np.random.randint(0, adj.shape[0])
-        idx_j = np.random.randint(0, adj.shape[0])
-        if idx_i == idx_j:
-            continue
-        if ismember([idx_i, idx_j], train_edges):
-            continue
-        if ismember([idx_j, idx_i], train_edges):
-            continue
-        if ismember([idx_i, idx_j], val_edges):
-            continue
-        if ismember([idx_j, idx_i], val_edges):
-            continue
-        if val_edges_false:
-            if ismember([idx_j, idx_i], np.array(val_edges_false)):
-                continue
-            if ismember([idx_i, idx_j], np.array(val_edges_false)):
-                continue
-        val_edges_false.append([idx_i, idx_j])
-
-    assert ~ismember(test_edges_false, edges_all)
-    # assert ~ismember(val_edges_false, edges_all)
-    assert ~ismember(val_edges, train_edges)
-    assert ~ismember(test_edges, train_edges)
-    assert ~ismember(val_edges, test_edges)
-
-    data = np.ones(train_edges.shape[0])
-
-    # Re-build adj matrix
-    # as the English comment says: rebuild adj_train from the cleaned train_edges
-    adj_train = sp.csr_matrix((data, (train_edges[:, 0], train_edges[:, 1])), shape=adj.shape)
-    adj_train = adj_train + adj_train.T
-
-    # NOTE: these edge lists only contain single direction of edge!
-    return adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false
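As a quick sanity check of preprocess_graph's symmetric normalization D^(-1/2) (A + I) D^(-1/2), a standalone sketch on a toy 3-node path graph:

import numpy as np
import scipy.sparse as sp

A = sp.csr_matrix(np.array([[0., 1., 0.],
                            [1., 0., 1.],
                            [0., 1., 0.]]))
A_hat = A + sp.eye(3)                                   # A* = A + I
d = np.power(np.array(A_hat.sum(1)).flatten(), -0.5)    # entries of D^(-1/2)
D_inv_sqrt = sp.diags(d)
A_norm = A_hat.dot(D_inv_sqrt).transpose().dot(D_inv_sqrt)
print(A_norm.toarray())  # symmetric, each entry scaled by sqrt of both degrees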
diff --git a/run.py b/run.py
deleted file mode 100644
index 783f75a..0000000
--- a/run.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import settings
-
-from link_prediction import Link_pred_Runner
-
-dataname = 'cora'  # 'cora' or 'citeseer' or 'pubmed'
-model = 'DBGAN'  # 'arga_ae' or 'DBGAN'
-task = 'link_prediction'
-
-settings = settings.get_settings(dataname, model, task)
-
-if task == 'link_prediction':
-    runner = Link_pred_Runner(settings)
-
-runner.erun()
diff --git a/settings.py b/settings.py
deleted file mode 100644
index 9581966..0000000
--- a/settings.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import tensorflow as tf
-import numpy as np
-
-flags = tf.app.flags
-FLAGS = flags.FLAGS
-
-flags.DEFINE_integer('hidden3', 64, 'Number of units in hidden layer 3.')
-flags.DEFINE_integer('discriminator_out', 0, 'discriminator_out.')
-flags.DEFINE_float('discriminator_learning_rate', 0.001, 'Initial learning rate.')
-flags.DEFINE_float('learning_rate', .6 * 0.001, 'Initial learning rate.')
-flags.DEFINE_integer('hidden1', 32, 'Number of units in hidden layer 1.')  # 64 for Citeseer and Pubmed
-flags.DEFINE_integer('hidden2', 32, 'Number of units in hidden layer 2.')  # 64 for Citeseer and Pubmed
-flags.DEFINE_float('weight_decay', 0., 'Weight for L2 loss on embedding matrix.')
-flags.DEFINE_float('dropout', 0., 'Dropout rate (1 - keep probability).')
-flags.DEFINE_integer('features', 1, 'Whether to use features (1) or not (0).')
-flags.DEFINE_integer('seed', 50, 'seed for fixing the results.')
-flags.DEFINE_integer('iterations', 60, 'number of iterations.')
-
-'''
-infor: number of clusters
-'''
-infor = {'cora': 7, 'citeseer': 6, 'pubmed': 3}
-
-'''
-We did not set any seed when we conducted the experiments described in the paper;
-we set a seed here to consistently reproduce the better performance of ARGA
-'''
-seed = 7
-np.random.seed(seed)
-tf.set_random_seed(seed)
-
-
-def get_settings(dataname, model, task):
-    if dataname != 'citeseer' and dataname != 'cora' and dataname != 'pubmed':
-        print('error: wrong data set name')
-    if task != 'clustering' and task != 'link_prediction':
-        print('error: wrong task name')
-
-    if task == 'clustering':
-        iterations = FLAGS.iterations
-        clustering_num = infor[dataname]
-        re = {'data_name': dataname, 'iterations': iterations, 'clustering_num': clustering_num, 'model': model}
-    elif task == 'link_prediction':
-        iterations = 4 * FLAGS.iterations
-        print('epoch is', iterations)
-        re = {'data_name': dataname, 'iterations': iterations, 'model': model}
-
-    return re
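For reference, with the defaults above the deleted module yields 4 * 60 = 240 iterations for link prediction; a standalone recreation of that branch of get_settings:

iterations_flag = 60                      # default of the deleted 'iterations' flag
iterations = 4 * iterations_flag          # the link_prediction branch
re = {'data_name': 'cora', 'iterations': iterations, 'model': 'DBGAN'}
print('epoch is', iterations)             # -> epoch is 240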
diff --git a/model.py b/src/model.py
similarity index 51%
rename from model.py
rename to src/model.py
index a3d5016..d5618e4 100644
--- a/model.py
+++ b/src/model.py
@@ -1,9 +1,6 @@
-from layers import GraphConvolution, GraphConvolutionSparse, InnerProductDecoder
+import numpy as np
 import tensorflow as tf
 
-flags = tf.app.flags
-FLAGS = flags.FLAGS
-
 
 class Model(object):
     def __init__(self, **kwargs):
@@ -40,22 +37,166 @@ class Model(object):
         pass
 
 
+_LAYER_UIDS = {}
+
+
+def get_layer_uid(layer_name=''):
+    """Helper function that assigns unique layer IDs."""
+    if layer_name not in _LAYER_UIDS:
+        _LAYER_UIDS[layer_name] = 1
+        return 1
+    else:
+        _LAYER_UIDS[layer_name] += 1
+        return _LAYER_UIDS[layer_name]
+
+
+class Layer(object):
+    """Base layer class. Defines the basic API for all layer objects.
+
+    # Properties
+        name: String, defines the variable scope of the layer.
+
+    # Methods
+        _call(inputs): Defines the computation graph of the layer
+            (i.e. takes input, returns output)
+        __call__(inputs): Wrapper for _call()
+    """
+
+    def __init__(self, **kwargs):
+        allowed_kwargs = {'name', 'logging'}
+        for kwarg in kwargs.keys():
+            assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
+        name = kwargs.get('name')
+        if not name:
+            layer = self.__class__.__name__.lower()
+            name = layer + '_' + str(get_layer_uid(layer))
+        self.name = name
+        self.vars = {}
+        logging = kwargs.get('logging', False)
+        self.logging = logging
+        self.issparse = False
+
+    def _call(self, inputs):
+        return inputs
+
+    def __call__(self, inputs):
+        with tf.name_scope(self.name):
+            outputs = self._call(inputs)
+        return outputs
+
+
+def weight_variable_glorot(input_dim, output_dim, name=""):
+    """Create a weight variable with Glorot & Bengio (AISTATS 2010) initialization."""
+    init_range = np.sqrt(6.0 / (input_dim + output_dim))
+    initial = tf.random_uniform([input_dim, output_dim], minval=-init_range,
+                                maxval=init_range, dtype=tf.float32)
+    return tf.Variable(initial, name=name)
+
+
+def dropout_sparse(x, keep_prob, num_nonzero_elems):
+    """Dropout for sparse tensors. Currently fails for very large sparse tensors (>1M elements).
+
+    x: input sparse tensor
+    keep_prob: keep probability
+    num_nonzero_elems: number of non-zero elements in the sparse matrix
+    """
+    noise_shape = [num_nonzero_elems]
+    random_tensor = keep_prob
+    random_tensor += tf.random_uniform(noise_shape)
+    dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
+    pre_out = tf.sparse_retain(x, dropout_mask)
+    return pre_out * (1. / keep_prob)
+
+
+class GraphConvolutionSparse(Layer):
+    """Graph convolution layer for sparse inputs.
+
+    Takes an extra features_nonzero argument compared to the dense layer.
+    """
+
+    def __init__(self, input_dim, output_dim, adj, features_nonzero, dropout=0., act=tf.nn.relu, **kwargs):
+        super(GraphConvolutionSparse, self).__init__(**kwargs)
+        with tf.variable_scope(self.name + '_vars'):
+            self.vars['weights'] = weight_variable_glorot(input_dim, output_dim, name="weights")
+        self.dropout = dropout
+        self.adj = adj
+        self.act = act
+        self.issparse = True
+        self.features_nonzero = features_nonzero
+
+    def _call(self, inputs):
+        x = inputs
+        x = dropout_sparse(x, 1 - self.dropout, self.features_nonzero)
+        x = tf.sparse_tensor_dense_matmul(x, self.vars['weights'])
+        x = tf.sparse_tensor_dense_matmul(self.adj, x)
+        outputs = self.act(x)
+        return outputs
+
+
+def gaussian_noise_layer(input_layer, std):
+    noise = tf.random_normal(shape=tf.shape(input_layer), mean=0.0, stddev=std, dtype=tf.float32)
+    return input_layer + noise
+
+
+class GraphConvolution(Layer):
+    """Basic graph convolution layer for undirected graphs without edge labels."""
+
+    def __init__(self, input_dim, output_dim, adj, dropout=0., act=tf.nn.relu, **kwargs):
+        super(GraphConvolution, self).__init__(**kwargs)
+        with tf.variable_scope(self.name + '_vars'):
+            self.vars['weights'] = weight_variable_glorot(input_dim, output_dim, name="weights")
+        self.dropout = dropout
+        self.adj = adj
+        self.act = act
+
+    def _call(self, inputs):
+        x = inputs
+        x = tf.nn.dropout(x, 1 - self.dropout)
+        x = tf.matmul(x, self.vars['weights'])
+        x = tf.sparse_tensor_dense_matmul(self.adj, x)
+        outputs = self.act(x)
+        return outputs
+
+
+class InnerProductDecoder(Layer):
+    """Decoder model layer for link prediction."""
+
+    def __init__(self, input_dim, dropout=0., act=tf.nn.sigmoid, **kwargs):
+        super(InnerProductDecoder, self).__init__(**kwargs)
+        self.dropout = dropout
+        self.act = act
+
+    def _call(self, inputs):
+        """The decoder is simply the input multiplied by its own transpose."""
+        inputs = tf.nn.dropout(inputs, 1 - self.dropout)
+        x = tf.transpose(inputs)
+        x = tf.matmul(inputs, x)
+        x = tf.reshape(x, [-1])
+        outputs = self.act(x)
+        return outputs
+
+
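InnerProductDecoder therefore reconstructs edge logits as Z Z^T on the embeddings; a minimal NumPy sketch of the same computation (sigmoid shown, as in the default act, though callers may pass the identity instead):

import numpy as np

Z = np.random.randn(4, 2)                 # 4 nodes, 2-dim embeddings
logits = Z @ Z.T                          # pairwise inner products, shape (4, 4)
A_hat = 1.0 / (1.0 + np.exp(-logits))     # sigmoid activation
print(A_hat.reshape(-1))                  # flattened, like tf.reshape(x, [-1])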
 class GCN(Model):
-    def __init__(self, placeholders, num_features, features_nonzero, **kwargs):
+    def __init__(self, placeholders, num_features, features_nonzero, settings, **kwargs):
         super(GCN, self).__init__(**kwargs)
         """
-        inputs: input features
-        input_dim: number of features, i.e. the input dimensionality?
-        feature_nonzero: non-zero features
-        adj: adjacency matrix
+        inputs: input features
+        input_dim: input dimensionality
+        feature_nonzero: number of non-zero features
+        adj: adjacency matrix
         dropout: dropout rate
         """
-
         self.inputs = placeholders['features']
         self.input_dim = num_features
         self.features_nonzero = features_nonzero
         self.adj = placeholders['adj']
         self.dropout = placeholders['dropout']
+        self.settings = settings
 
     def construct(self, inputs=None, hidden=None, reuse=False):
         if inputs == None:
@@ -63,7 +204,7 @@ class GCN(Model):
 
         with tf.variable_scope('Encoder', reuse=reuse):
             self.hidden1 = GraphConvolutionSparse(input_dim=self.input_dim,
-                                                  output_dim=FLAGS.hidden1,
+                                                  output_dim=self.settings.hidden1,
                                                   adj=self.adj,
                                                   features_nonzero=self.features_nonzero,
                                                   act=tf.nn.relu,
@@ -74,63 +215,20 @@ class GCN(Model):
             self.noise = gaussian_noise_layer(self.hidden1, 0.1)
             if hidden == None:
                 hidden = self.hidden1
-            self.embeddings = GraphConvolution(input_dim=FLAGS.hidden1,
-                                               output_dim=FLAGS.hidden2,
+            self.embeddings = GraphConvolution(input_dim=self.settings.hidden1,
+                                               output_dim=self.settings.hidden2,
                                                adj=self.adj,
                                                act=lambda x: x,
                                                dropout=self.dropout,
                                                logging=self.logging,
                                                name='e_dense_2')(hidden)
 
             self.z_mean = self.embeddings
 
-            self.reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2,
+            self.reconstructions = InnerProductDecoder(input_dim=self.settings.hidden2,
                                                        act=lambda x: x,
                                                        logging=self.logging)(self.embeddings)
         return self.z_mean, self.reconstructions
 
 
-class Generator_z2g(Model):
-    def __init__(self, placeholders, num_features, features_nonzero, **kwargs):
-        super(Generator_z2g, self).__init__(**kwargs)
-        """
-        inputs: input features
-        input_dim: input dimensionality
-        feature_nonzero: number of non-zero features
-        adj: adjacency matrix
-        dropout: dropout rate
-        """
-
-        self.inputs = placeholders['real_distribution']
-        self.input_dim = num_features
-        self.features_nonzero = features_nonzero
-        self.adj = placeholders['adj']
-        self.dropout = placeholders['dropout']
-
-    def construct(self, inputs=None, reuse=False):
-        if inputs == None:
-            inputs = self.inputs
-        with tf.variable_scope('Decoder', reuse=reuse):
-            self.hidden1 = GraphConvolution(input_dim=FLAGS.hidden2,
-                                            output_dim=FLAGS.hidden1,
-                                            adj=self.adj,
-                                            act=tf.nn.relu,
-                                            dropout=self.dropout,
-                                            logging=self.logging,
-                                            name='GG_dense_1')(inputs)
-
-            self.embeddings = GraphConvolution(input_dim=FLAGS.hidden1,
-                                               output_dim=self.input_dim,
-                                               adj=self.adj,
-                                               act=lambda x: x,
-                                               dropout=self.dropout,
-                                               logging=self.logging,
-                                               name='GG_dense_2')(self.hidden1)
-
-            self.z_mean = self.embeddings
-            return self.z_mean, self.hidden1
-
-
 def dense(x, n1, n2, name):
     """
     Used to create a dense layer.
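The FLAGS-to-settings change means GCN now reads hyper-parameters from whatever object is injected, so any attribute holder works in place of the old global flags. An illustrative sketch (values assumed from the defaults above):

from argparse import Namespace

settings = Namespace(hidden1=32, hidden2=32, hidden3=64)
print(settings.hidden1)  # 32, accessed exactly like self.settings.hidden1 above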
@@ -150,10 +248,26 @@ def dense(x, n1, n2, name):
     return out
 
 
+class Discriminator(Model):
+    def __init__(self, settings, **kwargs):
+        super(Discriminator, self).__init__(**kwargs)
+        self.act = tf.nn.relu
+        self.settings = settings
+
+    def construct(self, inputs, reuse=False):
+        with tf.variable_scope('Discriminator'):
+            if reuse:
+                tf.get_variable_scope().reuse_variables()
+            tf.set_random_seed(1)
+            dc_den1 = tf.nn.relu(dense(inputs, self.settings.hidden2, self.settings.hidden3, name='dc_den1'))
+            dc_den2 = tf.nn.relu(dense(dc_den1, self.settings.hidden3, self.settings.hidden1, name='dc_den2'))
+            output = dense(dc_den2, self.settings.hidden1, 1, name='dc_output')
+            return output
+
+
 class D_graph(Model):
     def __init__(self, num_features, **kwargs):
         super(D_graph, self).__init__(**kwargs)
-
         self.act = tf.nn.relu
         self.num_features = num_features
 
@@ -170,25 +284,53 @@ class D_graph(Model):
         return output
 
 
-def gaussian_noise_layer(input_layer, std):
-    noise = tf.random_normal(shape=tf.shape(input_layer), mean=0.0, stddev=std, dtype=tf.float32)
-    return input_layer + noise
+class Generator_z2g(Model):
+    def __init__(self, placeholders, num_features, features_nonzero, settings, **kwargs):
+        super(Generator_z2g, self).__init__(**kwargs)
+        """
+        inputs: input features
+        input_dim: input dimensionality
+        feature_nonzero: number of non-zero features
+        adj: adjacency matrix
+        dropout: dropout rate
+        """
+        self.inputs = placeholders['real_distribution']
+        self.input_dim = num_features
+        self.features_nonzero = features_nonzero
+        self.adj = placeholders['adj']
+        self.dropout = placeholders['dropout']
+        self.settings = settings
 
-
-class Discriminator(Model):
-    def __init__(self, **kwargs):
-        super(Discriminator, self).__init__(**kwargs)
+    def construct(self, inputs=None, reuse=False):
+        if inputs == None:
+            inputs = self.inputs
+        with tf.variable_scope('Decoder', reuse=reuse):
+            self.hidden1 = GraphConvolution(input_dim=self.settings.hidden2,
+                                            output_dim=self.settings.hidden1,
+                                            adj=self.adj,
+                                            act=tf.nn.relu,
+                                            dropout=self.dropout,
+                                            logging=self.logging,
+                                            name='GG_dense_1')(inputs)
 
-        self.act = tf.nn.relu
+            self.embeddings = GraphConvolution(input_dim=self.settings.hidden1,
+                                               output_dim=self.input_dim,
+                                               adj=self.adj,
+                                               act=lambda x: x,
+                                               dropout=self.dropout,
+                                               logging=self.logging,
+                                               name='GG_dense_2')(self.hidden1)
 
-    def construct(self, inputs, reuse=False):
-        # with tf.name_scope('Discriminator'):
-        with tf.variable_scope('Discriminator'):
-            if reuse:
-                tf.get_variable_scope().reuse_variables()
-            # np.random.seed(1)
-            tf.set_random_seed(1)
-            dc_den1 = tf.nn.relu(dense(inputs, FLAGS.hidden2, FLAGS.hidden3, name='dc_den1'))
-            dc_den2 = tf.nn.relu(dense(dc_den1, FLAGS.hidden3, FLAGS.hidden1, name='dc_den2'))
-            output = dense(dc_den2, FLAGS.hidden1, 1, name='dc_output')
-            return output
+            self.z_mean = self.embeddings
+            return self.z_mean, self.hidden1
+
+
+class BGAN(object):
+    def __init__(self, placeholders, num_features, num_nodes, features_nonzero, settings):
+        self.discriminator = Discriminator(settings)
+        self.D_Graph = D_graph(num_features)
+        self.d_real = self.discriminator.construct(placeholders['real_distribution'])
+        self.GD_real = self.D_Graph.construct(placeholders['features_dense'])
+        self.ae_model = GCN(placeholders, num_features, features_nonzero, settings)
+        self.model_z2g = Generator_z2g(placeholders, num_features, features_nonzero, settings)
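Discriminator.construct is called once on the prior sample and once on generated embeddings with reuse=True, so both calls share the same weights. A self-contained TF1 sketch of that variable-scope idiom (names illustrative, not from this repo):

import tensorflow as tf

def critic(x, reuse=False):
    with tf.variable_scope('Discriminator'):
        if reuse:
            tf.get_variable_scope().reuse_variables()  # second call shares vars
        w = tf.get_variable('w', shape=[int(x.shape[-1]), 1])
        return tf.matmul(x, w)

real = tf.placeholder(tf.float32, [None, 32])
fake = tf.placeholder(tf.float32, [None, 32])
d_real, d_fake = critic(real), critic(fake, reuse=True)  # same 'w' both times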
diff --git a/src/train.py b/src/train.py
new file mode 100644
index 0000000..705e769
--- /dev/null
+++ b/src/train.py
@@ -0,0 +1,105 @@
+import argparse
+
+import numpy as np
+import tensorflow as tf
+from dppy.finite_dpps import FiniteDPP
+from sklearn.decomposition import PCA
+from sklearn.neighbors import KernelDensity
+
+from input import get_data
+from src.evaluation import Evaluator
+from src.model import BGAN
+from src.optimizer import Optimizer, update
+
+# Parse command-line arguments
+def parse_args():
+    parser = argparse.ArgumentParser(description='BGANDTI')
+    parser.add_argument('--hidden1', type=int, default=32, help='Number of units in hidden layer 1.')
+    parser.add_argument('--hidden2', type=int, default=32, help='Number of units in hidden layer 2.')
+    parser.add_argument('--hidden3', type=int, default=64, help='Number of units in hidden layer 3.')
+    parser.add_argument('--learning_rate', type=float, default=.6 * 0.001, help='Learning rate.')
+    parser.add_argument('--discriminator_learning_rate', type=float, default=0.001, help='Discriminator learning rate.')
+    parser.add_argument('--epoch', type=int, default=20, help='Number of training epochs.')
+    parser.add_argument('--seed', type=int, default=50, help='Seed used to shuffle the dataset.')
+    parser.add_argument('--features', type=int, default=1, help='Whether to use features (1) or not (0).')
+    parser.add_argument('--dropout', type=float, default=0., help='Dropout rate (1 - keep probability).')
+    parser.add_argument('--weight_decay', type=float, default=0., help='Weight for L2 loss on embedding matrix.')
+    parser.add_argument('--dataset', type=str, default='e', help='Dataset to use.')
+
+    args = parser.parse_args()
+    return args
+
+
+if __name__ == "__main__":
+    settings = parse_args()
+
+    # Load data
+    feas = get_data(settings.dataset)
+
+    # DPP sampling and PCA dimensionality reduction
+    DPP = FiniteDPP('correlation', **{'K': feas['adj'].toarray()})
+    pca = PCA(n_components=settings.hidden2)
+    DPP.sample_exact_k_dpp(size=21)  # e: 21, ic: 6, gpcr: 3
+    index = DPP.list_of_samples[0]
+    feature_sample = feas['features_dense']
+    feature_sample = pca.fit_transform(feature_sample)
+    kde = KernelDensity(bandwidth=0.7).fit(np.array([feature_sample[i] for i in index]))
+
+    # Placeholders feeding the computation graph
+    placeholders = {
+        'features': tf.sparse_placeholder(tf.float32),
+        'features_dense': tf.placeholder(tf.float32, shape=[feas['adj'].shape[0], feas['num_features']], name='real_distribution'),
+        'adj': tf.sparse_placeholder(tf.float32),
+        'adj_orig': tf.sparse_placeholder(tf.float32),
+        'dropout': tf.placeholder_with_default(0., shape=()),
+        'real_distribution': tf.placeholder(dtype=tf.float32, shape=[feas['adj'].shape[0], settings.hidden2], name='real_distribution')
+    }
+
+    # Build the model
+    # d_real, discriminator, ae_model, model_z2g, D_Graph, GD_real = DBGAN(placeholders, feas['num_features'], feas['num_nodes'], feas['features_nonzero'], settings)
+    model = BGAN(placeholders, feas['num_features'], feas['num_nodes'], feas['features_nonzero'], settings)
+
+    # Define the optimizer
+    # opt = Optimizer(ae_model, model_z2g, D_Graph, discriminator, placeholders, feas['pos_weight'], feas['norm'], d_real, feas['num_nodes'], GD_real)
+    optimizer = Optimizer(model.ae_model, model.model_z2g, model.D_Graph, model.discriminator, placeholders, feas['pos_weight'], feas['norm'], model.d_real, feas['num_nodes'], model.GD_real,
+                          settings)
+
+    # Initialize the session and weights
+    # Let GPU memory grow on demand
+    config = tf.ConfigProto()
+    config.gpu_options.allow_growth = True
+    sess = tf.Session(config=config)
+    # sess = tf.Session()
+    sess.run(tf.global_variables_initializer())
+
+    # Record results from the different stages
+    val_roc_score = []
+    record = []
+    record_emb = []
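The fitted KDE acts as the prior from which real_distribution samples are drawn during training; drawing from it looks like this (standalone sketch; all sizes are illustrative assumptions):

import numpy as np
from sklearn.neighbors import KernelDensity

rng = np.random.RandomState(0)
X = rng.randn(21, 32)                        # e.g. 21 DPP-selected rows, hidden2=32 dims
kde = KernelDensity(bandwidth=0.7).fit(X)
z_real = kde.sample(n_samples=705)           # one draw per graph node (count assumed)
print(z_real.shape)                          # (705, 32)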
+    # Train model
+    for epoch in range(settings.epoch):
+
+        emb, avg_cost = update(model.ae_model, optimizer.opt, sess, feas['adj_norm'], feas['adj_label'], feas['features'], placeholders, feas['adj'], kde, feas['features_dense'], settings)
+
+        lm_train = Evaluator(feas['val_edges'], feas['val_edges_false'])
+        roc_curr, ap_curr, _, aupr_score = lm_train.get_roc_score(emb, feas)
+        val_roc_score.append(roc_curr)
+        print("Epoch:", '%04d' % (epoch + 1),
+              "train_loss={:.5f}, d_loss={:.5f}, g_loss={:.5f}, GD_loss={:.5f}, GG_loss={:.5f}".format(avg_cost[0], avg_cost[1], avg_cost[2], avg_cost[3], avg_cost[4]),
+              "val_roc={:.5f}".format(val_roc_score[-1]), "val_ap=", "{:.5f}".format(ap_curr), "val_aupr=", "{:.5f}".format(aupr_score))
+
+        if (epoch + 1) % 10 == 0:
+            lm_test = Evaluator(feas['test_edges'], feas['test_edges_false'])
+            roc_score, ap_score, _, aupr_score = lm_test.get_roc_score(emb, feas)
+            print('Test ROC score: ' + str(roc_score), 'Test AUPR score: ' + str(aupr_score), 'Test AP score: ' + str(ap_score))
+            record.append([roc_score, aupr_score, ap_score])
+            record_emb.append(emb)
+    rec = np.array(record)
+    # index = rec[:, 0].tolist().index(max(rec[:, 0].tolist()))
+    # index_pr = rec[:, 1].tolist().index(max(rec[:, 1].tolist()))
+    emb = record_emb[rec[:, 0].tolist().index(max(rec[:, 0].tolist()))]
+    ana = record[rec[:, 0].tolist().index(max(rec[:, 0].tolist()))]
+    ana_pr = record[rec[:, 1].tolist().index(max(rec[:, 1].tolist()))]
+    print('The peak [auc] test_roc={:.7f}, aupr={:.7f}, ap={:.7f}'.format(ana[0], ana[1], ana[2]))
+    print('The peak [aupr] test_roc={:.7f}, aupr={:.7f}, ap={:.7f}'.format(ana_pr[0], ana_pr[1], ana_pr[2]))
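The peak-selection lines above can be written more directly with np.argmax; an equivalent sketch on dummy values:

import numpy as np

record = [[0.91, 0.52, 0.80], [0.93, 0.50, 0.82]]   # dummy [roc, aupr, ap] rows
rec = np.array(record)
best_auc = record[int(np.argmax(rec[:, 0]))]        # row with the highest ROC
best_aupr = record[int(np.argmax(rec[:, 1]))]       # row with the highest AUPR
print('The peak [auc] test_roc={:.7f}, aupr={:.7f}, ap={:.7f}'.format(*best_auc))
print('The peak [aupr] test_roc={:.7f}, aupr={:.7f}, ap={:.7f}'.format(*best_aupr))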