Linux ip-172-26-2-223 5.4.0-1018-aws #18-Ubuntu SMP Wed Jun 24 01:15:00 UTC 2020 x86_64
Apache
172.26.2.223 | 3.137.213.117
Can't Read [ /etc/named.conf ]
8.1.13
www
www.github.com/MadExploits
/www/server/mysql/mysql-test/suite/ndb_rpl/r/
Name                                 Size       Permission
ndb_dist_priv_common.result          2.33 KB    -rw-r--r--
ndb_rpl_2innodb.result               41.34 KB   -rw-r--r--
ndb_rpl_2myisam.result               41.34 KB   -rw-r--r--
ndb_rpl_2ndb.result                  16.17 KB   -rw-r--r--
ndb_rpl_2other.result                32.71 KB   -rw-r--r--
ndb_rpl_add_column.result            5.99 KB    -rw-r--r--
ndb_rpl_apply_status.result          907 B      -rw-r--r--
ndb_rpl_auto_inc.result              3.72 KB    -rw-r--r--
ndb_rpl_bank.result                  10.82 KB   -rw-r--r--
ndb_rpl_basic.result                 13.52 KB   -rw-r--r--
ndb_rpl_batch.result                 2.48 KB    -rw-r--r--
ndb_rpl_batch_handling.result        1.91 KB    -rw-r--r--
ndb_rpl_binlog_format_errors.r...    1.27 KB    -rw-r--r--
ndb_rpl_bitfield.result              14.48 KB   -rw-r--r--
ndb_rpl_blob.result                  7.29 KB    -rw-r--r--
ndb_rpl_break_3_chain.result         3.17 KB    -rw-r--r--
ndb_rpl_bug22045.result              2.56 KB    -rw-r--r--
ndb_rpl_bug_13440282.result          2.3 KB     -rw-r--r--
ndb_rpl_check_for_mixed.result       483 B      -rw-r--r--
ndb_rpl_checksum.result              6.31 KB    -rw-r--r--
ndb_rpl_circular.result              5.13 KB    -rw-r--r--
ndb_rpl_circular_2ch.result          2.27 KB    -rw-r--r--
ndb_rpl_circular_2ch_rep_statu...    7.47 KB    -rw-r--r--
ndb_rpl_circular_simplex.resul...    1.28 KB    -rw-r--r--
ndb_rpl_conflict_basic.result        18.2 KB    -rw-r--r--
ndb_rpl_conflict_epoch.result        71.03 KB   -rw-r--r--
ndb_rpl_conflict_epoch2.result       143.62 KB  -rw-r--r--
ndb_rpl_conflict_epoch2_extra....    63.18 KB   -rw-r--r--
ndb_rpl_conflict_epoch2_trans....    47.12 KB   -rw-r--r--
ndb_rpl_conflict_epoch_ext.res...    106.6 KB   -rw-r--r--
ndb_rpl_conflict_epoch_extende...    13.51 KB   -rw-r--r--
ndb_rpl_conflict_epoch_trans.r...    23.13 KB   -rw-r--r--
ndb_rpl_conflict_epoch_trans_e...    23.62 KB   -rw-r--r--
ndb_rpl_conflict_max.result          36.46 KB   -rw-r--r--
ndb_rpl_conflict_max_delete_wi...    35.68 KB   -rw-r--r--
ndb_rpl_conflict_old.result          36.65 KB   -rw-r--r--
ndb_rpl_conflict_read_tracking...    29.94 KB   -rw-r--r--
ndb_rpl_ctype_ucs2_def.result        703 B      -rw-r--r--
ndb_rpl_dd_advance.result            8.9 KB     -rw-r--r--
ndb_rpl_dd_basic.result              2.4 KB     -rw-r--r--
ndb_rpl_dd_partitions.result         35.75 KB   -rw-r--r--
ndb_rpl_ddl_open_trans.result        1.87 KB    -rw-r--r--
ndb_rpl_dist_priv.result             11.31 KB   -rw-r--r--
ndb_rpl_do_db.result                 1.44 KB    -rw-r--r--
ndb_rpl_do_table.result              1.13 KB    -rw-r--r--
ndb_rpl_empty_epoch.result           1.17 KB    -rw-r--r--
ndb_rpl_gap_event.result             1.38 KB    -rw-r--r--
ndb_rpl_idempotent.result            3.06 KB    -rw-r--r--
ndb_rpl_ignore_db.result             1.08 KB    -rw-r--r--
ndb_rpl_init_rep_status.result       2.17 KB    -rw-r--r--
ndb_rpl_innodb2ndb.result            41 KB      -rw-r--r--
ndb_rpl_innodb_trans.result          3.08 KB    -rw-r--r--
ndb_rpl_last_conflict_epoch_va...    2.06 KB    -rw-r--r--
ndb_rpl_load.result                  1.27 KB    -rw-r--r--
ndb_rpl_logging.result               2.66 KB    -rw-r--r--
ndb_rpl_mix_eng_trans.result         10.51 KB   -rw-r--r--
ndb_rpl_mix_innodb.result            4.35 KB    -rw-r--r--
ndb_rpl_mixed_tables.result          7.28 KB    -rw-r--r--
ndb_rpl_multi.result                 2.33 KB    -rw-r--r--
ndb_rpl_myisam2ndb.result            41 KB      -rw-r--r--
ndb_rpl_ndbapi-examples.result       522 B      -rw-r--r--
ndb_rpl_rep_error.result             16.79 KB   -rw-r--r--
ndb_rpl_rep_ignore.result            1.4 KB     -rw-r--r--
ndb_rpl_skip_gap_event.result        1.27 KB    -rw-r--r--
ndb_rpl_slave_binlog_index.res...    1.83 KB    -rw-r--r--
ndb_rpl_slave_conflict_role_va...    9.63 KB    -rw-r--r--
ndb_rpl_slave_lsu.result             53.8 KB    -rw-r--r--
ndb_rpl_slave_lsu_anyval.resul...    54.73 KB   -rw-r--r--
ndb_rpl_slave_replay.result          6.42 KB    -rw-r--r--
ndb_rpl_slave_restart.result         1.93 KB    -rw-r--r--
ndb_rpl_stm_innodb.result            4.38 KB    -rw-r--r--
ndb_rpl_sync.result                  2.38 KB    -rw-r--r--
ndb_rpl_ui.result                    1.86 KB    -rw-r--r--
ndb_rpl_ui2.result                   813 B      -rw-r--r--
ndb_rpl_ui3.result                   1.22 KB    -rw-r--r--
rpl_truncate_7ndb.result             3.27 KB    -rw-r--r--
rpl_truncate_7ndb_2.result           3.27 KB    -rw-r--r--
Code Editor : ndb_rpl_circular_2ch_rep_status.result
include/rpl_init.inc [topology=1->2,4->3]
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
include/rpl_connect.inc [creating master]
include/rpl_connect.inc [creating master1]
include/rpl_connect.inc [creating slave]
include/rpl_connect.inc [creating slave1]
include/rpl_start_slaves.inc
Cluster A servers have no epoch replication info
select count(1) from mysql.ndb_apply_status;
count(1)
0
Cluster A servers have no max replicated epoch value
Master(1)
select variable_name, variable_value from information_schema.global_status where variable_name='Ndb_slave_max_replicated_epoch';
variable_name variable_value
NDB_SLAVE_MAX_REPLICATED_EPOCH 0
Warnings:
Warning 1287 'INFORMATION_SCHEMA.GLOBAL_STATUS' is deprecated and will be removed in a future release. Please use performance_schema.global_status instead
Master1(3)
select variable_name, variable_value from information_schema.global_status where variable_name='Ndb_slave_max_replicated_epoch';
variable_name variable_value
NDB_SLAVE_MAX_REPLICATED_EPOCH 0
Warnings:
Warning 1287 'INFORMATION_SCHEMA.GLOBAL_STATUS' is deprecated and will be removed in a future release. Please use performance_schema.global_status instead
Make a change originating at Cluster A
Master(1)
use test;
create table t1 (a int primary key, b varchar(100)) engine=ndb;
insert into t1 values (1, "Venice");
Allow it to propagate to Cluster B
Originate a second unrelated change at Cluster B, to allow us to wait for reverse propagation in the testcase
Slave1 (4)
insert into t1 values (2, "Death");
Allow it to propagate to Cluster A
Observe new entry in ndb_apply_status on Cluster A
Master (1)
select server_id from mysql.ndb_apply_status order by server_id;
server_id
1
4
Non-slave server on Cluster A will have no value for Max Replicated Epoch
select variable_name, variable_value from information_schema.global_status where variable_name='Ndb_slave_max_replicated_epoch';
variable_name variable_value
NDB_SLAVE_MAX_REPLICATED_EPOCH 0
Warnings:
Warning 1287 'INFORMATION_SCHEMA.GLOBAL_STATUS' is deprecated and will be removed in a future release. Please use performance_schema.global_status instead
Slave server on Cluster A has current value for Max Replicated Epoch
Master1 (3)
Expect count 1
select count(1) from information_schema.global_status, mysql.ndb_apply_status where server_id = 1 and variable_name='Ndb_slave_max_replicated_epoch' and variable_value = epoch;
count(1)
1
Warnings:
Warning 1287 'INFORMATION_SCHEMA.GLOBAL_STATUS' is deprecated and will be removed in a future release. Please use performance_schema.global_status instead
Now wait for all replication to quiesce
Now swap replication channels around
include/rpl_stop_slaves.inc
include/rpl_change_topology.inc [new topology=2->1,3->4]
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
Get current master status on Cluster A new master (next pos in Binlog)
Master1 (3)
Flush logs to ensure any pending update (e.g. reflected apply_status write row) is skipped over.
flush logs;
Setup slave on Cluster B to use it
Slave1 (4)
Get current master status on Cluster B new master (next pos in Binlog)
Slave (2)
Flush logs to ensure any pending update (e.g. reflected apply_status write row) is skipped over.
flush logs;
Setup slave on Cluster A to use it
Master (1)
Master (1)
Show that Cluster A Slave server (old master) has no Max replicated epoch before receiving data
select variable_name, variable_value from information_schema.global_status where variable_name='Ndb_slave_max_replicated_epoch';
variable_name variable_value
NDB_SLAVE_MAX_REPLICATED_EPOCH 0
Warnings:
Warning 1287 'INFORMATION_SCHEMA.GLOBAL_STATUS' is deprecated and will be removed in a future release. Please use performance_schema.global_status instead
Master1 (3)
Cluster A Master server (old slave) has old Max replicated epoch
select count(1) from information_schema.global_status, mysql.ndb_apply_status where server_id = 1 and variable_name='Ndb_slave_max_replicated_epoch' and variable_value = epoch;
count(1)
1
Warnings:
Warning 1287 'INFORMATION_SCHEMA.GLOBAL_STATUS' is deprecated and will be removed in a future release. Please use performance_schema.global_status instead
Now start slaves up
include/rpl_start_slaves.inc
Show that applying something from Cluster B causes the old Max Rep Epoch to be loaded from ndb_apply_status
There is no new Max Rep Epoch from Cluster A as it has not changed anything yet
Slave (2)
insert into test.t1 values (3, "From the Sea");
Allow to propagate to Cluster A
Master (1)
New Slave server on Cluster A has loaded old Max-Replicated-Epoch
select server_id from mysql.ndb_apply_status order by server_id;
server_id
1
2
4
select @result:=count(1) from information_schema.global_status, mysql.ndb_apply_status where server_id = 1 and variable_name='Ndb_slave_max_replicated_epoch' and variable_value = epoch;
@result:=count(1)
1
Warnings:
Warning 1287 'INFORMATION_SCHEMA.GLOBAL_STATUS' is deprecated and will be removed in a future release. Please use performance_schema.global_status instead
Now make a new Cluster A change and see that the Max Replicated Epoch advances once it has propagated
Master1 (3)
insert into test.t1 values (4, "Brooke");
Propagate to Cluster B
Make change on Cluster B to allow waiting for reverse propagation
Slave (2)
insert into test.t1 values (5, "Rupert");
Wait for propagation back to Cluster A
Master (1)
Show that Cluster A now has 2 different server_id entries in ndb_apply_status
Those from the new master (server_id 3) are highest.
select server_id from mysql.ndb_apply_status order by server_id;
server_id
1
2
3
4
select count(1) from information_schema.global_status, mysql.ndb_apply_status where server_id = 3 and variable_name='Ndb_slave_max_replicated_epoch' and variable_value = epoch;
count(1)
1
Warnings:
Warning 1287 'INFORMATION_SCHEMA.GLOBAL_STATUS' is deprecated and will be removed in a future release. Please use performance_schema.global_status instead
local_server_with_max_epoch
3
Done
drop table t1;
include/rpl_stop_slaves.inc
CHANGE MASTER TO IGNORE_SERVER_IDS= ();
CHANGE MASTER TO IGNORE_SERVER_IDS= ();
CHANGE MASTER TO IGNORE_SERVER_IDS= ();
CHANGE MASTER TO IGNORE_SERVER_IDS= ();
include/rpl_start_slaves.inc
include/rpl_end.inc
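The check this result file repeats after each topology change is a single join: the Ndb_slave_max_replicated_epoch status variable reported by the slave mysqld should equal the epoch column stored in mysql.ndb_apply_status for a given originating server_id. A minimal standalone sketch of that check, assuming a running NDB replication slave like the ones in this test; the gs/aps aliases and the @origin_server_id variable are illustrative additions, not part of the recorded test output:

    -- Sketch only: compare the slave's reported max replicated epoch against
    -- the epoch last applied from a chosen originating server_id.
    SET @origin_server_id = 1;  -- placeholder; substitute the server_id for your topology
    SELECT COUNT(1) AS epoch_matches
    FROM information_schema.global_status gs,
         mysql.ndb_apply_status aps
    WHERE aps.server_id = @origin_server_id
      AND gs.variable_name = 'Ndb_slave_max_replicated_epoch'
      AND gs.variable_value = aps.epoch;
    -- Expect epoch_matches = 1 once the slave has caught up, as in the
    -- "Expect count 1" steps above. Note that information_schema.global_status
    -- is deprecated (warning 1287 above); performance_schema.global_status is
    -- the suggested replacement.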