-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathbugzilla.py
More file actions
2429 lines (2141 loc) · 93.9 KB
/
bugzilla.py
File metadata and controls
2429 lines (2141 loc) · 93.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# Perforce Defect Tracking Integration Project
# <http://www.ravenbrook.com/project/p4dti/>
#
# BUGZILLA.PY -- INTERFACE TO BUGZILLA
#
# Nick Barnes, Ravenbrook Limited, 2000-11-21
#
#
# 1. INTRODUCTION
#
# This module defines a Python interface to the Bugzilla database. Its
# design is documented in [NB 2000-11-14c]. It accesses and updates
# data according to the Bugzilla schema [NB 2000-11-14a] and schema
# extensions [NB 2000-11-14b].
#
# The intended readership of this document is project developers.
#
# This document is not confidential.
import catalog
import os
import re
import string
import types
error = 'Bugzilla database error'
class bugzilla:
schema_version = '5'
# particular Bugzilla features. Maybe should have a 'feature'
# dictionary.
features = {}
bugzilla_version = None
db = None
cursor = None
rid = None
sid = None
replication = None
logger = None
bugmail_commands = None
cache = None
# 2. BUGZILLA INTERFACE
def __init__(self, db, config):
self.db = db
self.cache = {}
self.bugmail_commands = []
self.logger = config.logger
self.cursor = self.db.cursor()
self.rid = config.rid
self.sid = config.sid
self.bugzilla_directory = config.bugzilla_directory
self.bugmail_command = config.bugmail_command
self.check_mysql_version()
self.check_bugzilla_version()
self.update_p4dti_schema()
# Make a configuration dictionary and pass it to set_config to
# ensure that the copy of the configuration in the Bugzilla
# database is up-to-date.
c = {
'replicator_user': config.replicator_address,
'p4_server_description': config.p4_server_description,
}
if config.changelist_url is not None:
c['changelist_url'] = config.changelist_url
if config.job_url is not None:
c['job_url'] = config.job_url
self.set_config(c)
# Fetch Bugzilla's configuration parameters (if they can be
# found in the database).
self.fetch_bugzilla_config()
# Check whether the MySQL database character set
# settings are appropriate for UTF8 replication.
self.check_utf8_config()
def log(self, id, args = ()):
msg = catalog.msg(id, args)
self.logger.log(msg)
# 3. DATABASE INTERFACE
#
# The Python database interface [DBAPI 2.0] is very basic. This
# section builds up some layers of abstraction, providing logging
# (section 3.1), checking and conversion (section 3.2), and quoting
# (section 3.3).
# 3.1. SQL wrappers
#
# These three methods directly wrap methods in the database
# interface [DBAPI 2.0], logging the executed SQL commands and
# the returned results.
# execute(sql) executes the given SQL command and returns the number
# of rows returned.
def execute(self, sql, params=None):
assert isinstance(sql, basestring)
# "Executing SQL command '%s'."
self.log(100, repr((sql, params)))
self.cursor.execute(sql, params)
rows = self.cursor.rowcount
# "MySQL returned '%s'."
self.log(101, repr(rows))
return rows
# fetchone() fetches one row from the current result set and returns
# it as a sequence.
def fetchone(self):
row = self.cursor.fetchone()
# "fetchone() returned '%s'."
self.log(102, repr(row))
return row
# fetchall() fetches all the rows from the current result and
# returns them as a sequence of sequences.
def fetchall(self):
rows = self.cursor.fetchall()
# "fetchall() returned '%s'."
self.log(103, repr(rows))
return rows
# 3.2. Select methods
#
# These methods select rows from the database, checking that the
# results are as expected, and converting the results into various
# data structures.
#
# In all of these select methods, the description argument is a
# description of the data being selected; it's used in error
# messages.
# select_one_row(select, description) executes the SQL select
# statement, checks that it returns exactly one row, and returns the
# row as a sequence.
def select_one_row(self, select, description):
nrows = self.execute(select)
if nrows == 0:
# "Select '%s' of %s returns no rows."
raise error, catalog.msg(106, (select, description))
elif nrows > 1:
# "Select '%s' of %s expecting one row but returns %d."
raise error, catalog.msg(107, (select, description, nrows))
elif self.cursor.description == None:
# "Trying to fetch a row from non-select '%s'."
raise error, catalog.msg(108, select)
row = self.fetchone()
if row == None:
# "Select '%s' of %s returned an unfetchable row."
raise error, catalog.msg(109, (select, description))
else:
return row
# select_rows(select, description) executes the SQL select
# statement, checks that it executed correctly, and returns all the
# results as a sequence of sequences.
def select_rows(self, select, description):
self.execute(select)
if self.cursor.description == None:
# "Trying to fetch rows from non-select '%s'."
raise error, catalog.msg(110, select)
rows = self.fetchall()
if rows == None:
# "Select '%s' of %s returned unfetchable rows."
raise error, catalog.msg(111, (select, description))
else:
return rows
# select_at_most_one_row(select, description) executes the SQL
# select statement, check that it returns at most one row, and
# returns the row as a sequence, or None if there was no row.
def select_at_most_one_row(self, select, description):
rows = self.select_rows(select, description)
if len(rows) == 0:
return None
elif len(rows) == 1:
return rows[0]
else:
# "Select '%s' of %s expecting no more than one row but
# returns %d."
raise error, catalog.msg(112, (select, description, rows))
# column_names() returns a list of the column names of the results
# of the most recent select. (It will raise a TypeError if the most
# recent operation was not a select.)
def column_names(self):
return map(lambda d:d[0], self.cursor.description)
# row_to_dictionary(row, columns, select, description) takes a row
# from the results of the most recent select statement and returns
# it as a dictionary mapping column name to value. The columns
# argument is a sequence of column names for the results of the
# select statement; the select argument is the most recent SQL
# select statement; and description is a description of the data
# being selected. (The select and description arguments are used in
# error messages.)
def row_to_dictionary(self, row, columns, select, description):
if len(columns) != len(row):
# "Select '%s' of %s returns %d columns but %d values."
raise error, catalog.msg(113, (select, description,
len(columns), len(row)))
dict = {}
for i in range(len(columns)):
dict[columns[i]] = row[i]
return dict
# fetch_one_row_as_dictionary(select, description) executes the SQL
# select statement, checks that it returns exactly one row, and
# return that row as a dictionary mapping column name to value.
def fetch_one_row_as_dictionary(self, select, description):
row = self.select_one_row(select, description)
columns = self.column_names()
return self.row_to_dictionary(row, columns, select, description)
# fetch_at_most_one_row_as_dictionary(select, description) executes
# the SQL select statement, check that it returns at most one row,
# and returns the row as a dictionary mapping column name to value,
# or None if there was no row.
def fetch_at_most_one_row_as_dictionary(self, select, description):
row = self.select_at_most_one_row(select, description)
if row == None:
return None
columns = self.column_names()
return self.row_to_dictionary(row, columns, select, description)
# fetch_rows_as_list_of_dictionaries(select, description) executes
# the SQL select statement, and returns the results as a list of
# dictionaries mapping column name to value.
def fetch_rows_as_list_of_dictionaries(self, select, description):
rows = self.select_rows(select, description)
columns = self.column_names()
def r2d(row, self=self, c=columns, s=select, d=description):
return self.row_to_dictionary(row, c, s, d)
return map(r2d, rows)
# fetch_rows_as_list_of_sequences(select, description) executes the
# SQL select statement, and returns the result as a list of
# sequences.
def fetch_rows_as_list_of_sequences(self, select, description):
rows = self.select_rows(select, description)
# select_rows may be any sequence type; we want a list.
return list(rows)
# fetch_simple_rows_as_dictionary(select, description) executes the
# SQL select statement and returns a dictionary mapping the value in
# the first column to the value in the second.
def fetch_simple_rows_as_dictionary(self, select, description):
rows = self.fetch_rows_as_list_of_sequences(select, description)
dict = {}
for row in rows:
dict[row[0]] = row[1]
return dict
# Get the set of table names.
def table_names(self):
if not self.cache.has_key('table names'):
tables = self.fetch_rows_as_list_of_sequences('show tables',
'list all tables')
# Use table names only.
self.cache['table names'] = map(lambda x:x[0], tables)
return self.cache['table names']
# 4. QUOTATION
def quote_string(self, s):
return "'%s'" % self.db.escape_string(s)
#
# We now use MySQLdb's parameter-passing mechanism for most
# arguments (see [DBAPI 2.0]). The remaining code here enables us
# to write SQL expressions which aren't possible with the %s
# parameter-passing, and which we need for particular fields:
#
# - now() in a datetime field when passed an empty string;
# - encrypt(password) in an encrypted field when passed an empty string;
# - encrypt(%s) in an encrypted field.
# if_empty_then_now(value) is the quote method for timestamp
# fields.
def if_empty_then_now(self, value):
if value == '':
return 'now()'
else:
return ('%s', value)
# cryptpassword(value) is the quote method for encrypted passwords.
def cryptpassword(self, value):
if value == '':
return 'encrypt(password)'
else:
return ('encrypt(%s)', value)
# quote_table maps (table name, field name) to the quote method for
# that field.
quote_table = {
('bugs', 'creation_ts'): if_empty_then_now,
('bugs', 'delta_ts'): if_empty_then_now,
('longdescs', 'bug_when'): if_empty_then_now,
('p4dti_bugs', 'migrated'): if_empty_then_now,
('p4dti_replications', 'end'): if_empty_then_now,
('profiles', 'cryptpassword'): cryptpassword,
}
# quote(table, field, value) quotes the value for inclusion in a SQL
# command, for inclusion in the given field in the given table.
def quote(self, table, field, value):
quoter = self.quote_table.get((table, field))
if quoter:
return quoter(self, value)
else:
return ('%s', value)
# 5. TYPES
#
#
# 5.1. MySQL column types
#
# These functions allow us to interrogate the database schema and
# determine column types. A column type is returned from MySQL as a
# row with these columns (note that we don't make use of 'Key' or
# 'Extra'):
#
# Field The column name.
# Type SQL type.
# Default Default value or None.
# Null 'YES' if Null is allowed, '' if not.
# Key How column is indexed ('PRI', 'UNI', 'MUL', or '').
# Extra Column attributes (for example, auto_increment).
#
#
# 5.2. P4DTI column types
#
# We decode the type into a dictionary with these keys:
#
# field The column name.
# type The SQL type (enum/int/float/date/timestamp/text);
# 'user' if it contains a Bugzilla user id; 'other' if
# we don't recognise it.
# length Length (for text and integer fields).
# null Null allowed? (0 or 1)
# default The default value or None.
# values Legal values (for enum fields).
# sql_type The original SQL type.
# user_fields is a dictionary mapping (table name, field name) to
# to a suitable Perforce default value, for user fields only
# (their database type is integer but we need to treat them
# specially).
user_fields = {
('bugs', 'assigned_to') : '$user',
('bugs', 'reporter'): '$user',
('bugs', 'qa_contact'): 'None',
}
# convert_type(table, dict) converts dict, a MySQL column
# description (section 5.1) for the specified table, into a P4DTI
# column description (section 5.2) and returns it.
def convert_type(self, table, dict):
name = dict['Field']
sql_type = dict['Type']
column = {
'field': name,
'sql_type': sql_type,
'default': dict['Default'],
'null': dict['Null'] == 'YES',
}
# User fields.
if self.user_fields.has_key((table, name)):
column['type'] = 'user'
column['default'] = self.user_fields[(table, name)]
return column
# Enumerated fields.
match = re.match("^enum\('(.*)'\)$", sql_type)
if match:
enum_values = string.split(match.group(1), "','")
column['type'] = 'enum'
column['length'] = max(map(len, enum_values))
column['values'] = enum_values
return column
# Integer fields.
match = re.match("^(tinyint|smallint|mediumint|int|bigint)"
"\((.*)\)$", sql_type)
if match:
column['type'] = 'int'
column['length'] = int(match.group(2))
return column
# Date fields.
match = re.match("^datetime", sql_type)
if match:
column['type'] = 'date'
# We don't support default dates.
column['default'] = None
return column
# Timestamp fields.
match = re.match("^timestamp", sql_type)
if match:
column['type'] = 'timestamp'
# We don't support default timestamps.
column['default'] = None
return column
# Sized text fields.
match = re.match("^(char|varchar)\((.*)\)$", sql_type)
if match:
column['type'] = 'text'
column['length'] = int(match.group(2))
return column
# Implicit-sized text fields.
text_length = {
'tinyblob': 0xff,
'tinytext': 0xff,
'blob': 0xffff,
'text': 0xffff,
'mediumblob': 0xffffff,
'mediumtext': 0xffffff,
'longblob': 0xffffffffL,
'longtext': 0xffffffffL,
}
if text_length.has_key(sql_type):
column['type'] = 'text'
column['length'] = text_length[sql_type]
return column
# Floating-point fields.
match = re.match("^(float|double|decimal)", sql_type)
if match:
column['type'] = 'float'
return column
# Field types we don't know how to handle includes date, time,
# year, set(...). We don't raise an exception here because we
# might not look at this field so might not care that we don't
# know what type it is.
column['type'] = 'other'
return column
def get_columns(self, table):
return self.fetch_rows_as_list_of_dictionaries(
'describe %s;' % table, 'describe %s' % table)
# get_types(table) returns a dictionary mapping name to type for all
# the columns in the table.
def get_types(self, table):
results = self.get_columns(table)
columns = {}
for result in results:
columns[result['Field']] = self.convert_type(table, result)
# fake some fields for the 'bugs' table;
if table == 'bugs':
columns['longdesc'] = { 'field': 'longdesc',
'type': 'text',
'length': 0,
'default': None,
'null': 0, }
# From Bugzilla 2.17.1, the products and components tables
# are normalized. We fake something resembling the old
# schema, as 'product' and 'component' are really bug fields.
#
# When we drop support for Bugzilla 2.16.x and earlier, we
# can do this differently.
if self.features.has_key('normalized tables'):
del columns['product_id']
del columns['component_id']
product_cols = self.get_columns('products')
for c in product_cols:
if c['Field'] == 'name':
columns['product'] = self.convert_type('products', c)
columns['product']['field'] = 'product'
component_cols = self.get_columns('components')
for c in component_cols:
if c['Field'] == 'name':
columns['component'] = self.convert_type('components', c)
columns['component']['field'] = 'component'
# From Bugzilla 2.19.3, we no longer have any enum columns,
# but enum tables instead.
if self.features.has_key('enum tables'):
# Start off with a list of the built-in enum tables:
# field name param for default value
enum_tables = [('bug_severity', 'defaultseverity'),
('bug_status', 'defaultstatus'),
('op_sys', 'defaultopsys'),
('priority', 'defaultpriority'),
('rep_platform', 'defaultplatform'),
('resolution', None),
]
# From Bugzilla 3.0, also consider custom enum tables
if self.features.has_key('custom fields'):
for (name,cf) in self.custom_fields().items():
if cf.get('type') == 2: # FIELD_TYPE_SINGLE_SELECT
enum_tables.append((name, None))
# now get all the possible values
for (col, defparam) in enum_tables:
default = self.params.get(defparam)
values = self.fetch_rows_as_list_of_sequences('select value from %s where isactive=1 order by sortkey' % col,
'get possible values of %s' % col)
values = map(lambda x:x[0], values)
if default not in values:
default = values[0]
columns[col]['length'] = max(map(lambda x:len(x),
values))
columns[col]['type'] = 'enum'
columns[col]['values'] = values
columns[col]['default'] = default
# Non-enum custom fields already showed up in 'describe bugs'
return columns
# 6. BASIC OPERATIONS
# table_present(tablename) returns 1 if the named table is present
# in the database, 0 otherwise.
def table_present(self, tablename):
assert isinstance(tablename, types.StringType)
rows = self.execute("show tables like %s;"
% self.quote_string(tablename))
return rows == 1
# insert_row(table, dict) inserts a row (specified as a dictionary
# mapping column name to value) into the given table.
def insert_row(self, table, dict):
keys = []
values = []
params = []
for (key,value) in dict.items():
quoted = self.quote(table, key, value)
if isinstance(quoted, tuple):
params.append(quoted[1])
quoted = quoted[0]
keys.append(key)
values.append(quoted)
command = ("insert %s ( %s ) values ( %s );"
% (table, ','.join(keys), ','.join(values)))
rows = self.execute(command, params)
if rows != 1:
# "Couldn't insert row in table '%s'."
raise error, catalog.msg(116, table)
# insert_row_rid_sid is the same as insert_row, but includes rid and
# sid columns in the inserted row.
def insert_row_rid_sid(self, table, dict):
dict['rid'] = self.rid
dict['sid'] = self.sid
self.insert_row(table, dict)
# update_row(table, dict, where) updates the rows in the given table
# matching the "where" clause so that they have have the values
# specified by the dictionary mapping column name to value. An
# error is raised if there is no row, or more than one row, matching
# the "where" clause.
def update_row(self, table, dict, where):
updates = []
params = []
for (key,value) in dict.items():
quoted = self.quote(table, key, value)
if isinstance(quoted, tuple):
params.append(quoted[1])
quoted = quoted[0]
updates.append("%s = %s" % (key, quoted))
command = "update %s set %s where %s;" % (table,
','.join(updates),
where)
rows = self.execute(command, params)
if rows != 1:
# "Couldn't update row in table '%s' where %s."
raise error, catalog.msg(117, (table, where))
# update_row_rid_sid is the same as update_row, but includes rid and
# sid columns in the "where" clause.
def update_row_rid_sid(self, table, dict, where):
self.update_row(table, dict, where +
(' and rid = %s and sid = %s' %
(self.quote_string(self.rid),
self.quote_string(self.sid))))
# delete_rows(table, where) deletes all rows in the given table
# matching the "where" clause.
def delete_rows(self, table, where):
self.execute('delete from %s where %s;' % (table, where))
# delete_rows_rid_sid is the same as delete_rows, but includes rid
# and sid columns in the "where" clause.
def delete_rows_rid_sid(self, table, where):
self.delete_rows(table, where +
(' and rid = %s and sid = %s' %
(self.quote_string(self.rid),
self.quote_string(self.sid))))
# 7. BUGZILLA VERSIONS
#
# The methods in this section detect the Bugzilla version (by
# analyzing the database schema) and handle these differences
# between Bugzilla versions.
#
# bugzilla_version_map is a list of triplets. Each triplet is
# (Bugzilla version, tables added, tables removed). Using this
# table we can work out the Bugzilla version by executing "show
# tables" and then going through the versions until we find one
# whose tables are not all present. See [NB 2000-11-14a] for a
# variorum edition of the schemas for many Bugzilla versions.
bugzilla_version_map = [
('2.0', ['bugs',
'bugs_activity',
'cc',
'components',
'logincookies',
'profiles',
'versions',
], []),
('2.2', ['products',
], []),
('2.4', ['attachments',
'groups',
], []),
('2.6', ['dependencies',
], []),
('2.8', ['votes',
], []),
('2.10', ['watch',
'longdescs',
'profiles_activity',
'namedqueries',
'fielddefs',
'keywords',
'keyworddefs',
'milestones',
'shadowlog',
], []),
('2.12', ['duplicates',
], []),
('2.14', ['tokens',
], []),
('2.16', ['attachstatusdefs',
'attachstatuses',
], []),
('2.17.1', ['bug_group_map',
'user_group_map',
'group_group_map',
'flags',
'flagtypes',
'flaginclusions',
'flagexclusions',
'quips',
], ['attachstatusdefs',
'attachstatuses',
]),
('2.17.3', ['group_control_map',
], ['shadowlog',
]),
('2.17.5', ['series',
'series_categories',
'series_data',
'user_series_map',
], []),
('2.18', ['category_group_map',
], ['user_series_map',
]),
('2.19.1', ['classifications',
'whine_events',
'whine_queries',
'whine_schedules',
], []),
# the following tables actually added in 2.19.3
('2.20', ['bug_severity',
'bug_status',
'op_sys',
'priority',
'rep_platform',
'resolution',
'bz_schema',
'profile_setting',
'setting',
'setting_value',
'email_setting',
], []),
('2.22', ['attach_data', # actually added in 2.21.1
], []),
('3.0' , ['component_cc', # actually added in 2.23.3
'namedquery_group_map', # actually added in 2.23.3
'namedqueries_link_in_footer', # actually added in 2.23.3
], []),
]
# The list of Bugzilla versions supported by the P4DTI:
bugzilla_versions_supported = ['2.20',
'2.22',
'3.0',
]
# find_bugzilla_version() determines the Bugzilla version. It
# returns a quadruple: a string containing the best-matching
# Bugzilla version, a "badness" count of schema differences, a list
# of tables which are present in the database but not in the schema
# for that version, and a list of tables in that version's schema
# but missing from the database (differences will mean either that
# the Bugzilla has been modified or extended, or is a future
# version).
def find_bugzilla_version(self):
tables = self.table_names()
# Eliminate P4DTI table (these all start with "p4dti_").
tables = filter(lambda x:x[:6] != 'p4dti_', tables)
# Work out the version.
best_version = None
version_tables = []
for (version, added, removed) in self.bugzilla_version_map:
version_tables = version_tables + added
for table in removed:
version_tables.remove(table)
# version_tables is now the list of tables in this version.
extra = tables[:]
missing = []
for table in version_tables:
if table in extra:
extra.remove(table)
else:
missing.append(table)
badness = len(missing) + len(extra)
if ((best_version is None) or
(badness < best_version[1])):
best_version = (version, badness, extra, missing)
if badness == 0: # exact match
break
return best_version
# check_bugzilla_version() finds the Bugzilla version (by inspecting
# the database, using find_bugzilla_version above), checks that it
# is supported by the P4DTI, and causes an error if not.
def check_bugzilla_version(self):
version, badness, extra, missing = self.find_bugzilla_version()
if missing:
if extra:
# "Bugzilla version %s detected, with these tables
# missing: %s and these additional tables present: %s.
# The P4DTI may fail to operate correctly."
self.log(131, (version, missing, extra))
else:
# "Bugzilla version %s detected, with these tables
# missing: %s. The P4DTI may fail to operate correctly."
self.log(132, (version, missing))
else:
if extra:
# "Bugzilla version %s detected, with these additional
# tables present: %s."
self.log(124, (version, extra))
else:
# "Bugzilla version %s detected."
self.log(125, version)
if version not in self.bugzilla_versions_supported:
# "Bugzilla version %s is not supported by the P4DTI."
raise error, catalog.msg(123, version)
if version >= '2.17':
self.features['normalized tables'] = 1
else:
self.features['bitset groups'] = 1
if version >= '2.17.3':
self.features['group_control_map'] = 1
if version >= '2.20':
self.features['enum tables'] = 1
if version >= '3.0':
self.features['custom fields'] = 1
self.bugzilla_version = version
def mysql_unsupported(self, version):
# "MySQL version %s is not supported by the P4DTI."
raise error, catalog.msg(134, version)
def mysql_deprecated_unicode(self, version):
# "MySQL version %s detected. Use of this
# version is deprecated due to poor Unicode support."
self.log(136, version)
def mysql_supported(self, version):
self.features['mysql_unicode'] = 1
# "MySQL version %s detected."
self.log(135, version)
# The MySQL versions supported by the P4DTI:
mysql_version_patterns = [(r'4\.0\.', mysql_deprecated_unicode),
(r'4\.1\.', mysql_supported),
(r'5\.0\.', mysql_supported),
(r'5\.1\.', mysql_supported),
]
# check_mysql_version() identifies the MySQL version and warns if
# it is not supported.
def check_mysql_version(self):
version_row = self.select_at_most_one_row(
"show variables like 'version'",
"MySQL version string")
if version_row:
mysql_version_string = version_row[1]
for (pattern, fn) in self.mysql_version_patterns:
if re.match(pattern, mysql_version_string):
fn(self, mysql_version_string)
return
self.mysql_unsupported(mysql_version_string)
else:
# "Could not determine MySQL version."
raise error, catalog.msg(137)
# check_utf8_config() attempts to determine whether the Bugzilla
# database is set to UTF-8 encoding.
#
# We just check the character set of the longdescs table
# and the longdescs.thetext column.
def check_utf8_config(self):
if (self.features.has_key('mysql_unicode') and
self.params.get('utf8','0') == '1'):
create_row = self.select_at_most_one_row(
"show create table longdescs",
"Get create table command to check character sets")
table_charset = 'unknown'
if create_row:
create_command = create_row[1]
# find the table character set
m = re.search('DEFAULT CHARSET=([a-z0-9]+)', create_command)
if m:
table_charset = m.group(1)
if table_charset != 'utf8':
# "Bugzilla is configured to store text in UTF-8
# encoding, but the Bugzilla database is not
# configured for that encoding (table '%s' has
# character set '%s'). Replication of non-ASCII text
# data may be incorrect."
self.log(138, ('longdescs', table_charset))
else:
# "Bugzilla table '%s' has character set '%s'."
self.log(140, ('longdescs', table_charset))
# table is UTF-8; check column character set
column_charset = table_charset
m = re.search('\\n *`thetext`(.*)\\n', create_command)
if m:
m = re.search('character set ([a-z0-9]+)', m.group(1))
if m:
column_charset = m.group(1)
else:
column_charset = 'not found'
if column_charset != 'utf8':
# "Bugzilla is configured to store text in UTF-8
# encoding, but the Bugzilla database is not
# configured for that encoding (column '%s' has
# character set '%s'). Replication of non-ASCII
# text data may be incorrect."
self.log(139, ('longdescs.thetext',
column_charset))
else:
# "Bugzilla column '%s' has character set '%s'."
self.log(141, ('longdescs.thetext',
column_charset))
self.features['unicode'] = 1
# 8. P4DTI SCHEMA EXTENSIONS
#
# See [NB 2000-11-14b] for the definition of the schema extensions.
#
# The P4DTI schema extensions have gone through a number of
# versions, described in detail in [NB 2000-11-14b, 5]. When the
# P4DTI is upgraded, it must check to see if the schema extensions
# belong to an old schema version; if so, they must be upgraded to
# the new schema version.
# p4dti_schema_extensions is a list of pairs (table, sql) giving the
# name of a table in the P4DTI schema extensions and the SQL command
# used to create it.
p4dti_schema_extensions = [
('p4dti_bugs',
"create table p4dti_bugs "
" ( bug_id mediumint not null primary key, "
" rid varchar(32) not null, "
" sid varchar(32) not null, "
" jobname text not null, "
" migrated datetime, "
" index(bug_id) "
" );"),
('p4dti_bugs_activity',
"create table p4dti_bugs_activity "
" ( bug_id mediumint not null, "
" who mediumint not null, "
" bug_when datetime not null, "
" fieldid mediumint not null, "
" oldvalue tinytext, "
" newvalue tinytext, "
" rid varchar(32) not null, "
" sid varchar(32) not null, "
" index(bug_id), "
" index(bug_when) "
" );"),
('p4dti_changelists',
"create table p4dti_changelists "
" ( changelist int not null, "
" rid varchar(32) not null, "
" sid varchar(32) not null, "
" user mediumint not null, "
" flags int not null, "
" description longtext not null, "
" client text not null, "
" p4date text not null, "
" unique (changelist, rid, sid) "
" );"),
('p4dti_fixes',
"create table p4dti_fixes "
" ( changelist int not null, "
" bug_id mediumint not null, "
" rid varchar(32) not null, "
" sid varchar(32) not null, "
" user mediumint not null, "
" client text not null, "
" status text not null, "
" p4date text not null, "
" unique (bug_id, changelist, rid, sid), "
" index (bug_id) "
" );"),
('p4dti_filespecs',
"create table p4dti_filespecs "
" ( bug_id mediumint not null, "
" rid varchar(32) not null, "
" sid varchar(32) not null, "
" filespec longtext not null, "
" index(bug_id)"
" );"),
('p4dti_config',
"create table p4dti_config "
" ( rid varchar(32) not null, "
" sid varchar(32) not null, "
" config_key text not null, "
" config_value longtext, "
" index(rid, sid)"
" );"),
('p4dti_replications',