# $Id$
import os, os.path
import datetime
import time
import sys
import xmlrpclib
import traceback
from types import StringTypes

import utils
from TestSite import TestSite
from TestNode import TestNode
from TestUser import TestUser
from TestKey import TestKey
from TestSlice import TestSlice
from TestSliver import TestSliver
from TestBox import TestBox
from TestSsh import TestSsh

# step methods must take (self, options) and return a boolean
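# for instance, a minimal step would look like this (illustrative sketch only,
# the method name is hypothetical):
#
#    def sleep_one_minute (self, options):
#        return standby(1)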

def standby(minutes):
    utils.header('Entering StandBy for %d minutes'%minutes)
    time.sleep(60*minutes)
    return True

def standby_generic (func):
    def actual(self,options):
        minutes=int(func.__name__.split("_")[1])
        return standby(minutes)
    return actual
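# note: the standby_1 .. standby_20 steps defined at the bottom of TestPlc are
# generated through this decorator - e.g. standby_5(options) just calls standby(5),
# the number being parsed out of the method name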

class TestPlc:

    def __init__ (self,plc_spec):
        self.plc_spec=plc_spec
        self.path=os.path.dirname(sys.argv[0])
        self.test_ssh=TestSsh(self.plc_spec['hostname'],self.path)
        try:
            self.vserverip=plc_spec['vserverip']
            self.vservername=plc_spec['vservername']
            self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
            self.vserver=True
        except KeyError:
            self.vserver=False
            self.url="https://%s:443/PLCAPI/"%plc_spec['hostname']
#        utils.header('Using API url %s'%self.url)
        self.server=xmlrpclib.Server(self.url,allow_none=True)
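        # e.g. with a (hypothetical) vserverip of 192.168.0.10 the API url
        # ends up as https://192.168.0.10:443/PLCAPI/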

    def name(self):
        name=self.plc_spec['name']
        if self.vserver:
            return name+"[%s]"%self.vservername
        else:
            return name+"[chroot]"

    def hostname(self):
        return self.plc_spec['hostname']

    def is_local (self):
        return self.test_ssh.is_local()

    # defining the API methods on this object through xmlrpc
    # would help, but is not strictly necessary
    def connect (self):
        pass

    def full_command(self,command):
        return self.test_ssh.to_host(self.host_to_guest(command))

    def run_in_guest (self,command):
        return utils.system(self.full_command(command))

    def run_in_host (self,command):
        return utils.system(self.test_ssh.to_host(command))

    # command gets run in the chroot/vserver
    def host_to_guest(self,command):
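        # e.g., with a hypothetical vservername of "myplc01":
        #   host_to_guest("service plc start") -> "vserver myplc01 exec service plc start"
        # in chroot mode the command is wrapped with "chroot /plc/root" instead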
        if self.vserver:
            return "vserver %s exec %s"%(self.vservername,command)
        else:
            return "chroot /plc/root %s"%TestSsh.backslash_shell_specials(command)

    # copy a file to the myplc root image - pass in_data=True if the file must go in /plc/data
    def copy_in_guest (self, localfile, remotefile, in_data=False):
        if in_data:
            chroot_dest="/plc/data"
        else:
            chroot_dest="/plc/root"
        if self.is_local():
            if not self.vserver:
                utils.system("cp %s %s/%s"%(localfile,chroot_dest,remotefile))
            else:
                utils.system("cp %s /vservers/%s/%s"%(localfile,self.vservername,remotefile))
        else:
            if not self.vserver:
                utils.system("scp %s %s:%s/%s"%(localfile,self.hostname(),chroot_dest,remotefile))
            else:
                utils.system("scp %s %s:/vservers/%s/%s"%(localfile,self.hostname(),self.vservername,remotefile))


    # xxx quick n dirty
    def run_in_guest_piped (self,local,remote):
        return utils.system(local+" | "+self.full_command(remote))

    def auth_root (self):
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role']
                }

    def locate_site (self,sitename):
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
                return site
            if site['site_fields']['login_base'] == sitename:
                return site
        raise Exception,"Cannot locate site %s"%sitename

    def locate_node (self,nodename):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == nodename:
                    return (site,node)
        raise Exception,"Cannot locate node %s"%nodename

    def locate_key (self,keyname):
        for key in self.plc_spec['keys']:
            if key['name'] == keyname:
                return key
        raise Exception,"Cannot locate key %s"%keyname

    # all different hostboxes used in this plc
    def gather_hostBoxes(self):
        # maps over sites and nodes, returns [ (host_box,test_node) ]
        tuples=[]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_nodes .. ] }
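        # e.g. (with hypothetical box names):
        #   { 'testbox1.example.org' : [ test_node_1, test_node_2 ] }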
        result = {}
        for (box,node) in tuples:
            if not result.has_key(box):
                result[box]=[node]
            else:
                result[box].append(node)
        return result

    # a step for checking this stuff
    def showboxes (self,options):
        print 'showboxes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
        return True

    # make this a valid step
    def kill_all_qemus(self,options):
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # this is the brute force version, kill all qemus on that host box
            TestBox(box,options.buildname).kill_all_qemus()
        return True

    # make this a valid step
    def list_all_qemus(self,options):
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # this is the brute force version, list all qemus on that host box
            TestBox(box,options.buildname).list_all_qemus()
        return True

    # list only the right qemus
    def list_qemus(self,options):
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.list_qemu()
        return True

    # kill only the right qemus
    def kill_qemus(self,options):
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.kill_qemu()
        return True

    def clear_ssh_config (self,options):
        # install local ssh_config file as root's .ssh/config - ssh should be quiet
        # dir might need creation first
        self.run_in_guest("mkdir /root/.ssh")
        self.run_in_guest("chmod 700 /root/.ssh")
        # this does not work - the > redirection somehow ends up as an argument to cat
        #self.run_in_guest_piped("cat ssh_config","cat > /root/.ssh/config")
        self.copy_in_guest("ssh_config","/root/.ssh/config",True)
        return True

    #################### step methods

    ### uninstall
    def uninstall_chroot(self,options):
        self.run_in_host('service plc safestop')
        ##### detect the last installed myplc version and remove it
        self.run_in_host('rpm -e myplc')
        ##### clean up the /plc directory
        self.run_in_host('rm -rf /plc/data')
        ##### stop any running vservers
        self.run_in_host('for vserver in $(ls /vservers/* | sed -e s,/vservers/,,) ; do vserver $vserver stop ; done')
        return True

    def uninstall_vserver(self,options):
        self.run_in_host("vserver --silent %s delete"%self.vservername)
        return True

    def uninstall(self,options):
        # if there's a chroot-based myplc running while a native-based myplc is being deployed,
        # it sounds safer to have the former uninstalled too
        # note that the vserver method cannot be invoked for chroot instances, as vservername is required
        if self.vserver:
            self.uninstall_vserver(options)
            self.uninstall_chroot(options)
        else:
            self.uninstall_chroot(options)
        return True

    ### install
    def install_chroot(self,options):
        # nothing to do
        return True

    def install_vserver(self,options):
        # we need the build dir for vtest-init-vserver
        if self.is_local():
            # a full path for the local calls
            build_dir=self.path+"/build"
        else:
            # use a standard name - will be relative to HOME
            build_dir=options.buildname
        # run checkout in any case - this does an update if the dir already exists
        build_checkout = "svn checkout %s %s"%(options.build_url,build_dir)
        if self.run_in_host(build_checkout) != 0:
            raise Exception,"Cannot checkout build dir"
        # the repo url is taken from the myplc url,
        # with the last two components (arch/rpmname) removed
        repo_url = options.myplc_url
        for level in [ 'rpmname','arch' ]:
            repo_url = os.path.dirname(repo_url)
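        # e.g. (hypothetical url, for illustration only):
        #   http://build.example.org/planetlab/myplc/RPMS/i386/myplc-4.2-1.i386.rpm
        #   -> http://build.example.org/planetlab/myplc/RPMS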
        create_vserver="%s/vtest-init-vserver.sh %s %s -- --interface eth0:%s"%\
            (build_dir,self.vservername,repo_url,self.vserverip)
        if self.run_in_host(create_vserver) != 0:
            raise Exception,"Could not create vserver for %s"%self.vservername
        return True

    def install(self,options):
        if self.vserver:
            return self.install_vserver(options)
        else:
            return self.install_chroot(options)

    ### install_rpm
    def cache_rpm(self,url):
        self.run_in_host('rm -rf *.rpm')
        utils.header('Curling rpm from %s'%url)
        id = self.run_in_host('curl -O '+url)
        if (id != 0):
            raise Exception,"Could not get rpm from %s"%url
        return True

    def install_rpm_chroot(self,options):
        rpm = os.path.basename(options.myplc_url)
        if (not os.path.isfile(rpm)):
            self.cache_rpm(options.myplc_url)
        utils.header('Installing rpm %s'%rpm)
        self.run_in_host('rpm -Uvh '+rpm)
        self.run_in_host('service plc mount')
        return True

    def install_rpm_vserver(self,options):
        self.run_in_guest("yum -y install myplc-native")
        return True

    def install_rpm(self,options):
        if self.vserver:
            return self.install_rpm_vserver(options)
        else:
            return self.install_rpm_chroot(options)

    ### configure
    def configure(self,options):
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        for var in [ 'PLC_NAME',
                     'PLC_ROOT_PASSWORD',
                     'PLC_ROOT_USER',
                     'PLC_MAIL_ENABLED',
                     'PLC_MAIL_SUPPORT_ADDRESS',
                     'PLC_DB_HOST',
                     'PLC_API_HOST',
                     'PLC_WWW_HOST',
                     'PLC_BOOT_HOST',
                     'PLC_NET_DNS1',
                     'PLC_NET_DNS2']:
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        fileconf.close()
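        # the generated file feeds plc-config-tty and looks like this
        # (values are examples only):
        #   e PLC_NAME
        #   TestLab
        #   ...
        #   w
        #   q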
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)
        return True

    # the chroot install is slightly different in this respect
    def start(self, options):
        if self.vserver:
            self.run_in_guest('service plc start')
        else:
            self.run_in_host('service plc start')
        return True

    def stop(self, options):
        if self.vserver:
            self.run_in_guest('service plc stop')
        else:
            self.run_in_host('service plc stop')
        return True

    # could use a TestKey class
    def store_keys(self, options):
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
        return True

    def clean_keys(self, options):
        utils.system("rm -rf %s/keys/"%self.path)
        return True

    def sites (self,options):
        return self.do_sites(options)

    def clean_sites (self,options):
        return self.do_sites(options,action="delete")

    def do_sites (self,options,action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if (action != "add"):
                utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
                test_site.delete_site()
                # users get deleted along with the site
                #test_site.delete_users()
                continue
            else:
                utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
                test_site.create_site()
                test_site.create_users()
        return True

    def nodes (self, options):
        return self.do_nodes(options)
    def clean_nodes (self, options):
        return self.do_nodes(options,action="delete")

    def do_nodes (self, options,action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if action != "add":
                utils.header("Deleting nodes in site %s"%test_site.name())
                for node_spec in site_spec['nodes']:
                    test_node=TestNode(self,test_site,node_spec)
                    utils.header("Deleting %s"%test_node.name())
                    test_node.delete_node()
            else:
                utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
                for node_spec in site_spec['nodes']:
                    utils.pprint('Creating node %s'%node_spec,node_spec)
                    test_node = TestNode (self,test_site,node_spec)
                    test_node.create_node ()
        return True

    # create nodegroups if needed, and populate
    # no need for a clean_nodegroups if we are careful enough
    def nodegroups (self, options):
        # 1st pass to scan contents
        groups_dict = {}
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        auth=self.auth_root()
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
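            # 2nd pass: if the nodegroup does not exist yet, the [0] lookup
            # below raises, and we create it before adding the nodes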
            try:
                self.server.GetNodeGroups(auth,{'name':nodegroupname})[0]
            except:
                self.server.AddNodeGroup(auth,{'name':nodegroupname})
            for node in group_nodes:
                self.server.AddNodeToNodeGroup(auth,node,nodegroupname)
        return True

    def all_hostnames (self):
        hostnames = []
        for site_spec in self.plc_spec['sites']:
            hostnames += [ node_spec['node_fields']['hostname'] \
                           for node_spec in site_spec['nodes'] ]
        return hostnames

    # gracetime : during the first <gracetime> minutes nothing gets printed
    def do_nodes_booted (self, minutes, gracetime=2):
        # compute timeout
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
        # the nodes that haven't reached 'boot' yet - start with the full list and shrink over time
        tocheck = self.all_hostnames()
        utils.header("checking nodes %r"%tocheck)
        # create a dict hostname -> status
        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        while tocheck:
            # get their status
            tocheck_status=self.server.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
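            # each entry carries the two fields requested above, e.g. (hypothetical hostname):
            #   {'hostname': 'node1.example.org', 'boot_state': 'inst'}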
            # update status
            for array in tocheck_status:
                hostname=array['hostname']
                boot_state=array['boot_state']
                if boot_state == 'boot':
                    utils.header ("%s has reached the 'boot' state"%hostname)
                else:
                    # if it's a real node, never mind
                    (site_spec,node_spec)=self.locate_node(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                        # let's cheat
                        boot_state = 'boot'
                    if datetime.datetime.now() > graceout:
                        utils.header ("%s still in '%s' state"%(hostname,boot_state))
                        graceout=datetime.datetime.now()+datetime.timedelta(1)
                status[hostname] = boot_state
            # refresh tocheck
            tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != 'boot' ]
            if not tocheck:
                return True
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
                return False
            # otherwise, sleep for a while
            time.sleep(15)
        # only useful in empty plcs
        return True

    def nodes_booted(self,options):
        return self.do_nodes_booted(minutes=0)

    # scan and store the nodes' public keys, so that ssh does not ask for confirmation
    def scan_publicKeys(self,hostnames):
        try:
            temp_knownhosts="/root/known_hosts"
            remote_knownhosts="/root/.ssh/known_hosts"
            self.run_in_host("touch %s"%temp_knownhosts )
            for hostname in hostnames:
                utils.header("Scanning public key for %s and storing it in the known_hosts file (under the root image)"%hostname)
                scan=self.run_in_host('ssh-keyscan -t rsa %s >> %s '%(hostname,temp_knownhosts))
            # store the public keys in the right root image
            self.copy_in_guest(temp_knownhosts,remote_knownhosts,True)
            # clean up the temporary keys file
            self.run_in_host('rm -f %s'%temp_knownhosts )
        except Exception, err:
            print err

    def do_check_nodesSsh(self,minutes):
        # compute timeout
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
        tocheck = self.all_hostnames()
        self.scan_publicKeys(tocheck)
        utils.header("checking ssh connectivity on nodes %r"%tocheck)
        while tocheck:
            for hostname in tocheck[:]:
                # try to ssh into the node
                access=self.run_in_guest('ssh -o StrictHostKeyChecking=no -o BatchMode=yes -i /etc/planetlab/root_ssh_key.rsa root@%s date'%hostname )
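                # run_in_guest returns the command's exit status, so 0 means
                # the ssh probe (and thus the node) responded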
                if (not access):
                    utils.header('Node %s is sshable'%hostname)
                    # refresh tocheck
                    tocheck.remove(hostname)
                else:
                    (site_spec,node_spec)=self.locate_node(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header ("WARNING : skipping ssh access check on real node %s"%hostname)
                        tocheck.remove(hostname)
            if not tocheck:
                return True
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE to ssh into %s"%hostname)
                return False
            # otherwise, sleep for a while
            time.sleep(15)
        # only useful in empty plcs
        return True

    def nodes_ssh(self, options):
        return self.do_check_nodesSsh(minutes=2)

    def bootcd (self, options):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                test_node.prepare_area()
                test_node.create_boot_cd()
                test_node.configure_qemu()
        return True

    def do_check_initscripts(self):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            test_node = TestNode (self,test_site,site_spec['nodes'])
            for slice_spec in self.plc_spec['slices']:
                test_slice=TestSlice (self,test_site,slice_spec)
                test_sliver=TestSliver(self,test_node,test_slice)
                init_status=test_sliver.get_initscript(slice_spec)
                if (not init_status):
                    return False
            return init_status

    def check_initscripts(self, options):
        return self.do_check_initscripts()

    def initscripts (self, options):
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
            self.server.AddInitScript(self.auth_root(),initscript['initscript_fields'])
        return True

    def slices (self, options):
        return self.do_slices()

    def clean_slices (self, options):
        return self.do_slices("delete")

    def do_slices (self, action="add"):
        for slice in self.plc_spec['slices']:
            site_spec = self.locate_site (slice['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice)
            if action != "add":
                utils.header("Deleting slices in site %s"%test_site.name())
                test_slice.delete_slice()
            else:
                utils.pprint("Creating slice",slice)
                test_slice.create_slice()
                utils.header('Created Slice %s'%slice['slice_fields']['name'])
        return True

    def check_slices(self, options):
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice_spec)
            status=test_slice.do_check_slice(options)
            if (not status):
                return False
        return status

    def start_nodes (self, options):
        utils.header("Starting nodes")
        for site_spec in self.plc_spec['sites']:
            TestSite(self,site_spec).start_nodes (options)
        return True

    def stop_nodes (self, options):
        self.kill_all_qemus(options)
        return True

    def check_tcp (self, options):
        # we just need to create a sliver object, nothing else
        test_sliver=TestSliver(self,
                               TestNode(self, TestSite(self,self.plc_spec['sites'][0]),
                                        self.plc_spec['sites'][0]['nodes'][0]),
                               TestSlice(self,TestSite(self,self.plc_spec['sites'][0]),
                                         self.plc_spec['slices'][0]))
        return test_sliver.do_check_tcp(self.plc_spec['tcp_param'],options)

    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database, options):
        # uses options.dbname if it is found
        try:
            name=options.dbname
            if not isinstance(name,StringTypes):
                raise Exception
        except:
            t=datetime.datetime.now()
            d=t.date()
            name=str(d)
        return "/root/%s-%s.sql"%(database,name)
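        # e.g. dbfile("planetlab4",options) -> "/root/planetlab4-2008-03-17.sql"
        # (the date part is just an example - it defaults to today when options.dbname is unset)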

    def db_dump(self, options):
        dump=self.dbfile("planetlab4",options)
        self.run_in_guest('pg_dump -U pgsqluser planetlab4 -f '+ dump)
        utils.header('Dumped planetlab4 database in %s'%dump)
        return True

    def db_restore(self, options):
        dump=self.dbfile("planetlab4",options)
        ## stop the httpd service
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab4','psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab4')
        self.run_in_guest('psql -U pgsqluser planetlab4 -f '+dump)
        ## restart the httpd service
        self.run_in_guest('service httpd start')

        utils.header('Database restored from ' + dump)
        return True

    @standby_generic
    def standby_1(): pass
    @standby_generic
    def standby_2(): pass
    @standby_generic
    def standby_3(): pass
    @standby_generic
    def standby_4(): pass
    @standby_generic
    def standby_5(): pass
    @standby_generic
    def standby_6(): pass
    @standby_generic
    def standby_7(): pass
    @standby_generic
    def standby_8(): pass
    @standby_generic
    def standby_9(): pass
    @standby_generic
    def standby_10(): pass
    @standby_generic
    def standby_11(): pass
    @standby_generic
    def standby_12(): pass
    @standby_generic
    def standby_13(): pass
    @standby_generic
    def standby_14(): pass
    @standby_generic
    def standby_15(): pass
    @standby_generic
    def standby_16(): pass
    @standby_generic
    def standby_17(): pass
    @standby_generic
    def standby_18(): pass
    @standby_generic
    def standby_19(): pass
    @standby_generic
    def standby_20(): pass