aws.rb

# frozen_string_literal: true

require 'accounts'
require 'regions'
require 'mime-types'
require 'fog-aws'
require 's3'
require 'sqs'
require 'date'
require 'ipaddr'
require 'neo4j'
require 'rds'
require 'lambdas'
require 'neoinfra/config'
require 'neoinfra/cloudwatch'
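
# Private (RFC 1918) address ranges, used below to flag security-group
# CIDR blocks that only cover internal networks.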
RFC_1918 = [
  IPAddr.new('10.0.0.0/8'),
  IPAddr.new('172.16.0.0/12'),
  IPAddr.new('192.168.0.0/16')
].freeze

# NeoInfra Account information
module NeoInfra
  # Provides information about the AWS accounts available.
  class Aws
    def initialize
      @cfg = NeoInfra::Config.new
      neo4j_url = "http://#{@cfg.neo4j[:host]}:#{@cfg.neo4j[:port]}"
      Neo4j::Session.open(:server_db, neo4j_url)
    end
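
    # List all region names visible to the first configured account.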
    def regions
      account = @cfg.accounts.first
      base_conf = {
        provider: 'AWS',
        aws_access_key_id: account[:key],
        aws_secret_access_key: account[:secret]
      }
      conn = Fog::Compute.new(base_conf)
      conn.describe_regions.data[:body]['regionInfo'].collect { |x| x['regionName'] }
    end

    def region_count
      Region.all.length
    end

    def az_count
      Az.all.length
    end
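
    # List the availability zones of a region, de-duplicated across all
    # configured accounts.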
    def azs(region)
      azs = []
      @cfg.accounts.each do |account|
        base_conf = {
          provider: 'AWS',
          aws_access_key_id: account[:key],
          aws_secret_access_key: account[:secret],
          region: region
        }
        begin
          conn = Fog::Compute.new(base_conf)
          conn.describe_availability_zones.data[:body]['availabilityZoneInfo'].collect { |x| x['zoneName'] }.each do |z|
            azs << z
          end
        rescue StandardError => e
          puts "Couldn't load availability zones for region #{region}: #{e.message}"
        end
      end
      azs.uniq
    end
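
    # Store every region and availability zone as Region/Az nodes and link
    # each Az to its Region.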
    def load_regions
      regions.each do |region|
        next unless Region.where(region: region).empty?
        r = Region.new(
          region: region
        )
        r.save
        azs(region).each do |az|
          next unless Az.where(az: az).empty?
          a = Az.new(az: az)
          a.save
          AzRegion.create(from_node: a, to_node: Region.where(region: region).first)
        end
      end
    end
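
    # Return all known S3 buckets as hashes, largest first.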
    def list_buckets
      buckets = []
      Bucket.all.order('n.size DESC').each do |b|
        buckets << {
          'name' => b.name,
          'size' => b.size,
          'versioning' => b.versioning,
          'creation' => b.creation,
          'region' => b.region.region,
          'owner' => b.owner.name
        }
      end
      buckets
    end
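
    # Discover S3 buckets in every account/region and store them as Bucket
    # nodes linked to their Region and owning AwsAccount.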
    def load_buckets
      aws = NeoInfra::Aws.new
      cw = NeoInfra::Cloudwatch.new
      @cfg.accounts.each do |account|
        base_conf = {
          provider: 'AWS',
          aws_access_key_id: account[:key],
          aws_secret_access_key: account[:secret]
        }
        aws.regions.each do |region|
          region_conf = { region: region }
          s = Fog::Storage.new(region_conf.merge(base_conf))
          s.directories.each do |bucket|
            begin
              next unless bucket.location == region
              next unless Bucket.where(name: bucket.key).empty?
              vers = bucket.versioning?.to_s
              crea = bucket.creation_date.to_s
              b = Bucket.new(
                name: bucket.key,
                versioning: vers,
                creation: crea,
                size: cw.get_bucket_size(account[:key], account[:secret], bucket.location, bucket.key)
              )
              b.save
              BucketRegion.create(from_node: b, to_node: Region.where(region: bucket.location).first)
              BucketAccount.create(from_node: b, to_node: AwsAccount.where(name: account[:name]).first)
            rescue StandardError => e
              puts "ERROR: #{account[:name]}, #{region}, #{e.message}"
            end
          end
        end
      end
    end
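
    # Discover EC2 security groups in every account/region, store them as
    # SecurityGroup nodes, and attach their ingress rules as IpRules nodes.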
    def load_security_groups
      @cfg.accounts.each do |account|
        base_conf = {
          provider: 'AWS',
          aws_access_key_id: account[:key],
          aws_secret_access_key: account[:secret]
        }
        regions.each do |region|
          region_conf = { region: region }
          begin
            conn = Fog::Compute.new(region_conf.merge(base_conf))
          rescue StandardError
            puts "Error loading security groups for region #{region}"
            next
          end
          conn.security_groups.all.each do |grp|
            if SecurityGroup.where(sg_id: grp.group_id).empty?
              g = SecurityGroup.new(
                sg_id: grp.group_id,
                name: grp.name,
                description: grp.description
              )
              g.save
              begin
                SecurityGroupOwner.create(from_node: g, to_node: AwsAccount.where(account_id: grp.owner_id).first)
                unless grp.vpc_id.nil?
                  SecurityGroupVpc.create(from_node: g, to_node: Vpc.where(vpc_id: grp.vpc_id).first)
                end
              rescue StandardError => e
                puts "Account #{account[:name]} couldn't load the following security group: #{e.message}"
                p grp
              end
            end
            grp.ip_permissions.each do |iprule|
              # Skip "-1" rules, which apply to all protocols.
              next if iprule['ipProtocol'] == '-1'
              iprule['ipRanges'].each do |r|
                to_port = iprule['toPort'] == -1 ? 65_535 : iprule['toPort']
                from_port = iprule['fromPort'] == -1 ? 0 : iprule['fromPort']
                if IpRules.where(
                  cidr_block: r['cidrIp'],
                  direction: 'ingress',
                  proto: iprule['ipProtocol'],
                  to_port: to_port,
                  from_port: from_port
                ).empty?
                  rl = IpRules.new(
                    cidr_block: r['cidrIp'],
                    direction: 'ingress',
                    proto: iprule['ipProtocol'],
                    to_port: to_port,
                    from_port: from_port,
                    private: RFC_1918.any? { |rfc| rfc.include?(IPAddr.new(r['cidrIp'])) }
                  )
                  rl.save
                end
                # TODO: remove duplicate Relationships
                SecurityGroupsIpRules.create(
                  from_node: SecurityGroup.where(sg_id: grp.group_id).first,
                  to_node: IpRules.where(
                    cidr_block: r['cidrIp'],
                    direction: 'ingress',
                    proto: iprule['ipProtocol'],
                    to_port: to_port,
                    from_port: from_port,
                    private: RFC_1918.any? { |rfc| rfc.include?(IPAddr.new(r['cidrIp'])) }
                  ).first
                )
              end
            end
          end
        end
      end
    end
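
    # Return all known Lambda functions as hashes.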
    def list_lambdas
      lambdas = []
      Lambda.all.each do |l|
        lambdas << {
          'name' => l.name,
          'runtime' => l.runtime,
          'handler' => l.handler,
          'lambda_timeout' => l.lambda_timeout,
          'memorysize' => l.memorysize,
          'last_modified' => l.last_modified,
          'region' => l.region.region,
          'owner' => l.owner.name
        }
      end
      lambdas
    end

    def list_rds
      rds = []
      Rds.all.each do |r|
        rds << {
          'name' => r.name,
          'size' => r.size,
          'engine' => r.engine,
          'engine_version' => r.engine_version,
          'multi_az' => r.multi_az,
          'endpoint' => r.endpoint,
          'port' => r.port,
          'allocated_storage' => r.allocated_storage,
          'owner' => r.owner.name,
          'az' => r.az.az
        }
      end
      rds
    end
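
    # Discover Lambda functions in every account/region and store them as
    # Lambda nodes linked to their Region and owning AwsAccount.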
    def load_lambda
      @cfg.accounts.each do |account|
        base_conf = {
          aws_access_key_id: account[:key],
          aws_secret_access_key: account[:secret]
        }
        regions.each do |region|
          region_conf = { region: region }
          begin
            lambdas = Fog::AWS::Lambda.new(region_conf.merge(base_conf))
            lambdas.list_functions.data[:body]['Functions'].each do |f|
              # De-duplicate on the function ARN, which is stored in the arn attribute.
              next unless Lambda.where(arn: f['FunctionArn']).empty?
              l = Lambda.new(
                name: f['FunctionName'],
                runtime: f['Runtime'],
                lambda_timeout: f['Timeout'],
                handler: f['Handler'],
                memorysize: f['MemorySize'],
                arn: f['FunctionArn'],
                codesize: f['CodeSize'],
                last_modified: f['LastModified']
              )
              l.save
              LambdaAccount.create(from_node: l, to_node: AwsAccount.where(name: account[:name]).first)
              LambdaRegion.create(from_node: l, to_node: Region.where(region: region).first)
            end
          rescue StandardError => e
            puts "Error with #{region}: #{e.message}"
            next
          end
        end
      end
    end

    def list_dynamos
      dynamos = []
      Dynamo.all.order('n.sizebytes DESC').each do |d|
        dynamos << {
          'name' => d.name,
          'size' => d.sizebytes,
          'itemcount' => d.itemcount,
          'status' => d.status,
          'creation' => d.creation,
          'region' => d.region.region,
          'owner' => d.owner.name
        }
      end
      dynamos
    end
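
    # Discover DynamoDB tables in every account/region and store them as
    # Dynamo nodes linked to their Region and owning AwsAccount.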
    def load_dynamo
      @cfg.accounts.each do |account|
        base_conf = {
          aws_access_key_id: account[:key],
          aws_secret_access_key: account[:secret]
        }
        regions.each do |region|
          region_conf = { region: region }
          begin
            dyns = Fog::AWS::DynamoDB.new(region_conf.merge(base_conf))
            dyns.list_tables.data[:body]['TableNames'].each do |table|
              tb = dyns.describe_table(table).data[:body]['Table']
              # table is the table-name string, so de-duplicate on it directly.
              next unless Dynamo.where(name: table).empty?
              d = Dynamo.new(
                tableid: tb['TableId'],
                name: tb['TableName'],
                creation: Time.at(tb['CreationDateTime']).to_datetime.strftime('%F %H:%M:%S %Z'),
                arn: tb['TableArn'],
                itemcount: tb['ItemCount'],
                sizebytes: tb['TableSizeBytes'],
                status: tb['TableStatus'],
                readcap: tb['ProvisionedThroughput']['ReadCapacityUnits'],
                writecap: tb['ProvisionedThroughput']['WriteCapacityUnits'],
                capdecreases: tb['ProvisionedThroughput']['NumberOfDecreasesToday']
              )
              d.save
              DynamoAccount.create(from_node: d, to_node: AwsAccount.where(name: account[:name]).first)
              DynamoRegion.create(from_node: d, to_node: Region.where(region: region).first)
            end
          rescue StandardError => e
            puts "Could not list Dynamos for region #{region}: #{e.message}"
            next
          end
        end
      end
    end

    def list_queues
      queues = []
      SQSQueue.all.order('n.name DESC').each do |d|
        queues << {
          'name' => d.name,
          'modified' => d.modified,
          'creation' => d.creation,
          'retention' => d.retention,
          'maxsize' => d.maxsize,
          'region' => d.region.region,
          'owner' => d.owner.name
        }
      end
      queues
    end
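
    # Discover SQS queues in every account/region and store them as SQSQueue
    # nodes linked to their Region and owning AwsAccount.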
    def load_queues
      aws = NeoInfra::Aws.new
      cw = NeoInfra::Cloudwatch.new
      @cfg.accounts.each do |account|
        base_conf = {
          aws_access_key_id: account[:key],
          aws_secret_access_key: account[:secret]
        }
        aws.regions.each do |region|
          region_conf = { region: region }
          q = Fog::AWS::SQS.new(region_conf.merge(base_conf))
          # Guard against regions with no queues, where 'QueueUrls' may be missing.
          (q.list_queues.data[:body]['QueueUrls'] || []).each do |x|
            next unless SQSQueue.where(url: x).empty?
            attrs = q.get_queue_attributes(x, 'All').data[:body]['Attributes']
            z = SQSQueue.new(
              url: x,
              name: x.split('/')[-1],
              modified: attrs['LastModifiedTimestamp'],
              creation: attrs['CreatedTimestamp'],
              retention: attrs['MessageRetentionPeriod'],
              maxsize: attrs['MaximumMessageSize']
            )
            z.save
            SQSQueueRegion.create(from_node: z, to_node: Region.where(region: region).first)
            SQSQueueAccount.create(from_node: z, to_node: AwsAccount.where(name: account[:name]).first)
          end
        end
      end
    end
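
    # Discover RDS instances in every account/region and store them as Rds
    # nodes linked to their Az and owning AwsAccount.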
    def load_rds
      @cfg.accounts.each do |account|
        base_conf = {
          aws_access_key_id: account[:key],
          aws_secret_access_key: account[:secret]
        }
        regions.each do |region|
          region_conf = { region: region }
          s = Fog::AWS::RDS.new(region_conf.merge(base_conf))
          s.servers.each do |rds|
            next unless Rds.where(name: rds.id).empty?
            r = Rds.new(
              name: rds.id,
              size: rds.flavor_id,
              engine: rds.engine,
              engine_version: rds.engine_version,
              multi_az: rds.multi_az.to_s,
              endpoint: rds.endpoint['Address'],
              port: rds.endpoint['Port'],
              allocated_storage: rds.allocated_storage
            )
            r.save
            begin
              RdsAz.create(from_node: r, to_node: Az.where(az: rds.availability_zone).first)
              RdsAccount.create(from_node: r, to_node: AwsAccount.where(name: account[:name]).first)
            rescue StandardError => e
              puts "Account #{account[:name]} couldn't load the following rds: #{e.message}"
              p r
              next
            end
          end
        end
      end
    end
  end
end
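
# Example usage (a minimal sketch; assumes accounts and the Neo4j connection
# are configured via NeoInfra::Config and that the model classes required
# above are loaded):
#
#   aws = NeoInfra::Aws.new
#   aws.load_regions
#   aws.load_buckets
#   aws.list_buckets.each { |b| puts "#{b['name']}: #{b['size']}" }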